#include <assert.h> #include <stdio.h> #include <unistd.h> #include <complex> // Needed for std::min and max to work on device. #include <limits> int verbose = 0; #if __cplusplus >= 201103L #include <type_traits> // Convert a function into a functor with two arguments. We rely on SFINAE to // instantiate a function template call() which will invoke FUNC() with one or // two arguments. #define F(FUNC, NELTS, NARGS) \ typedef struct FUNC##_f { \ static const int num_args = NARGS; \ static const int num_elts = NELTS; \ template <typename T, int NA = num_args> \ __device__ static typename std::enable_if<NA == 1, unsigned int>::type \ call(T a, T b) { \ return FUNC(a); \ } \ template <typename T, int NA = num_args> \ __device__ static typename std::enable_if<NA == 2, unsigned int>::type \ call(T a, T b) { \ return FUNC(a, b); \ } \ } FUNC##_f template <int N, typename T> __device__ unsigned int pack(T a[N]) { unsigned int mask = (N == 2) ? 0xffff : 0xff; unsigned int shift = (N == 2) ? 16 : 8; unsigned int r = 0; for (int i = 0; i < N; ++i) { r |= ((unsigned int)a[i] & mask) << (shift * i); } return r; } template <int N, typename T> __device__ void unpack(unsigned int r, T (&a)[N]) { unsigned int mask = (N == 2) ? 0xffff : 0xff; unsigned int shift = (N == 2) ? 16 : 8; for (int i = 0; i < N; ++i) { a[i] = ((r >> (shift * i)) & mask); } } enum op_t { OP_ABS, OP_ABSDIFF, OP_ABSS, OP_ADD, OP_ADDS, OP_AVG, OP_CMPEQ, OP_CMPGE, OP_CMPGT, OP_CMPLE, OP_CMPLT, OP_CMPNE, OP_HADD, OP_MAX, OP_MIN, OP_NEG, OP_SAD, OP_SETEQ, OP_SETGE, OP_SETGT, OP_SETLE, OP_SETLT, OP_SETNE, OP_SUB, OP_SUBS, OP_LAST }; template <enum op_t OP, typename T> __device__ inline T elt_op(T a, T b = INT_MIN) { switch (OP) { case OP_ABS: if (!std::numeric_limits<T>::is_signed) return a; // This is wrong, but that's what __vabsN() returns. We also need to // handle that because abs(std::numeric_limits<T>::min()) would be an // undefined behavior otherwise. if (a == std::numeric_limits<T>::min()) return std::numeric_limits<T>::min(); return (a >= 0) ? a : -a; case OP_ABSDIFF: return std::abs(a - b); case OP_ABSS: { int result = std::abs(a); if (result > std::numeric_limits<T>::max()) return std::numeric_limits<T>::max(); return result; } case OP_ADD: return a + b; case OP_ADDS: { int result = (int)a + (int)b; if (result > std::numeric_limits<T>::max()) return std::numeric_limits<T>::max(); if (std::numeric_limits<T>::is_signed && result < std::numeric_limits<T>::min()) return std::numeric_limits<T>::min(); return result; } case OP_AVG: // This is *rounded* average. For simplicity let FP do the // rounding. Considering that T is byte or short, we're guaranteed not to // lose any bits. return round(((float)a + (float)b) / 2.0f); case OP_CMPEQ: return a == b ? -1 : 0; case OP_CMPGE: return a >= b ? -1 : 0; case OP_CMPGT: return a > b ? -1 : 0; case OP_CMPLE: return a <= b ? -1 : 0; case OP_CMPLT: return a < b ? -1 : 0; case OP_CMPNE: return a != b ? -1 : 0; case OP_HADD: return (a + b) / 2; case OP_MAX: return std::max(a, b); case OP_MIN: return std::min(a, b); case OP_NEG: // This is wrong, but that's what __vnegN() returns. We also need to // handle that because abs(std::numeric_limits<T>::min()) would be an // undefined behavior otherwise. if (std::numeric_limits<T>::is_signed && a == std::numeric_limits<T>::min()) return std::numeric_limits<T>::min(); return -a; case OP_SAD: return std::abs(a - b); // need to sum per-element results later. case OP_SETEQ: return a == b ? 1 : 0; case OP_SETGE: return a >= b ? 
1 : 0; case OP_SETGT: return a > b ? 1 : 0; case OP_SETLE: return a <= b ? 1 : 0; case OP_SETLT: return a < b ? 1 : 0; case OP_SETNE: return a != b ? 1 : 0; case OP_SUB: return a - b; case OP_SUBS: { int result = (int)a - (int)b; if (result > std::numeric_limits<T>::max()) return std::numeric_limits<T>::max(); if (result < std::numeric_limits<T>::min()) return std::numeric_limits<T>::min(); return result; } default: assert(false && "unknown OP"); } assert(false && "Unreachable."); return 0; }
template <op_t OP, typename T, int N> __device__ void simd_op(T (&r)[N], T a[N], T b[N]) { if (OP == OP_SAD) { // Sum up all elements in r[0] and clear the rest of r. int result = 0; for (int i = 0; i < N; ++i) { result += elt_op<OP, T>(a[i], b[i]); r[i] = 0; } r[0] = result; } else { // Just an element-wise op. for (int i = 0; i < N; ++i) { r[i] = elt_op<OP, T>(a[i], b[i]); } } }
template <op_t OP, class SIMD_OP, typename T> __device__ void test_func(int verbose, int a, int b) { constexpr int N = SIMD_OP::num_elts; int dummy_args[] = {0, 1, -1, std::numeric_limits<T>::max(), std::numeric_limits<T>::max() - 1, std::numeric_limits<T>::min(), std::numeric_limits<T>::min() + 1}; for (T x : dummy_args) { for (int e = 0; e < N; ++e) { T args_a[N]; T args_b[N]; for (int i = 0; i < N; ++i) { args_a[i] = x; args_b[i] = x; } args_a[e] = a; args_b[e] = b; unsigned int va = pack<N, T>(args_a); unsigned int vb = pack<N, T>(args_b); T expected_r[N]; simd_op<OP, T>(expected_r, args_a, args_b); unsigned int evr = pack<N, T>(expected_r);
// This is weird and I don't understand what's going on. With T = short, // the compiler ends up generating code which triggers the assert below // if verbose == 0, but triggers no assert if verbose == 1. It may be // due to undefined behavior somewhere, but the same code (with SIMD_OP // below replaced with pack(simd_op(a, b)), so it could run on host) // triggered no ubsan reports.
asm volatile("" ::: "memory"); unsigned int vr = SIMD_OP::call(va, vb); if (verbose && vr != evr) { printf("e=%d a=%d b=%d va=%08x vb=%08x vr=%08x expected vr=%08x\n", e, a, b, va, vb, vr, evr); } assert((vr == evr) && "Value mismatch"); } } }
template <op_t OP, class SIMD_OP, typename T> __global__ void test_kernel(int verbose) { int a = blockIdx.x * blockDim.x + threadIdx.x; int b = blockIdx.y * blockDim.y + threadIdx.y; test_func<OP, SIMD_OP, T>(verbose, a, b); }
template <op_t OP, class SIMD_OP, typename T> void test_op() { int elements_a = SIMD_OP::num_elts == 2 ? 0x10000 : 0x100; // Collapse the second dimension if we test a single-operand function. int elements_b = SIMD_OP::num_args == 2 ? elements_a : 0; dim3 grid_size(elements_a / 32, elements_b ? elements_b / 32 : 1, 1); dim3 block_size(32, elements_b ? 32 : 1, 1); printf("Testing %s...", __PRETTY_FUNCTION__); test_kernel<OP, SIMD_OP, T><<<grid_size, block_size>>>(verbose); cudaError_t err = cudaDeviceSynchronize(); if (err != cudaSuccess) { printf("%s failed\n", __PRETTY_FUNCTION__); printf("CUDA error %d\n", (int)err); exit(EXIT_FAILURE); } else { printf("OK\n"); } } // Define functor types which we can then use to parametrize device-side tests.
// F(function, num-elements, num-args) F(__vabs2, 2, 1); F(__vabs4, 4, 1); F(__vabsdiffs2, 2, 2); F(__vabsdiffs4, 4, 2); F(__vabsdiffu2, 2, 2); F(__vabsdiffu4, 4, 2); F(__vabsss2, 2, 1); F(__vabsss4, 4, 1); F(__vadd2, 2, 2); F(__vadd4, 4, 2); F(__vaddss2, 2, 2); F(__vaddus2, 2, 2); F(__vaddss4, 4, 2); F(__vaddus4, 4, 2); F(__vavgs2, 2, 2); F(__vavgu2, 2, 2); F(__vavgs4, 4, 2); F(__vavgu4, 4, 2); F(__vcmpeq2, 2, 2); F(__vcmpeq4, 4, 2); F(__vcmpges2, 2, 2); F(__vcmpges4, 4, 2); F(__vcmpgeu2, 2, 2); F(__vcmpgeu4, 4, 2); F(__vcmpgts2, 2, 2); F(__vcmpgts4, 4, 2); F(__vcmpgtu2, 2, 2); F(__vcmpgtu4, 4, 2); F(__vcmples2, 2, 2); F(__vcmples4, 4, 2); F(__vcmpleu2, 2, 2); F(__vcmpleu4, 4, 2); F(__vcmplts2, 2, 2); F(__vcmplts4, 4, 2); F(__vcmpltu2, 2, 2); F(__vcmpltu4, 4, 2); F(__vcmpne2, 2, 2); F(__vcmpne4, 4, 2); F(__vhaddu2, 2, 2); F(__vhaddu4, 4, 2); F(__vmaxs2, 2, 2); F(__vmaxs4, 4, 2); F(__vmaxu2, 2, 2); F(__vmaxu4, 4, 2); F(__vmins2, 2, 2); F(__vmins4, 4, 2); F(__vminu2, 2, 2); F(__vminu4, 4, 2); F(__vneg2, 2, 1); F(__vneg4, 4, 1); F(__vsads2, 2, 2); F(__vsadu2, 2, 2); F(__vsads4, 4, 2); F(__vsadu4, 4, 2); F(__vseteq2, 2, 2); F(__vseteq4, 4, 2); F(__vsetges2, 2, 2); F(__vsetges4, 4, 2); F(__vsetgeu2, 2, 2); F(__vsetgeu4, 4, 2); F(__vsetgts2, 2, 2); F(__vsetgts4, 4, 2); F(__vsetgtu2, 2, 2); F(__vsetgtu4, 4, 2); F(__vsetles2, 2, 2); F(__vsetles4, 4, 2); F(__vsetleu2, 2, 2); F(__vsetleu4, 4, 2); F(__vsetlts2, 2, 2); F(__vsetlts4, 4, 2); F(__vsetltu2, 2, 2); F(__vsetltu4, 4, 2); F(__vsetne2, 2, 2); F(__vsetne4, 4, 2); F(__vsub2, 2, 2); F(__vsub4, 4, 2); F(__vsubss2, 2, 2); F(__vsubus2, 2, 2); F(__vsubss4, 4, 2); F(__vsubus4, 4, 2); void tests() { test_op<OP_NEG, __vneg2_f, short>(); test_op<OP_ABS, __vabs2_f, short>(); test_op<OP_ABS, __vabs4_f, signed char>(); test_op<OP_ABSDIFF, __vabsdiffs2_f, short>(); test_op<OP_ABSDIFF, __vabsdiffs4_f, signed char>(); test_op<OP_ABSDIFF, __vabsdiffu2_f, unsigned short>(); test_op<OP_ABSDIFF, __vabsdiffu4_f, unsigned char>(); test_op<OP_ABSS, __vabsss2_f, short>(); test_op<OP_ABSS, __vabsss4_f, signed char>(); test_op<OP_ADD, __vadd2_f, short>(); test_op<OP_ADD, __vadd4_f, signed char>(); test_op<OP_ADDS, __vaddss2_f, short>(); test_op<OP_ADDS, __vaddss4_f, signed char>(); test_op<OP_ADDS, __vaddus2_f, unsigned short>(); test_op<OP_ADDS, __vaddus4_f, unsigned char>(); test_op<OP_AVG, __vavgs2_f, short>(); test_op<OP_AVG, __vavgs4_f, signed char>(); test_op<OP_AVG, __vavgu2_f, unsigned short>(); test_op<OP_AVG, __vavgu4_f, unsigned char>(); test_op<OP_CMPEQ, __vcmpeq2_f, short>(); test_op<OP_CMPEQ, __vcmpeq4_f, signed char>(); test_op<OP_CMPGE, __vcmpges2_f, short>(); test_op<OP_CMPGE, __vcmpges4_f, signed char>(); test_op<OP_CMPGE, __vcmpgeu2_f, unsigned short>(); test_op<OP_CMPGE, __vcmpgeu4_f, unsigned char>(); test_op<OP_CMPGT, __vcmpgts2_f, short>(); test_op<OP_CMPGT, __vcmpgts4_f, signed char>(); test_op<OP_CMPGT, __vcmpgtu2_f, unsigned short>(); test_op<OP_CMPGT, __vcmpgtu4_f, unsigned char>(); test_op<OP_CMPLE, __vcmples2_f, short>(); test_op<OP_CMPLE, __vcmples4_f, signed char>(); test_op<OP_CMPLE, __vcmpleu2_f, unsigned short>(); test_op<OP_CMPLE, __vcmpleu4_f, unsigned char>(); test_op<OP_CMPLT, __vcmplts2_f, short>(); test_op<OP_CMPLT, __vcmplts4_f, signed char>(); test_op<OP_CMPLT, __vcmpltu2_f, unsigned short>(); test_op<OP_CMPLT, __vcmpltu4_f, unsigned char>(); test_op<OP_CMPNE, __vcmpne2_f, short>(); test_op<OP_CMPNE, __vcmpne4_f, signed char>(); test_op<OP_HADD, __vhaddu2_f, unsigned short>(); test_op<OP_HADD, __vhaddu4_f, unsigned char>(); 
test_op<OP_MAX, __vmaxs2_f, short>(); // ??? Fails? test_op<OP_MAX, __vmaxs4_f, signed char>(); test_op<OP_MAX, __vmaxu2_f, unsigned short>(); test_op<OP_MAX, __vmaxu4_f, unsigned char>(); test_op<OP_MIN, __vmins2_f, short>(); test_op<OP_MIN, __vmins4_f, signed char>(); test_op<OP_MIN, __vminu2_f, unsigned short>(); test_op<OP_MIN, __vminu4_f, unsigned char>(); test_op<OP_NEG, __vneg2_f, short>(); test_op<OP_NEG, __vneg4_f, signed char>(); test_op<OP_SAD, __vsads2_f, short>(); test_op<OP_SAD, __vsads4_f, signed char>(); test_op<OP_SAD, __vsadu2_f, unsigned short>(); test_op<OP_SAD, __vsadu4_f, unsigned char>(); test_op<OP_SETEQ, __vseteq2_f, short>(); test_op<OP_SETEQ, __vseteq4_f, signed char>(); test_op<OP_SETGE, __vsetges2_f, short>(); test_op<OP_SETGE, __vsetges4_f, signed char>(); test_op<OP_SETGE, __vsetgeu2_f, unsigned short>(); test_op<OP_SETGE, __vsetgeu4_f, unsigned char>(); test_op<OP_SETGT, __vsetgts2_f, short>(); test_op<OP_SETGT, __vsetgts4_f, signed char>(); test_op<OP_SETGT, __vsetgtu2_f, unsigned short>(); test_op<OP_SETGT, __vsetgtu4_f, unsigned char>(); test_op<OP_SETLE, __vsetles2_f, short>(); test_op<OP_SETLE, __vsetles4_f, signed char>(); test_op<OP_SETLE, __vsetleu2_f, unsigned short>(); test_op<OP_SETLE, __vsetleu4_f, unsigned char>(); test_op<OP_SETLT, __vsetlts2_f, short>(); test_op<OP_SETLT, __vsetlts4_f, signed char>(); test_op<OP_SETLT, __vsetltu2_f, unsigned short>(); test_op<OP_SETLT, __vsetltu4_f, unsigned char>(); test_op<OP_SETNE, __vsetne2_f, short>(); test_op<OP_SETNE, __vsetne4_f, signed char>(); test_op<OP_SUB, __vsub2_f, short>(); test_op<OP_SUB, __vsub4_f, signed char>(); test_op<OP_SUBS, __vsubss2_f, short>(); test_op<OP_SUBS, __vsubss4_f, signed char>(); test_op<OP_SUBS, __vsubus2_f, unsigned short>(); test_op<OP_SUBS, __vsubus4_f, unsigned char>(); } #else // !C++11 void tests() { // These tests need C++11 to compile. } #endif int main(int argc, char** argv) { int opt; while ((opt = getopt(argc, argv, "v")) != -1) { switch (opt) { case 'v': verbose = 1; break; default: /* '?' */ fprintf(stderr, "Usage: %s [-v]\n", argv[0]); exit(EXIT_FAILURE); } } tests(); printf("Success!\n"); return 0; }
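A minimal host-side sketch (not part of the test above) of the packing convention that pack<>() and unpack<>() assume: two 16-bit lanes or four 8-bit lanes stored in one 32-bit word, with lane 0 in the low bits. The pack2 helper and the expected __vadd2 result below are illustrative only.

#include <cstdio>

// Hypothetical helper mirroring pack<2, short>(): lane 0 goes in the low
// 16 bits, lane 1 in the high 16 bits.
static unsigned int pack2(short lo, short hi) {
  return ((unsigned int)(unsigned short)lo) |
         ((unsigned int)(unsigned short)hi << 16);
}

int main() {
  unsigned int a = pack2(1, -2);  // lanes {1, -2} -> 0xfffe0001
  unsigned int b = pack2(3, 7);   // lanes {3,  7} -> 0x00070003
  // Element-wise 16-bit add with wraparound, i.e. what the harness expects
  // __vadd2(a, b) to return on the device.
  unsigned int expected = pack2(1 + 3, -2 + 7);
  std::printf("a=%08x b=%08x expected __vadd2=%08x\n", a, b, expected);
  return 0;
}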
#define WARPSIZE 32 #include <cooperative_groups.h> // since we need to do a full O(N^2) computing and we don't need to broadcast the forces, // this should just be extremely efficient already // #define RADII_EXP 4, used for switching function shenanigans, disabled for now // nope, still need to parallelize out template <typename RealType> __global__ void k_compute_born_radii( const int N, const double *coords, const double lambda, const int *lambda_plane_idxs, // 0 or 1, which non-interacting plane we're on const int *lambda_offset_idxs, // 0 or 1, how much we offset from the plane by cutoff const double *gb_params, const double dielectric_offset, const double cutoff, const double *block_bounds_ctr, const double *block_bounds_ext, unsigned long long *born_psi) { RealType block_d2ij = 0; for (int d = 0; d < 3; d++) { RealType block_row_ctr = block_bounds_ctr[blockIdx.x * 3 + d]; RealType block_col_ctr = block_bounds_ctr[blockIdx.y * 3 + d]; RealType block_row_ext = block_bounds_ext[blockIdx.x * 3 + d]; RealType block_col_ext = block_bounds_ext[blockIdx.y * 3 + d]; RealType dx = max(0.0, fabs(block_row_ctr - block_col_ctr) - (block_row_ext + block_col_ext)); block_d2ij += dx * dx; } if (block_d2ij > cutoff * cutoff) { return; } int atom_i_idx = blockIdx.x * 32 + threadIdx.x; int lambda_plane_i = 0; int lambda_offset_i = 0; if (atom_i_idx < N) { lambda_plane_i = lambda_plane_idxs[atom_i_idx]; lambda_offset_i = lambda_offset_idxs[atom_i_idx]; } RealType ci[3]; for (int d = 0; d < 3; d++) { ci[d] = atom_i_idx < N ? coords[atom_i_idx * 3 + d] : 0; } // int radii_param_idx_i = atom_i_idx < N ? atomic_radii_idxs[atom_i_idx] : 0; int radii_param_idx_i = atom_i_idx < N ? atom_i_idx * 2 + 0 : 0; RealType radiusI = atom_i_idx < N ? gb_params[radii_param_idx_i] : 0; RealType offsetRadiusI = radiusI - dielectric_offset; RealType radiusIInverse = 1 / offsetRadiusI; int atom_j_idx = blockIdx.y * 32 + threadIdx.x; int lambda_plane_j = 0; int lambda_offset_j = 0; if (atom_j_idx < N) { lambda_plane_j = lambda_plane_idxs[atom_j_idx]; lambda_offset_j = lambda_offset_idxs[atom_j_idx]; } // *always* accumulate in 64 bit. double sum = 0; RealType cj[3]; for (int d = 0; d < 3; d++) { cj[d] = atom_j_idx < N ? coords[atom_j_idx * 3 + d] : 0; } int radii_param_idx_j = atom_j_idx < N ? atom_j_idx * 2 + 0 : 0; int scale_param_idx_j = atom_j_idx < N ? atom_j_idx * 2 + 1 : 0; RealType radiusJ = atom_j_idx < N ? gb_params[radii_param_idx_j] : 0; RealType scaleFactorJ = atom_j_idx < N ? 
gb_params[scale_param_idx_j] : 0; RealType offsetRadiusJ = radiusJ - dielectric_offset; RealType scaledRadiusJ = offsetRadiusJ * scaleFactorJ; for (int round = 0; round < 32; round++) { RealType dxs[4]; for (int d = 0; d < 3; d++) { dxs[d] = ci[d] - cj[d]; } // RealType delta_lambda = lambda_i - lambda_j; RealType delta_lambda = (lambda_plane_i - lambda_plane_j) * cutoff + (lambda_offset_i - lambda_offset_j) * lambda; dxs[3] = delta_lambda; RealType r = fast_vec_norm<RealType, 4>(dxs); RealType rInverse = 1 / r; RealType rScaledRadiusJ = r + scaledRadiusJ; RealType rSubScaledRadiusJ = r - scaledRadiusJ; if (atom_j_idx != atom_i_idx && r < cutoff && atom_j_idx < N && atom_i_idx < N) { if (offsetRadiusI < rScaledRadiusJ) { RealType l_ij = 0; if (offsetRadiusI > abs(rSubScaledRadiusJ)) { l_ij = offsetRadiusI; } else { l_ij = abs(rSubScaledRadiusJ); } l_ij = 1 / l_ij; // RealType inv_uij = rScaledRadiusJ; RealType u_ij = 1 / rScaledRadiusJ; RealType l_ij2 = l_ij * l_ij; RealType u_ij2 = u_ij * u_ij; RealType ratio = log(u_ij / l_ij); // RealType term = l_ij - u_ij + 0.25*r*(u_ij2 - l_ij2) + (0.5*rInverse*ratio) + (0.25*scaledRadiusJ*scaledRadiusJ*rInverse)*(l_ij2 - u_ij2); RealType term = l_ij - u_ij + r * (u_ij2 - l_ij2) / 4 + scaledRadiusJ * scaledRadiusJ * rInverse * (l_ij2 - u_ij2) / 4 + rInverse * ratio / 2; // this case (atom i completely inside atom j) is not considered in the original paper // Jay Ponder and the authors of Tinker recognized this and // worked out the details if (offsetRadiusI < (scaledRadiusJ - r)) { term += 2 * (radiusIInverse - l_ij); } // RealType inner = (PI*pow(r,RADII_EXP))/(BOXSIZE); // RealType sw = cos(inner); // sw = sw*sw; RealType sw = 1; term = sw * term; sum += term; } } const int srcLane = (threadIdx.x + 1) % WARPSIZE; scaledRadiusJ = __shfl_sync(0xffffffff, scaledRadiusJ, srcLane); atom_j_idx = __shfl_sync(0xffffffff, atom_j_idx, srcLane); for (int d = 0; d < 3; d++) { cj[d] = __shfl_sync(0xffffffff, cj[d], srcLane); } lambda_plane_j = __shfl_sync(0xffffffff, lambda_plane_j, srcLane); lambda_offset_j = __shfl_sync(0xffffffff, lambda_offset_j, srcLane); } if (atom_i_idx < N) { atomicAdd(born_psi + atom_i_idx, static_cast<unsigned long long>((long long)(sum * FIXED_BORN_PSI))); } } template <typename RealType> void __global__ k_compute_born_first_loop_gpu( const int N, const double *coords, const double lambda, const int *lambda_plane_idxs, // 0 or 1, which non-interacting plane we're on const int *lambda_offset_idxs, // 0 or 1, how much we offset from the plane by cutoff const double *charge_params, const double *born_radii, const double prefactor, const double cutoff, const double *block_bounds_ctr, const double *block_bounds_ext, unsigned long long *bornForces, unsigned long long *out_forces, double *du_dl, double *out_energy) { if (blockIdx.y > blockIdx.x) { return; } RealType block_d2ij = 0; for (int d = 0; d < 3; d++) { RealType block_row_ctr = block_bounds_ctr[blockIdx.x * 3 + d]; RealType block_col_ctr = block_bounds_ctr[blockIdx.y * 3 + d]; RealType block_row_ext = block_bounds_ext[blockIdx.x * 3 + d]; RealType block_col_ext = block_bounds_ext[blockIdx.y * 3 + d]; RealType dx = max(0.0, fabs(block_row_ctr - block_col_ctr) - (block_row_ext + block_col_ext)); block_d2ij += dx * dx; } if (block_d2ij > cutoff * cutoff) { return; } int atom_i_idx = blockIdx.x * 32 + threadIdx.x; RealType du_dl_i = 0; int lambda_plane_i = 0; int lambda_offset_i = 0; if (atom_i_idx < N) { lambda_plane_i = lambda_plane_idxs[atom_i_idx]; lambda_offset_i = 
lambda_offset_idxs[atom_i_idx]; } RealType ci[3]; RealType gi[3] = {0}; for (int d = 0; d < 3; d++) { ci[d] = atom_i_idx < N ? coords[atom_i_idx * 3 + d] : 0; } int charge_param_idx_i = atom_i_idx < N ? atom_i_idx : 0; RealType qi = atom_i_idx < N ? charge_params[charge_param_idx_i] : 0; RealType born_radii_i = atom_i_idx < N ? born_radii[atom_i_idx] : 0; // RealType dE_dqi_accum = 0; RealType born_force_i_accum = 0; int atom_j_idx = blockIdx.y * 32 + threadIdx.x; RealType du_dl_j = 0; int lambda_plane_j = 0; int lambda_offset_j = 0; if (atom_j_idx < N) { lambda_plane_j = lambda_plane_idxs[atom_j_idx]; lambda_offset_j = lambda_offset_idxs[atom_j_idx]; } RealType cj[3]; RealType gj[3] = {0}; for (int d = 0; d < 3; d++) { cj[d] = atom_j_idx < N ? coords[atom_j_idx * 3 + d] : 0; } int charge_param_idx_j = atom_j_idx < N ? atom_j_idx : 0; RealType qj = atom_j_idx < N ? charge_params[charge_param_idx_j] : 0; RealType born_radii_j = atom_j_idx < N ? born_radii[atom_j_idx] : 0; // RealType dE_dqj_accum = 0; RealType born_force_j_accum = 0; RealType obc_energy = 0; // In inference mode, we don't care about gradients with respect to parameters. for (int round = 0; round < 32; round++) { RealType dxs[4]; for (int d = 0; d < 3; d++) { dxs[d] = ci[d] - cj[d]; } RealType delta_lambda = (lambda_plane_i - lambda_plane_j) * cutoff + (lambda_offset_i - lambda_offset_j) * lambda; dxs[3] = delta_lambda; RealType r = fast_vec_norm<RealType, 4>(dxs); RealType r2 = r * r; // RealType rInverse = 1/r; // RealType rInverse = fast_vec_rnorm<RealType, D>(dxs); if (atom_j_idx <= atom_i_idx && r < cutoff && atom_j_idx < N && atom_i_idx < N) { RealType alpha2_ij = born_radii_i * born_radii_j; RealType D_ij = r2 / (4 * alpha2_ij); RealType expTerm = exp(-D_ij); RealType denominator2 = r2 + alpha2_ij * expTerm; RealType denominator = sqrt(denominator2); RealType Gpol = (prefactor * qi * qj) / denominator; RealType dGpol_dr = -Gpol * (1 - expTerm / 4) / denominator2; RealType dGpol_dalpha2_ij = -(Gpol / 2) * expTerm * (1 + D_ij) / denominator2; RealType energy = Gpol; // RealType inner = (PI*pow(r,8))/(BOXSIZE); // RealType sw = cos(inner); // sw = sw*sw; // RealType dsw_dr = -(RADII_EXP)*pow(r, RADII_EXP-1)*(PI/cutoff)*sin(inner)*cos(inner); RealType sw = 1; RealType dsw_dr = 0; RealType dsw_dr_dot_E = dsw_dr * energy; if (atom_i_idx != atom_j_idx) { energy = sw * energy; born_force_j_accum += sw * dGpol_dalpha2_ij * born_radii_i; for (int d = 0; d < 3; d++) { // gi[d] += dxs[d]*dGpol_dr; // gj[d] -= dxs[d]*dGpol_dr; gi[d] += dxs[d] * sw * dGpol_dr + dsw_dr_dot_E * dxs[d] / r; gj[d] -= dxs[d] * sw * dGpol_dr + dsw_dr_dot_E * dxs[d] / r; } int dw_i = lambda_offset_i; int dw_j = lambda_offset_j; // du_dl_i += dxs[3]*dGpol_dr*dw_i; // du_dl_j -= dxs[3]*dGpol_dr*dw_j; du_dl_i += (dxs[3] * sw * dGpol_dr + dsw_dr_dot_E * dxs[3] / r) * dw_i; du_dl_j -= (dxs[3] * sw * dGpol_dr + dsw_dr_dot_E * dxs[3] / r) * dw_j; } else { energy *= 0.5; } obc_energy += energy; born_force_i_accum += sw * dGpol_dalpha2_ij * born_radii_j; } const int srcLane = (threadIdx.x + 1) % WARPSIZE; atom_j_idx = __shfl_sync(0xffffffff, atom_j_idx, srcLane); qj = __shfl_sync(0xffffffff, qj, srcLane); born_radii_j = __shfl_sync(0xffffffff, born_radii_j, srcLane); born_force_j_accum = __shfl_sync(0xffffffff, born_force_j_accum, srcLane); for (size_t d = 0; d < 3; d++) { cj[d] = __shfl_sync(0xffffffff, cj[d], srcLane); gj[d] = __shfl_sync(0xffffffff, gj[d], srcLane); } lambda_plane_j = __shfl_sync(0xffffffff, lambda_plane_j, srcLane); lambda_offset_j = 
__shfl_sync(0xffffffff, lambda_offset_j, srcLane); du_dl_j = __shfl_sync(0xffffffff, du_dl_j, srcLane); } for (int d = 0; d < 3; d++) { if (atom_i_idx < N) { atomicAdd( out_forces + atom_i_idx * 3 + d, static_cast<unsigned long long>((long long)(gi[d] * FIXED_EXPONENT))); } if (atom_j_idx < N) { atomicAdd( out_forces + atom_j_idx * 3 + d, static_cast<unsigned long long>((long long)(gj[d] * FIXED_EXPONENT))); } } if (atom_i_idx < N) { atomicAdd( bornForces + atom_i_idx, static_cast<unsigned long long>((long long)(born_force_i_accum * FIXED_EXPONENT_BORN_FORCES))); } if (atom_j_idx < N) { atomicAdd( bornForces + atom_j_idx, static_cast<unsigned long long>((long long)(born_force_j_accum * FIXED_EXPONENT_BORN_FORCES))); } atomicAdd(du_dl, du_dl_i + du_dl_j); atomicAdd(out_energy, obc_energy); } __global__ void k_reduce_born_radii( const int N, const double *gb_params, const double dielectric_offset, const double alpha_obc, const double beta_obc, const double gamma_obc, const unsigned long long *born_psi, double *born_radii, double *obc_chain) { int atom_i_idx = blockIdx.x * 32 + threadIdx.x; if (atom_i_idx >= N) { return; } int radii_param_idx_i = atom_i_idx < N ? atom_i_idx * 2 + 0 : 0; double radiusI = atom_i_idx < N ? gb_params[radii_param_idx_i] : 0; double offsetRadiusI = radiusI - dielectric_offset; double sum = static_cast<double>(static_cast<long long>(born_psi[atom_i_idx])) / FIXED_BORN_PSI; sum *= offsetRadiusI / 2; double sum2 = sum * sum; double sum3 = sum * sum2; double inner = alpha_obc * sum - beta_obc * sum2 + gamma_obc * sum3; double tanhSum = tanh(inner); if (atom_i_idx < N) { double br = offsetRadiusI * radiusI / (radiusI - offsetRadiusI * tanhSum); born_radii[atom_i_idx] = br; obc_chain[atom_i_idx] = br * br * (1 - tanhSum * tanhSum) * (alpha_obc - 2 * beta_obc * sum + 3 * gamma_obc * sum2) / radiusI; } } // this is entirely done in double precision __global__ void k_reduce_born_forces( const int N, const double *gb_params, const double *born_radii, const double *obc_chain, const double surface_tension, // surface area factor const double probe_radius, unsigned long long *bornForces, // dU/Ri double *energy) { // surface area term int atomI = blockIdx.x * 32 + threadIdx.x; if (atomI >= N) { return; } // double radii_derivs = 0; double born_force_i = static_cast<double>(static_cast<long long>(bornForces[atomI])) / FIXED_EXPONENT_BORN_FORCES; double br = born_radii[atomI]; // ACE term if (br > 0.0) { int atomic_radii_idx_i = atomI * 2 + 0; // double atomic_radii = params[atomic_radii_idxs[atomI]]; double atomic_radii = gb_params[atomic_radii_idx_i]; double r = atomic_radii + probe_radius; double ratio6 = pow(atomic_radii / born_radii[atomI], 6.0); double saTerm = surface_tension * r * r * ratio6; atomicAdd(energy, saTerm); born_force_i -= 6.0 * saTerm / born_radii[atomI]; } born_force_i *= obc_chain[atomI]; bornForces[atomI] = static_cast<unsigned long long>((long long)(born_force_i * FIXED_EXPONENT_BORN_FORCES)); } template <typename RealType> __global__ void k_compute_born_energy_and_forces( const int N, const double *coords, const double lambda, const int *lambda_plane_idxs, // 0 or 1, which non-interacting plane we're on const int *lambda_offset_idxs, // 0 or 1, how much we offset from the plane by cutoff const double *gb_params, const double *born_radii, const double *obc_chain, const double dielectric_offset, const double cutoff, const double *block_bounds_ctr, const double *block_bounds_ext, const unsigned long long *bornForces, unsigned long long *out_forces, double 
*du_dl) { RealType block_d2ij = 0; for (int d = 0; d < 3; d++) { RealType block_row_ctr = block_bounds_ctr[blockIdx.x * 3 + d]; RealType block_col_ctr = block_bounds_ctr[blockIdx.y * 3 + d]; RealType block_row_ext = block_bounds_ext[blockIdx.x * 3 + d]; RealType block_col_ext = block_bounds_ext[blockIdx.y * 3 + d]; RealType dx = max(0.0, fabs(block_row_ctr - block_col_ctr) - (block_row_ext + block_col_ext)); block_d2ij += dx * dx; } if (block_d2ij > cutoff * cutoff) { return; } int atom_i_idx = blockIdx.x * 32 + threadIdx.x; RealType du_dl_i = 0; int lambda_plane_i = 0; int lambda_offset_i = 0; if (atom_i_idx < N) { lambda_plane_i = lambda_plane_idxs[atom_i_idx]; lambda_offset_i = lambda_offset_idxs[atom_i_idx]; } RealType ci[3]; RealType dPsi_dx_i[3] = {0}; for (int d = 0; d < 3; d++) { ci[d] = atom_i_idx < N ? coords[atom_i_idx * 3 + d] : 0; } int atomic_radii_idx_i = atom_i_idx < N ? atom_i_idx * 2 + 0 : 0; RealType radiusI = atom_i_idx < N ? gb_params[atomic_radii_idx_i] : 0; RealType born_force_i = atom_i_idx < N ? static_cast<RealType>(static_cast<long long>(bornForces[atom_i_idx])) / FIXED_EXPONENT_BORN_FORCES : 0; // RealType born_radii_i = atom_i_idx < N ? born_radii[atom_i_idx] : 0; // RealType dPsi_dri = 0; int atom_j_idx = blockIdx.y * 32 + threadIdx.x; RealType du_dl_j = 0; int lambda_plane_j = 0; int lambda_offset_j = 0; if (atom_j_idx < N) { lambda_plane_j = lambda_plane_idxs[atom_j_idx]; lambda_offset_j = lambda_offset_idxs[atom_j_idx]; } RealType cj[3]; RealType dPsi_dx_j[3] = {0}; for (int d = 0; d < 3; d++) { cj[d] = atom_j_idx < N ? coords[atom_j_idx * 3 + d] : 0; } int atomic_radii_idx_j = atom_j_idx < N ? atom_j_idx * 2 + 0 : 0; RealType radiusJ = atom_j_idx < N ? gb_params[atomic_radii_idx_j] : 0; int scale_factor_idx_j = atom_j_idx < N ? atom_j_idx * 2 + 1 : 0; RealType scaleFactorJ = atom_j_idx < N ? gb_params[scale_factor_idx_j] : 0; RealType born_radii_j = atom_j_idx < N ? born_radii[atom_j_idx] : 0; const RealType dielectricOffset = dielectric_offset; RealType offsetRadiusI = radiusI - dielectricOffset; // int atomI = atom_i_idx; // int atomJ = atom_j_idx; for (int round = 0; round < 32; round++) { RealType dxs[4]; for (int d = 0; d < 3; d++) { dxs[d] = ci[d] - cj[d]; } // RealType delta_lambda = lambda_i - lambda_j; RealType delta_lambda = (lambda_plane_i - lambda_plane_j) * cutoff + (lambda_offset_i - lambda_offset_j) * lambda; dxs[3] = delta_lambda; RealType r = fast_vec_norm<RealType, 4>(dxs); if (atom_j_idx != atom_i_idx && r < cutoff && atom_j_idx < N && atom_i_idx < N) { RealType rInverse = 1 / r; // RealType rInverse = fast_vec_rnorm<RealType, D>(dxs); // radius w/ dielectric offset applied RealType offsetRadiusJ = radiusJ - dielectricOffset; RealType scaledRadiusJ = offsetRadiusJ * scaleFactorJ; RealType scaledRadiusJ2 = scaledRadiusJ * scaledRadiusJ; RealType rScaledRadiusJ = r + scaledRadiusJ; if (offsetRadiusI < rScaledRadiusJ) { // RealType l_ij = offsetRadiusI > abs(rSubScaledRadiusJ) ? offsetRadiusI : abs(rSubScaledRadiusJ); // l_ij = 1.0/l_ij; // RealType u_ij = 1.0/rScaledRadiusJ; // RealType l_ij2 = l_ij*l_ij; // RealType u_ij2 = u_ij*u_ij; // RealType rInverse = 1.0/r; // RealType r2Inverse = rInverse*rInverse; // RealType t3 = 0.125*(1.0 + scaledRadiusJ2*r2Inverse)*(l_ij2 - u_ij2) + 0.25*log(u_ij/l_ij)*r2Inverse; RealType rSubScaledRadiusJ = r - scaledRadiusJ; RealType rSubScaledRadiusJ2 = rSubScaledRadiusJ * rSubScaledRadiusJ; RealType arss = abs(rSubScaledRadiusJ); RealType l_ij = offsetRadiusI > arss ? 
offsetRadiusI : arss; l_ij = 1 / l_ij; RealType l_ij2 = l_ij * l_ij; RealType u_ij = 1 / rScaledRadiusJ; RealType u_ij2 = u_ij * u_ij; // original expression // RealType term = l_ij - u_ij + 0.25*(u_ij2 - l_ij2)*t1 + (0.5*rInverse*ratio); RealType dl_dr = offsetRadiusI > arss ? 0 : -l_ij2 * sign(rSubScaledRadiusJ); RealType du_dr = -u_ij2 * sign(rScaledRadiusJ); RealType t1 = r - scaledRadiusJ2 * rInverse; RealType dt1_dr = (1 + scaledRadiusJ2 * rInverse * rInverse); RealType ratio = log(u_ij / l_ij); RealType de1 = dl_dr - du_dr; // we may need three separate accumulators for precision RealType de2 = (u_ij * du_dr - l_ij * dl_dr) * t1; RealType de3 = (u_ij2 - l_ij2) * dt1_dr / 2; RealType de4 = rInverse * (rInverse * ratio - (du_dr / u_ij - dl_dr / l_ij)); RealType de = de1 + (de2 + de3 - de4) / 2; // this is the derivative with respect to r if (offsetRadiusI > arss) { // if(offsetRadiusI < (scaledRadiusJ - r)) { // de += 0; // } } else { if (offsetRadiusI >= rSubScaledRadiusJ) { de += 2 * sign(rSubScaledRadiusJ) / rSubScaledRadiusJ2; } } // needed for switch rescale RealType term = l_ij - u_ij + r * (u_ij2 - l_ij2) / 4 + scaledRadiusJ * scaledRadiusJ * rInverse * (l_ij2 - u_ij2) / 4 + rInverse * ratio / 2; if (offsetRadiusI < (scaledRadiusJ - r)) { RealType radiusIInverse = 1 / offsetRadiusI; term += 2 * (radiusIInverse - l_ij); } // RealType inner = (PI*pow(r, RADII_EXP))/(BOXSIZE); // RealType sw = cos(inner); // sw = sw*sw; // RealType dsw_dr = -(RADII_EXP)*pow(r, RADII_EXP-1)*(PI/cutoff)*sin(inner)*cos(inner); RealType sw = 1; RealType dsw_dr = 0; de = dsw_dr * term + de * sw; de *= born_force_i * offsetRadiusI / 2; for (int d = 0; d < 3; d++) { RealType deriv = dxs[d] * de * rInverse; dPsi_dx_i[d] += deriv; dPsi_dx_j[d] -= deriv; } int dw_i = lambda_offset_i; int dw_j = lambda_offset_j; du_dl_i += dxs[3] * de * dw_i * rInverse; du_dl_j -= dxs[3] * de * dw_j * rInverse; } } const int srcLane = (threadIdx.x + 1) % WARPSIZE; atom_j_idx = __shfl_sync(0xffffffff, atom_j_idx, srcLane); born_radii_j = __shfl_sync(0xffffffff, born_radii_j, srcLane); radiusJ = __shfl_sync(0xffffffff, radiusJ, srcLane); scaleFactorJ = __shfl_sync(0xffffffff, scaleFactorJ, srcLane); atomic_radii_idx_i = __shfl_sync(0xffffffff, atomic_radii_idx_i, srcLane); scale_factor_idx_j = __shfl_sync(0xffffffff, scale_factor_idx_j, srcLane); for (int d = 0; d < 3; d++) { cj[d] = __shfl_sync(0xffffffff, cj[d], srcLane); dPsi_dx_j[d] = __shfl_sync(0xffffffff, dPsi_dx_j[d], srcLane); } lambda_plane_j = __shfl_sync(0xffffffff, lambda_plane_j, srcLane); lambda_offset_j = __shfl_sync(0xffffffff, lambda_offset_j, srcLane); du_dl_j = __shfl_sync(0xffffffff, du_dl_j, srcLane); } for (int d = 0; d < 3; d++) { if (atom_i_idx < N) { atomicAdd( out_forces + atom_i_idx * 3 + d, static_cast<unsigned long long>((long long)(dPsi_dx_i[d] * FIXED_EXPONENT))); } if (atom_j_idx < N) { atomicAdd( out_forces + atom_j_idx * 3 + d, static_cast<unsigned long long>((long long)(dPsi_dx_j[d] * FIXED_EXPONENT))); } } atomicAdd(du_dl, du_dl_i + du_dl_j); }
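The kernels above accumulate psi terms and forces by scaling doubles into 64-bit fixed point and adding them with atomicAdd on unsigned long long. Below is a small host-side sketch of that encode/accumulate/decode round trip; FIXED_SCALE is a stand-in for the FIXED_BORN_PSI / FIXED_EXPONENT constants, which are defined elsewhere in the project and whose values are not shown here.

#include <cstdio>

static const double FIXED_SCALE = 1e8;  // stand-in value; the real constants live elsewhere

// Encode: scale, truncate to signed 64-bit, then reinterpret as unsigned so the
// value can be accumulated with the unsigned-long-long overload of atomicAdd.
static unsigned long long encode_fixed(double x) {
  return static_cast<unsigned long long>(static_cast<long long>(x * FIXED_SCALE));
}

// Decode: reinterpret back as signed and undo the scaling, as the reduction
// kernels above do with static_cast<long long>(...) / FIXED_... .
static double decode_fixed(unsigned long long v) {
  return static_cast<double>(static_cast<long long>(v)) / FIXED_SCALE;
}

int main() {
  unsigned long long acc = 0;
  acc += encode_fixed(1.25);   // positive contribution
  acc += encode_fixed(-0.75);  // negative contribution; wraps around, but cancels correctly
  std::printf("decoded sum = %.6f\n", decode_fixed(acc));  // prints 0.500000
  return 0;
}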
\brief Unit tests for thread-level GEMM */ #include "../../common/cutlass_unit_test.h" #include "cutlass/gemm/thread/mma.h" #include "testbed.h" ///////////////////////////////////////////////////////////////////////////////////////////////// // // Compute capability SM60 // TEST(SM60_Hgemm_thread, col_row_col_1x1x16) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<1, 1, 16>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, row_col_row_1x1x16) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<1, 1, 16>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, row_col_col_1x3x8) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<1, 3, 8>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, row_row_row_7x8x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<7, 8, 3>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, row_col_row_7x8x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<7, 8, 3>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, col_row_row_7x8x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<7, 8, 3>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, col_col_row_7x8x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<7, 8, 3>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, row_row_row_7x8x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<7, 8, 4>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, row_col_row_7x8x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<7, 8, 4>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, col_row_row_7x8x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<7, 8, 4>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, col_col_row_7x8x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<7, 8, 4>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, row_row_col_16x3x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 3, 3>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, row_col_col_16x3x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 3, 3>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, 
cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, col_row_col_16x3x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 3, 3>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, col_col_col_16x3x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 3, 3>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, row_row_col_16x3x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 3, 4>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, row_col_col_16x3x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 3, 4>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, col_row_col_16x3x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 3, 4>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, col_col_col_16x3x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 3, 4>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, row_row_row_16x8x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 3>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, row_row_col_16x8x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 3>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, row_col_row_16x8x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 3>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); }TEST(SM60_Hgemm_thread, row_col_col_16x8x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 3>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, col_row_row_16x8x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 3>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, col_row_col_16x8x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 3>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, col_col_row_16x8x3) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 3>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, col_col_col_16x8x3) { test::gemm::thread::Testbed< 
cutlass::gemm::GemmShape<16, 8, 3>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, row_row_row_16x8x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 4>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, row_row_col_16x8x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 4>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, row_col_row_16x8x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 4>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, row_col_col_16x8x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 4>, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, col_row_row_16x8x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 4>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, col_row_col_16x8x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 4>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } TEST(SM60_Hgemm_thread, col_col_row_16x8x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 4>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::RowMajor >().run(); } TEST(SM60_Hgemm_thread, col_col_col_16x8x4) { test::gemm::thread::Testbed< cutlass::gemm::GemmShape<16, 8, 4>, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::half_t, cutlass::layout::ColumnMajor >().run(); } /////////////////////////////////////////////////////////////////////////////////////////////////
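For reference, a host-side sketch of the product a thread-level Mma with GemmShape<M, N, K> computes: D = A * B + C, with A of size M x K, B of size K x N, and C/D of size M x N. The Testbed used above lives in testbed.h (not shown); this reference assumes row-major storage for all operands and is only meant to make the shape parameters concrete.

#include <vector>

// Naive reference GEMM for a GemmShape<M, N, K>-sized problem; row-major
// storage assumed for A (M x K), B (K x N), C and D (M x N).
template <int M, int N, int K>
void reference_gemm(const std::vector<float>& A,
                    const std::vector<float>& B,
                    const std::vector<float>& C,
                    std::vector<float>& D) {
  for (int m = 0; m < M; ++m) {
    for (int n = 0; n < N; ++n) {
      float acc = C[m * N + n];
      for (int k = 0; k < K; ++k) {
        acc += A[m * K + k] * B[k * N + n];
      }
      D[m * N + n] = acc;
    }
  }
}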
#include "nnnormalizelp.hpp" #include "datacu.hpp" #include <vector> #include <algorithm> // ------------------------------------------------------------------- // Helpers // ------------------------------------------------------------------- struct GPUVisitPattern { size_t normsVolume ; size_t inputVolume ; int dims [4] {1,1,1,1} ; int strides [4] {0,0,0,0} ; int ndims [4] {1,1,1,1} ; int nstrides [4] {0,0,0,0} ; } ; GPUVisitPattern getGPUVisitPatternForInput(NormalizeLp const & op, vl::Tensor input) { // Compute tensor geometry. int n = input.getNumDimensions() ; auto inputDimensions = std::vector<size_t>(input.getDimensions(), input.getDimensions() + n) ; assert(n <= 4) ; // Todo: relax. size_t inputVolume = 1 ; size_t normsVolume = 1 ; auto dims = std::vector<ptrdiff_t>{} ; auto steps = std::vector<ptrdiff_t>{} ; auto ndims = std::vector<ptrdiff_t>{} ; auto nstrides = std::vector<ptrdiff_t>{} ; // Find out how to traverse the reduced results as the input is // scanned from first to last element. for (int d = 0 ; d < n ; ++d) { bool squashed = (find(op.selectedDimensions.begin(), op.selectedDimensions.end(), d) != op.selectedDimensions.end()) ; if (squashed) { dims.push_back(inputDimensions[d]) ; steps.push_back(inputVolume) ; } else { ndims.push_back(inputDimensions[d]) ; nstrides.push_back(inputVolume) ; normsVolume *= inputDimensions[d] ; } inputVolume *= inputDimensions[d] ; } //cout << steps.size() << " " << inputVolume << endl ; for (int d = steps.size() ; d < 5 ; ++d) { steps.push_back(inputVolume) ; dims.push_back(1) ; } for (int d = 3 ; d >= 0 ; d--) { steps[d+1] -= steps[d] * dims[d] ; } GPUVisitPattern vp ; vp.inputVolume = inputVolume ; vp.normsVolume = normsVolume ; std::copy(dims.begin(),dims.end(),vp.dims) ; std::copy(steps.begin(),steps.end(),vp.strides) ; std::copy(ndims.begin(),ndims.end(),vp.ndims) ; std::copy(nstrides.begin(),nstrides.end(),vp.nstrides) ; return vp ; } template<typename type> __global__ void computeNorms(type * normsData, type const * inputData, type exponent, type epsilon, GPUVisitPattern vp) { int tid = threadIdx.x ; if (tid >= vp.normsVolume) { return ; } normsData += tid ; int i0 = tid % vp.ndims[0] ; tid /= vp.ndims[0] ; int i1 = tid % vp.ndims[1] ; tid /= vp.ndims[1] ; int i2 = tid % vp.ndims[2] ; tid /= vp.ndims[2] ; int i3 = tid % vp.ndims[3] ; inputData += i0 * vp.nstrides[0] + i1 * vp.nstrides[1] + i2 * vp.nstrides[2] + i3 * vp.nstrides[3] ; type value = 0 ; for (int i3 = 0 ; i3 < vp.dims[3] ; ++i3) { for (int i2 = 0 ; i2 < vp.dims[2] ; ++i2) { for (int i1 = 0 ; i1 < vp.dims[1] ; ++i1) { for (int i0 = 0 ; i0 < vp.dims[0] ; ++i0) { value = value + pow(*inputData, exponent) ; inputData += vp.strides[0] ; } inputData += vp.strides[1] ; } inputData += vp.strides[2] ; } inputData += vp.strides[3] ; } *normsData = pow(value + epsilon, static_cast<type>(1.0)/exponent) ; } template<typename type> __global__ void divideByNorms(type * outputData, type const * inputData, type const * normsData, GPUVisitPattern vp) { int tid = threadIdx.x ; if (tid >= vp.normsVolume) { return ; } normsData += tid ; int i0 = tid % vp.ndims[0] ; tid /= vp.ndims[0] ; int i1 = tid % vp.ndims[1] ; tid /= vp.ndims[1] ; int i2 = tid % vp.ndims[2] ; tid /= vp.ndims[2] ; int i3 = tid % vp.ndims[3] ; int offset = i0 * vp.nstrides[0] + i1 * vp.nstrides[1] + i2 * vp.nstrides[2] + i3 * vp.nstrides[3] ; inputData += offset ; outputData += offset ; type value = *normsData ; for (int i3 = 0 ; i3 < vp.dims[3] ; ++i3) { for (int i2 = 0 ; i2 < vp.dims[2] ; ++i2) { for (int i1 = 0 ; i1 < 
vp.dims[1] ; ++i1) { for (int i0 = 0 ; i0 < vp.dims[0] ; ++i0) { *outputData = *inputData / value ; inputData += vp.strides[0] ; outputData += vp.strides[0] ; } inputData += vp.strides[1] ; outputData += vp.strides[1] ; } inputData += vp.strides[2] ; outputData += vp.strides[2] ; } inputData += vp.strides[3] ; outputData += vp.strides[3] ; } } template<typename type> __global__ void computeSum(type * scratchData, type const * inputData, type const * derOutputData, GPUVisitPattern vp) { int tid = threadIdx.x ; if (tid >= vp.normsVolume) { return ; } scratchData += tid ; int i0 = tid % vp.ndims[0] ; tid /= vp.ndims[0] ; int i1 = tid % vp.ndims[1] ; tid /= vp.ndims[1] ; int i2 = tid % vp.ndims[2] ; tid /= vp.ndims[2] ; int i3 = tid % vp.ndims[3] ; int offset = i0 * vp.nstrides[0] + i1 * vp.nstrides[1] + i2 * vp.nstrides[2] + i3 * vp.nstrides[3] ; inputData += offset ; derOutputData += offset ; type value = 0 ; for (int i3 = 0 ; i3 < vp.dims[3] ; ++i3) { for (int i2 = 0 ; i2 < vp.dims[2] ; ++i2) { for (int i1 = 0 ; i1 < vp.dims[1] ; ++i1) { for (int i0 = 0 ; i0 < vp.dims[0] ; ++i0) { value += (*inputData) * (*derOutputData) ; inputData += vp.strides[0] ; derOutputData += vp.strides[0] ; } inputData += vp.strides[1] ; derOutputData += vp.strides[1] ; } inputData += vp.strides[2] ; derOutputData += vp.strides[2] ; } inputData += vp.strides[3] ; derOutputData += vp.strides[3] ; } *scratchData = value ; } template<typename type> __global__ void computeDerInput(type * derInputData, type const * inputData, type const * normsData, type const * derOutputData, type const * scratchData, type exponent, GPUVisitPattern vp) { int tid = threadIdx.x ; if (tid >= vp.normsVolume) { return ; } normsData += tid ; scratchData += tid ; int i0 = tid % vp.ndims[0] ; tid /= vp.ndims[0] ; int i1 = tid % vp.ndims[1] ; tid /= vp.ndims[1] ; int i2 = tid % vp.ndims[2] ; tid /= vp.ndims[2] ; int i3 = tid % vp.ndims[3] ; int offset = i0 * vp.nstrides[0] + i1 * vp.nstrides[1] + i2 * vp.nstrides[2] + i3 * vp.nstrides[3] ; derInputData += offset ; inputData += offset ; derOutputData += offset ; type const nv = *normsData ; type const sv = *scratchData ; for (int i3 = 0 ; i3 < vp.dims[3] ; ++i3) { for (int i2 = 0 ; i2 < vp.dims[2] ; ++i2) { for (int i1 = 0 ; i1 < vp.dims[1] ; ++i1) { for (int i0 = 0 ; i0 < vp.dims[0] ; ++i0) { type iv = *inputData ; type dov = *derOutputData ; *derInputData = dov / nv - sv * pow(iv,exponent-1) / pow(nv,exponent+1) ; derInputData += vp.strides[0] ; inputData += vp.strides[0] ; derOutputData += vp.strides[0] ; } derInputData += vp.strides[1] ; inputData += vp.strides[1] ; derOutputData += vp.strides[1] ; } derInputData += vp.strides[2] ; inputData += vp.strides[2] ; derOutputData += vp.strides[2] ; } derInputData += vp.strides[3] ; inputData += vp.strides[3] ; derOutputData += vp.strides[3] ; } } // ------------------------------------------------------------------- // GPU forward // ------------------------------------------------------------------- template<vl::DataType dataType, bool givenNorms> struct NormalizeLpForwardGPU { vl::ErrorCode operator()(NormalizeLp & op, Tensor &output, typename NormAgrument<givenNorms>::type norms, Tensor const &input) { assert(norms || !givenNorms) ; typedef typename vl::DataTypeTraits<dataType>::type type ; auto vp = getGPUVisitPatternForInput(op,input) ; // Get buffers. 
type const * inputData = (type const*)input.getMemory() ; type * normsData ; if (norms) { normsData = (type*)norms.getMemory() ; } else { normsData = (type*)op.context.getWorkspace (vl::VLDT_GPU, vp.normsVolume * sizeof(type)) ; } // Accumulate norms. if (!givenNorms) { computeNorms<type> <<< divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (normsData,inputData,op.exponent,op.epsilon,vp) ; } // Divide by them. type * outputData = (type*)output.getMemory() ; divideByNorms<type> <<< divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (outputData,inputData,normsData,vp) ; //cout << "n vol " << vp.normsVolume << endl ; return vl::VLE_Success ; } } ; template<vl::DataType dataType> struct NormalizeLpForward<vl::VLDT_GPU, dataType> : public NormalizeLpForwardGPU<dataType,false> { } ; template<vl::DataType dataType> struct NormalizeLpForwardWithNorms<vl::VLDT_GPU, dataType> : public NormalizeLpForwardGPU<dataType,true> { } ; // ------------------------------------------------------------------- // GPU backward // ------------------------------------------------------------------- template<vl::DataType dataType, bool givenNorms> struct NormalizeLpBackwardGPU { vl::ErrorCode operator()(NormalizeLp &op, Tensor &derInput, typename NormAgrument<givenNorms>::type norms, Tensor const &input, Tensor const& derOutput) { assert(norms || !givenNorms) ; typedef typename vl::DataTypeTraits<dataType>::type type ; auto vp = getGPUVisitPatternForInput(op,input) ; // Get buffers. size_t workspaceSize = vp.normsVolume * sizeof(type) ; type const * inputData = (type const*)input.getMemory() ; type * normsData ; if (norms) { normsData = (type*)norms.getMemory() ; } else { normsData = 0 ; workspaceSize *= 2 ; } type * scratchData = (type*)op.context.getWorkspace(vl::VLDT_GPU, workspaceSize) ; if (normsData == NULL) { normsData = scratchData + vp.normsVolume ; } // Accumulate norms. if (!givenNorms) { computeNorms<type> <<< divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (normsData,inputData,op.exponent,op.epsilon,vp) ; } // Compute sum(derOutput .* input). type const* derOutputData = (type const*)derOutput.getMemory() ; computeSum<type> <<< divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (scratchData,inputData,derOutputData,vp) ; // Compute derInputs. type * derInputData = (type*)derInput.getMemory() ; computeDerInput<type> <<< divideAndRoundUp(vp.normsVolume, (size_t)VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>> (derInputData,inputData,normsData,derOutputData,scratchData,op.exponent,vp) ; return vl::VLE_Success ; } } ; template<vl::DataType dataType> struct NormalizeLpBackward<vl::VLDT_GPU, dataType> : public NormalizeLpBackwardGPU<dataType,false> { } ; template<vl::DataType dataType> struct NormalizeLpBackwardWithNorms<vl::VLDT_GPU, dataType> : public NormalizeLpBackwardGPU<dataType,true> { } ;
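A host-side sketch of the forward math that computeNorms and divideByNorms implement for one group of elements: the norm is accumulated over the selected ("squashed") dimensions as (sum_i x_i^p + epsilon)^(1/p) and every input in the group is divided by it. exponent and epsilon correspond to op.exponent and op.epsilon; the stride/grouping logic of GPUVisitPattern is omitted.

#include <cmath>
#include <vector>

// Normalize one group in place and return the norm that was used.
// Mirrors the kernels above: note they apply pow(x, exponent) directly,
// without taking |x| first.
float lp_normalize_group(std::vector<float>& group, float exponent, float epsilon) {
  float sum = 0.f;
  for (float v : group) sum += std::pow(v, exponent);     // computeNorms accumulation
  float norm = std::pow(sum + epsilon, 1.f / exponent);   // norm = (sum + eps)^(1/p)
  for (float& v : group) v /= norm;                       // divideByNorms
  return norm;
}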
// cudaColormapFromStr cudaColormapType cudaColormapFromStr( const char* str ) { if( !str ) return COLORMAP_DEFAULT; if( strcasecmp(str, "inferno") == 0 ) return COLORMAP_INFERNO; else if( strcasecmp(str, "magma") == 0 ) return COLORMAP_MAGMA; else if( strcasecmp(str, "parula") == 0 ) return COLORMAP_PARULA; else if( strcasecmp(str, "plasma") == 0 ) return COLORMAP_PLASMA; else if( strcasecmp(str, "turbo") == 0 ) return COLORMAP_TURBO; else if( strcasecmp(str, "viridis") == 0 ) return COLORMAP_VIRIDIS; else if( strcasecmp(str, "inferno-inverted") == 0 || strcasecmp(str, "inferno_inverted") == 0 ) return COLORMAP_INFERNO_INVERTED; else if( strcasecmp(str, "magma-inverted") == 0 || strcasecmp(str, "magma_inverted") == 0 ) return COLORMAP_MAGMA_INVERTED; else if( strcasecmp(str, "parula-inverted") == 0 || strcasecmp(str, "parula_inverted") == 0 ) return COLORMAP_PARULA_INVERTED; else if( strcasecmp(str, "plasma-inverted") == 0 || strcasecmp(str, "plasma_inverted") == 0 ) return COLORMAP_PLASMA_INVERTED; else if( strcasecmp(str, "turbo-inverted") == 0 || strcasecmp(str, "turbo_inverted") == 0 ) return COLORMAP_TURBO_INVERTED; else if( strcasecmp(str, "viridis-inverted") == 0 || strcasecmp(str, "viridis_inverted") == 0 ) return COLORMAP_VIRIDIS_INVERTED; else if( strcasecmp(str, "flow") == 0 ) return COLORMAP_FLOW; else if( strcasecmp(str, "none") == 0 ) return COLORMAP_NONE; else if( strcasecmp(str, "linear") == 0 ) return COLORMAP_LINEAR; else if( strcasecmp(str, "default") == 0 ) return COLORMAP_DEFAULT; return COLORMAP_DEFAULT; } // cudaColormapToStr const char* cudaColormapToStr( cudaColormapType colormap ) { switch(colormap) { case COLORMAP_INFERNO: return "inferno"; case COLORMAP_MAGMA: return "magma"; case COLORMAP_PARULA: return "parula"; case COLORMAP_PLASMA: return "plasma"; case COLORMAP_TURBO: return "turbo"; case COLORMAP_VIRIDIS: return "viridis"; case COLORMAP_INFERNO_INVERTED: return "inferno-inverted"; case COLORMAP_MAGMA_INVERTED: return "magma-inverted"; case COLORMAP_PARULA_INVERTED: return "parula-inverted"; case COLORMAP_PLASMA_INVERTED: return "plasma-inverted"; case COLORMAP_TURBO_INVERTED: return "turbo-inverted"; case COLORMAP_VIRIDIS_INVERTED: return "viridis-inverted"; case COLORMAP_FLOW: return "flow"; case COLORMAP_NONE: return "none"; case COLORMAP_LINEAR: return "linear"; } return "default"; } static const float4 colormapPalettes[] = { { 0.372810, 0.118830, 3.535830, 255.0f }, { 0.578085, 0.323850, 4.735350, 255.0f }, { 0.841245, 0.573495, 6.180945, 255.0f }, { 1.159485, 0.864960, 7.881795, 255.0f }, { 1.531530, 1.196460, 9.832290, 255.0f }, { 1.957380, 1.564680, 11.943180, 255.0f }, { 2.438055, 1.966815, 14.061464, 255.0f }, { 2.974065, 2.401335, 16.182301, 255.0f }, { 3.568725, 2.862375, 18.324810, 255.0f }, { 4.223055, 3.349680, 20.471910, 255.0f }, { 4.940115, 3.858915, 22.635586, 255.0f }, { 5.723985, 4.385745, 24.818386, 255.0f }, { 6.577215, 4.929405, 27.012150, 255.0f }, { 7.505160, 5.483265, 29.228355, 255.0f }, { 8.513175, 6.044010, 31.466234, 255.0f }, { 9.605340, 6.609855, 33.719158, 255.0f }, { 10.774515, 7.175445, 35.990955, 255.0f }, { 11.963325, 7.732620, 38.291817, 255.0f }, { 13.169220, 8.280870, 40.609772, 255.0f }, { 14.394495, 8.815095, 42.945568, 255.0f }, { 15.641700, 9.330450, 45.298710, 255.0f }, { 16.914404, 9.818521, 47.675308, 255.0f }, { 18.214396, 10.274970, 50.070271, 255.0f }, { 19.542435, 10.685775, 52.478745, 255.0f }, { 20.900309, 11.048639, 54.898693, 255.0f }, { 22.289804, 11.361780, 57.327316, 255.0f }, { 23.712450, 
11.623665, 59.761288, 255.0f }, { 25.169010, 11.832510, 62.195518, 255.0f }, { 26.660505, 11.987041, 64.624649, 255.0f }, { 28.186680, 12.086745, 67.042564, 255.0f }, { 29.747280, 12.131370, 69.441849, 255.0f }, { 31.341541, 12.121680, 71.814117, 255.0f }, { 32.967674, 12.059715, 74.150940, 255.0f }, { 34.623390, 11.948280, 76.442879, 255.0f }, { 36.306389, 11.791710, 78.681015, 255.0f }, { 38.013615, 11.594339, 80.856674, 255.0f }, { 39.741749, 11.362545, 82.961189, 255.0f }, { 41.485695, 11.106270, 84.985634, 255.0f }, { 43.241627, 10.834695, 86.922867, 255.0f }, { 45.005714, 10.557510, 88.768303, 255.0f }, { 46.774395, 10.283895, 90.517601, 255.0f }, { 48.543583, 10.023795, 92.168983, 255.0f }, { 50.310738, 9.792001, 93.721420, 255.0f }, { 52.073296, 9.596160, 95.175690, 255.0f }, { 53.829227, 9.442650, 96.533562, 255.0f }, { 55.576996, 9.336824, 97.798111, 255.0f }, { 57.314568, 9.283276, 98.972893, 255.0f }, { 59.042191, 9.283276, 100.061996, 255.0f }, { 60.759613, 9.338355, 101.070015, 255.0f }, { 62.466583, 9.449025, 102.001785, 255.0f }, { 64.163101, 9.614775, 102.861389, 255.0f }, { 65.849670, 9.835605, 103.653671, 255.0f }, { 67.526550, 10.109985, 104.382973, 255.0f }, { 69.193481, 10.435110, 105.053879, 255.0f }, { 70.851753, 10.800015, 105.669960, 255.0f }, { 72.501854, 11.202915, 106.235039, 255.0f }, { 74.144562, 11.639220, 106.752434, 255.0f }, { 75.780388, 12.104850, 107.225204, 255.0f }, { 77.409843, 12.595981, 107.656410, 255.0f }, { 79.033424, 13.108785, 108.048851, 255.0f }, { 80.651909, 13.639951, 108.404579, 255.0f }, { 82.265549, 14.186670, 108.726135, 255.0f }, { 83.874855, 14.745885, 109.015305, 255.0f }, { 85.480331, 15.315300, 109.273621, 255.0f }, { 87.082504, 15.892876, 109.503372, 255.0f }, { 88.681602, 16.477081, 109.705338, 255.0f }, { 90.278160, 17.065874, 109.881027, 255.0f }, { 91.872421, 17.657986, 110.031738, 255.0f }, { 93.464890, 18.252645, 110.158470, 255.0f }, { 95.055847, 18.848324, 110.261993, 255.0f }, { 96.645256, 19.444513, 110.343346, 255.0f }, { 98.233139, 20.040705, 110.403526, 255.0f }, { 99.820511, 20.636385, 110.442795, 255.0f }, { 101.406868, 21.230534, 110.461670, 255.0f }, { 102.992973, 21.822899, 110.460640, 255.0f }, { 104.578819, 22.413479, 110.439987, 255.0f }, { 106.164406, 23.001766, 110.400459, 255.0f }, { 107.750000, 23.587755, 110.342064, 255.0f }, { 109.335846, 24.171450, 110.265060, 255.0f }, { 110.921684, 24.752596, 110.169945, 255.0f }, { 112.507782, 25.331190, 110.056473, 255.0f }, { 114.094139, 25.907236, 109.925400, 255.0f }, { 115.681007, 26.481241, 109.776993, 255.0f }, { 117.268120, 27.052696, 109.610725, 255.0f }, { 118.855499, 27.622110, 109.426880, 255.0f }, { 120.443642, 28.189486, 109.225166, 255.0f }, { 122.032288, 28.754820, 109.006126, 255.0f }, { 123.621201, 29.318371, 108.769745, 255.0f }, { 125.210609, 29.880644, 108.515762, 255.0f }, { 126.800537, 30.441645, 108.244446, 255.0f }, { 128.390717, 31.001625, 107.955780, 255.0f }, { 129.981140, 31.561094, 107.649780, 255.0f }, { 131.571594, 32.119801, 107.326187, 255.0f }, { 133.162537, 32.678249, 106.984993, 255.0f }, { 134.753220, 33.236954, 106.626205, 255.0f }, { 136.344162, 33.796169, 106.250092, 255.0f }, { 137.934601, 34.355896, 105.856361, 255.0f }, { 139.525040, 34.916897, 105.445305, 255.0f }, { 141.114960, 35.479172, 105.016396, 255.0f }, { 142.704117, 36.043228, 104.569885, 255.0f }, { 144.292770, 36.609585, 104.105789, 255.0f }, { 145.880661, 37.178234, 103.624092, 255.0f }, { 147.467514, 37.749943, 103.124802, 255.0f }, { 149.052856, 38.324970, 
102.608177, 255.0f }, { 150.637177, 38.903568, 102.073952, 255.0f }, { 152.219696, 39.486240, 101.521873, 255.0f }, { 153.800446, 40.073505, 100.952209, 255.0f }, { 155.379150, 40.665871, 100.365196, 255.0f }, { 156.955826, 41.263336, 99.760841, 255.0f }, { 158.529678, 41.866920, 99.139160, 255.0f }, { 160.100983, 42.476624, 98.500381, 255.0f }, { 161.669479, 43.092960, 97.844521, 255.0f }, { 163.234421, 43.716690, 97.171577, 255.0f }, { 164.796310, 44.348068, 96.481544, 255.0f }, { 166.354095, 44.987354, 95.774429, 255.0f }, { 167.908066, 45.635311, 95.050735, 255.0f }, { 169.457703, 46.292446, 94.310730, 255.0f }, { 171.002747, 46.959015, 93.554138, 255.0f }, { 172.542694, 47.635788, 92.781502, 255.0f }, { 174.077271, 48.322754, 91.993034, 255.0f }, { 175.606506, 49.020947, 91.188766, 255.0f }, { 177.129883, 49.730354, 90.368942, 255.0f }, { 178.646881, 50.452007, 89.533813, 255.0f }, { 180.157501, 51.185638, 88.683136, 255.0f }, { 181.660980, 51.932281, 87.817665, 255.0f }, { 183.157318, 52.692181, 86.937401, 255.0f }, { 184.646255, 53.465851, 86.043121, 255.0f }, { 186.126801, 54.253548, 85.134552, 255.0f }, { 187.599167, 55.056030, 84.212471, 255.0f }, { 189.062866, 55.873558, 83.276878, 255.0f }, { 190.517380, 56.706390, 82.328285, 255.0f }, { 191.962479, 57.555031, 81.366676, 255.0f }, { 193.397614, 58.419735, 80.392830, 255.0f }, { 194.822556, 59.301270, 79.406746, 255.0f }, { 196.236771, 60.199635, 78.408676, 255.0f }, { 197.640045, 61.115086, 77.399132, 255.0f }, { 199.031830, 62.048386, 76.378365, 255.0f }, { 200.411896, 62.999283, 75.346634, 255.0f }, { 201.779724, 63.968285, 74.304451, 255.0f }, { 203.134796, 64.955635, 73.252319, 255.0f }, { 204.477112, 65.961868, 72.190247, 255.0f }, { 205.805908, 66.986458, 71.118988, 255.0f }, { 207.120941, 68.030434, 70.038559, 255.0f }, { 208.421967, 69.093277, 68.949448, 255.0f }, { 209.708435, 70.175232, 67.851677, 255.0f }, { 210.979858, 71.276833, 66.746254, 255.0f }, { 212.236237, 72.397812, 65.632660, 255.0f }, { 213.477081, 73.538177, 64.511940, 255.0f }, { 214.702103, 74.697914, 63.383820, 255.0f }, { 215.910797, 75.877541, 62.248814, 255.0f }, { 217.102921, 77.076302, 61.107182, 255.0f }, { 218.277969, 78.294693, 59.958916, 255.0f }, { 219.435913, 79.532463, 58.804531, 255.0f }, { 220.576538, 80.789612, 57.644024, 255.0f }, { 221.699295, 82.065880, 56.477909, 255.0f }, { 222.803955, 83.361031, 55.305931, 255.0f }, { 223.890244, 84.675301, 54.128338, 255.0f }, { 224.957947, 86.008186, 52.945141, 255.0f }, { 226.007004, 87.359436, 51.756840, 255.0f }, { 227.036957, 88.729034, 50.562931, 255.0f }, { 228.047775, 90.116745, 49.363918, 255.0f }, { 229.038956, 91.522308, 48.159298, 255.0f }, { 230.010757, 92.945457, 46.949581, 255.0f }, { 230.962433, 94.385696, 45.734249, 255.0f }, { 231.894440, 95.843285, 44.513565, 255.0f }, { 232.806335, 97.317177, 43.287525, 255.0f }, { 233.697815, 98.807655, 42.055618, 255.0f }, { 234.569153, 100.314194, 40.817852, 255.0f }, { 235.419815, 101.836540, 39.574215, 255.0f }, { 236.249847, 103.374199, 38.324459, 255.0f }, { 237.059219, 104.927147, 37.068584, 255.0f }, { 237.847931, 106.494888, 35.806335, 255.0f }, { 238.615494, 108.076904, 34.537201, 255.0f }, { 239.362122, 109.673203, 33.261688, 255.0f }, { 240.087845, 111.283279, 31.979298, 255.0f }, { 240.792664, 112.906860, 30.690269, 255.0f }, { 241.476074, 114.543709, 29.394360, 255.0f }, { 242.138321, 116.193298, 28.091820, 255.0f }, { 242.779129, 117.855385, 26.782906, 255.0f }, { 243.399017, 119.529724, 25.467869, 255.0f }, { 243.997269, 
121.215782, 24.147226, 255.0f }, { 244.574081, 122.913567, 22.822245, 255.0f }, { 245.129715, 124.622581, 21.493694, 255.0f }, { 245.663681, 126.342812, 20.163614, 255.0f }, { 246.176239, 128.073502, 18.834045, 255.0f }, { 246.667114, 129.814896, 17.508045, 255.0f }, { 247.136566, 131.566223, 16.189440, 255.0f }, { 247.584351, 133.327515, 14.883585, 255.0f }, { 248.010452, 135.098480, 13.597620, 255.0f }, { 248.414871, 136.878906, 12.339960, 255.0f }, { 248.797638, 138.668503, 11.122590, 255.0f }, { 249.158463, 140.466736, 9.957750, 255.0f }, { 249.497604, 142.273941, 8.907405, 255.0f }, { 249.814835, 144.089523, 8.009295, 255.0f }, { 250.110123, 145.913300, 7.269540, 255.0f }, { 250.383240, 147.744965, 6.693750, 255.0f }, { 250.634659, 149.584534, 6.288555, 255.0f }, { 250.863647, 151.431503, 6.061350, 255.0f }, { 251.070709, 153.286118, 6.019530, 255.0f }, { 251.255325, 155.147598, 6.171510, 255.0f }, { 251.417770, 157.016251, 6.525960, 255.0f }, { 251.558014, 158.891769, 7.092570, 255.0f }, { 251.675812, 160.773682, 7.881540, 255.0f }, { 251.770935, 162.661942, 8.903580, 255.0f }, { 251.843613, 164.556595, 10.170930, 255.0f }, { 251.893845, 166.457123, 11.623156, 255.0f }, { 251.921127, 168.363754, 13.196250, 255.0f }, { 251.925980, 170.275726, 14.873896, 255.0f }, { 251.907867, 172.193085, 16.640535, 255.0f }, { 251.867065, 174.115799, 18.484695, 255.0f }, { 251.803329, 176.043335, 20.397449, 255.0f }, { 251.716629, 177.975723, 22.371405, 255.0f }, { 251.606964, 179.912704, 24.401970, 255.0f }, { 251.474625, 181.854019, 26.485065, 255.0f }, { 251.319336, 183.799408, 28.618395, 255.0f }, { 251.140579, 185.748886, 30.800175, 255.0f }, { 250.939133, 187.702194, 33.029385, 255.0f }, { 250.714981, 189.658295, 35.305515, 255.0f }, { 250.468140, 191.617706, 37.629078, 255.0f }, { 250.199112, 193.579422, 40.000065, 255.0f }, { 249.908173, 195.543442, 42.420017, 255.0f }, { 249.595535, 197.508972, 44.889435, 255.0f }, { 249.261734, 199.475784, 47.410362, 255.0f }, { 248.907547, 201.443359, 49.984589, 255.0f }, { 248.532684, 203.411453, 52.614658, 255.0f }, { 248.137451, 205.379303, 55.303635, 255.0f }, { 247.724335, 207.346100, 58.052792, 255.0f }, { 247.294662, 209.310364, 60.864929, 255.0f }, { 246.850449, 211.271317, 63.742859, 255.0f }, { 246.391968, 213.228699, 66.691170, 255.0f }, { 245.920456, 215.181244, 69.714706, 255.0f }, { 245.441833, 217.126389, 72.814232, 255.0f }, { 244.959625, 219.062592, 75.992546, 255.0f }, { 244.473602, 220.989120, 79.259102, 255.0f }, { 243.992676, 222.902893, 82.613373, 255.0f }, { 243.524231, 224.800095, 86.056122, 255.0f }, { 243.069824, 226.680206, 89.599091, 255.0f }, { 242.644226, 228.537628, 93.234886, 255.0f }, { 242.254593, 230.369293, 96.969101, 255.0f }, { 241.914169, 232.170609, 100.798698, 255.0f }, { 241.636459, 233.936737, 104.719574, 255.0f }, { 241.436295, 235.662842, 108.725113, 255.0f }, { 241.329956, 237.344055, 112.803581, 255.0f }, { 241.332779, 238.975540, 116.940956, 255.0f }, { 241.460266, 240.553741, 121.117355, 255.0f }, { 241.723938, 242.076080, 125.313629, 255.0f }, { 242.133987, 243.541061, 129.504303, 255.0f }, { 242.693710, 244.949692, 133.671768, 255.0f }, { 243.404892, 246.303482, 137.792053, 255.0f }, { 244.263474, 247.605759, 141.850128, 255.0f }, { 245.262070, 248.860626, 145.840866, 255.0f }, { 246.393494, 250.072891, 149.737534, 255.0f }, { 247.646317, 251.246918, 153.549271, 255.0f }, { 249.010300, 252.387024, 157.273804, 255.0f }, { 250.475540, 253.497787, 160.909348, 255.0f }, { 252.032318, 254.582809, 164.455612, 
255.0f }, { 0.372810, 0.118830, 3.535830, 255.0f }, { 0.575790, 0.330225, 4.674405, 255.0f }, { 0.836145, 0.587775, 6.045540, 255.0f }, { 1.150560, 0.889950, 7.641075, 255.0f }, { 1.517250, 1.234965, 9.468149, 255.0f }, { 1.934940, 1.620780, 11.468115, 255.0f }, { 2.403630, 2.045610, 13.475220, 255.0f }, { 2.923575, 2.506140, 15.491250, 255.0f }, { 3.495540, 3.001605, 17.510086, 255.0f }, { 4.119780, 3.529200, 19.533766, 255.0f }, { 4.797825, 4.086630, 21.568920, 255.0f }, { 5.531460, 4.671600, 23.615551, 255.0f }, { 6.321960, 5.282325, 25.672380, 255.0f }, { 7.171365, 5.916255, 27.740685, 255.0f }, { 8.082479, 6.570075, 29.826077, 255.0f }, { 9.057600, 7.241235, 31.928295, 255.0f }, { 10.100040, 7.927950, 34.046326, 255.0f }, { 11.176650, 8.626650, 36.180927, 255.0f }, { 12.255810, 9.334785, 38.333385, 255.0f }, { 13.341600, 10.048785, 40.504456, 255.0f }, { 14.436825, 10.750800, 42.698730, 255.0f }, { 15.541995, 11.422470, 44.912895, 255.0f }, { 16.659149, 12.066090, 47.147461, 255.0f }, { 17.789822, 12.680130, 49.402428, 255.0f }, { 18.935535, 13.264335, 51.678299, 255.0f }, { 20.097824, 13.816920, 53.975086, 255.0f }, { 21.278730, 14.337376, 56.292522, 255.0f }, { 22.479525, 14.823915, 58.630108, 255.0f }, { 23.701996, 15.275520, 60.986820, 255.0f }, { 24.947414, 15.690405, 63.361633, 255.0f }, { 26.217825, 16.067551, 65.752777, 255.0f }, { 27.514246, 16.405426, 68.158699, 255.0f }, { 28.838970, 16.700459, 70.579918, 255.0f }, { 30.193275, 16.952145, 73.011856, 255.0f }, { 31.577415, 17.160225, 75.449150, 255.0f }, { 32.991901, 17.323423, 77.887962, 255.0f }, { 34.438515, 17.439705, 80.324997, 255.0f }, { 35.918789, 17.506771, 82.757187, 255.0f }, { 37.430176, 17.528189, 85.172806, 255.0f }, { 38.973946, 17.502434, 87.568016, 255.0f }, { 40.549587, 17.430271, 89.935440, 255.0f }, { 42.153542, 17.317305, 92.263077, 255.0f }, { 43.786812, 17.162775, 94.546600, 255.0f }, { 45.444061, 16.976879, 96.771736, 255.0f }, { 47.124256, 16.761662, 98.933121, 255.0f }, { 48.822300, 16.528591, 101.018761, 255.0f }, { 50.535133, 16.284811, 103.022301, 255.0f }, { 52.258427, 16.041286, 104.936073, 255.0f }, { 53.988087, 15.807961, 106.754982, 255.0f }, { 55.720558, 15.595290, 108.474960, 255.0f }, { 57.452007, 15.413475, 110.094215, 255.0f }, { 59.179634, 15.271695, 111.612221, 255.0f }, { 60.900631, 15.176835, 113.030281, 255.0f }, { 62.613464, 15.134760, 114.351181, 255.0f }, { 64.316101, 15.150826, 115.578239, 255.0f }, { 66.008537, 15.225030, 116.716049, 255.0f }, { 67.688980, 15.360435, 117.769203, 255.0f }, { 69.358467, 15.553470, 118.743301, 255.0f }, { 71.015709, 15.804390, 119.643448, 255.0f }, { 72.662506, 16.107840, 120.475006, 255.0f }, { 74.298332, 16.461016, 121.242805, 255.0f }, { 75.923706, 16.859837, 121.951965, 255.0f }, { 77.540649, 17.297926, 122.607063, 255.0f }, { 79.147415, 17.774010, 123.212433, 255.0f }, { 80.746773, 18.280951, 123.771896, 255.0f }, { 82.339249, 18.814409, 124.289040, 255.0f }, { 83.924065, 19.372860, 124.768181, 255.0f }, { 85.503540, 19.950180, 125.211121, 255.0f }, { 87.077911, 20.543819, 125.620903, 255.0f }, { 88.647186, 21.151230, 126.000854, 255.0f }, { 90.212112, 21.770115, 126.352760, 255.0f }, { 91.773987, 22.396904, 126.678391, 255.0f }, { 93.333061, 23.030069, 126.979797, 255.0f }, { 94.889580, 23.668081, 127.258514, 255.0f }, { 96.443802, 24.309660, 127.517082, 255.0f }, { 97.996246, 24.953026, 127.755516, 255.0f }, { 99.547920, 25.596643, 127.975327, 255.0f }, { 101.099083, 26.240011, 128.177795, 255.0f }, { 102.649734, 26.882101, 128.363434, 
255.0f }, { 104.200394, 27.522150, 128.533249, 255.0f }, { 105.750793, 28.159904, 128.688797, 255.0f }, { 107.301704, 28.794600, 128.829819, 255.0f }, { 108.853630, 29.425726, 128.957077, 255.0f }, { 110.406586, 30.053024, 129.070801, 255.0f }, { 111.960808, 30.675989, 129.171524, 255.0f }, { 113.516571, 31.294619, 129.259766, 255.0f }, { 115.074104, 31.908659, 129.335480, 255.0f }, { 116.633430, 32.518112, 129.399246, 255.0f }, { 118.194542, 33.122715, 129.451263, 255.0f }, { 119.758194, 33.722477, 129.491287, 255.0f }, { 121.323906, 34.317135, 129.519852, 255.0f }, { 122.891899, 34.907207, 129.537186, 255.0f }, { 124.462440, 35.492428, 129.542801, 255.0f }, { 126.035789, 36.072811, 129.536926, 255.0f }, { 127.611687, 36.648346, 129.519608, 255.0f }, { 129.190399, 37.219292, 129.490524, 255.0f }, { 130.771896, 37.785645, 129.450241, 255.0f }, { 132.356476, 38.347664, 129.397964, 255.0f }, { 133.943848, 38.905094, 129.333969, 255.0f }, { 135.534286, 39.458447, 129.258224, 255.0f }, { 137.127533, 40.007969, 129.170517, 255.0f }, { 138.723816, 40.553413, 129.070541, 255.0f }, { 140.323181, 41.095287, 128.958344, 255.0f }, { 141.925613, 41.633595, 128.833649, 255.0f }, { 143.530823, 42.168842, 128.696472, 255.0f }, { 145.138870, 42.700771, 128.546768, 255.0f }, { 146.749954, 43.230152, 128.383835, 255.0f }, { 148.363846, 43.756981, 128.208130, 255.0f }, { 149.980286, 44.281258, 128.018936, 255.0f }, { 151.599533, 44.803757, 127.816460, 255.0f }, { 153.221344, 45.324467, 127.600464, 255.0f }, { 154.845688, 45.843643, 127.370461, 255.0f }, { 156.472336, 46.361805, 127.126678, 255.0f }, { 158.101273, 46.879200, 126.868622, 255.0f }, { 159.732254, 47.396084, 126.596283, 255.0f }, { 161.365265, 47.912716, 126.309662, 255.0f }, { 163.000076, 48.429855, 126.008255, 255.0f }, { 164.636414, 48.947762, 125.692047, 255.0f }, { 166.274277, 49.466431, 125.360809, 255.0f }, { 167.913177, 49.986885, 125.014519, 255.0f }, { 169.553329, 50.509125, 124.653175, 255.0f }, { 171.193985, 51.033913, 124.276291, 255.0f }, { 172.835434, 51.561768, 123.883850, 255.0f }, { 174.477127, 52.092930, 123.475845, 255.0f }, { 176.118561, 52.627922, 123.052292, 255.0f }, { 177.759995, 53.167755, 122.612923, 255.0f }, { 179.400650, 53.712692, 122.157494, 255.0f }, { 181.040314, 54.263237, 121.686256, 255.0f }, { 182.678680, 54.820412, 121.198952, 255.0f }, { 184.315277, 55.384472, 120.695587, 255.0f }, { 185.950073, 55.956436, 120.176147, 255.0f }, { 187.582092, 56.536816, 119.640900, 255.0f }, { 189.211014, 57.126373, 119.089592, 255.0f }, { 190.836380, 57.726135, 118.522476, 255.0f }, { 192.457947, 58.336861, 117.939796, 255.0f }, { 194.074631, 58.959572, 117.341316, 255.0f }, { 195.686493, 59.594776, 116.727524, 255.0f }, { 197.292221, 60.243496, 116.098694, 255.0f }, { 198.891846, 60.907005, 115.455070, 255.0f }, { 200.484070, 61.586071, 114.796913, 255.0f }, { 202.068893, 62.281708, 114.123466, 255.0f }, { 203.645035, 62.995201, 113.436241, 255.0f }, { 205.211761, 63.727303, 112.736008, 255.0f }, { 206.768021, 64.479553, 112.022774, 255.0f }, { 208.313080, 65.253220, 111.297554, 255.0f }, { 209.846130, 66.049080, 110.561119, 255.0f }, { 211.365921, 66.868393, 109.814224, 255.0f }, { 212.871704, 67.712700, 109.056099, 255.0f }, { 214.362183, 68.583015, 108.289825, 255.0f }, { 215.836075, 69.480614, 107.515907, 255.0f }, { 217.292130, 70.407028, 106.736115, 255.0f }, { 218.729568, 71.363541, 105.951477, 255.0f }, { 220.146591, 72.350891, 105.162766, 255.0f }, { 221.542221, 73.370644, 104.372269, 255.0f }, { 222.914886, 
74.424042, 103.582275, 255.0f }, { 224.263306, 75.511871, 102.795097, 255.0f }, { 225.586014, 76.635147, 102.011986, 255.0f }, { 226.881409, 77.795151, 101.235512, 255.0f }, { 228.148499, 78.992111, 100.468719, 255.0f }, { 229.385757, 80.227081, 99.714432, 255.0f }, { 230.591660, 81.500549, 98.974937, 255.0f }, { 231.765411, 82.812531, 98.253540, 255.0f }, { 232.905258, 84.163254, 97.553566, 255.0f }, { 234.010696, 85.552498, 96.878326, 255.0f }, { 235.080414, 86.979996, 96.230881, 255.0f }, { 236.113937, 88.445213, 95.614540, 255.0f }, { 237.110474, 89.947166, 95.032631, 255.0f }, { 238.069534, 91.484818, 94.487953, 255.0f }, { 238.991348, 93.056892, 93.984581, 255.0f }, { 239.875183, 94.662117, 93.524315, 255.0f }, { 240.721542, 96.298965, 93.109680, 255.0f }, { 241.530899, 97.965393, 92.743752, 255.0f }, { 242.303543, 99.659096, 92.429344, 255.0f }, { 243.040253, 101.378571, 92.166695, 255.0f }, { 243.741486, 103.121994, 91.957848, 255.0f }, { 244.408325, 104.887619, 91.803566, 255.0f }, { 245.041992, 106.672371, 91.705650, 255.0f }, { 245.644058, 108.474449, 91.664597, 255.0f }, { 246.214996, 110.292343, 91.679893, 255.0f }, { 246.756104, 112.124260, 91.751549, 255.0f }, { 247.268402, 113.968681, 91.879303, 255.0f }, { 247.753403, 115.823555, 92.062653, 255.0f }, { 248.212158, 117.687599, 92.301079, 255.0f }, { 248.645905, 119.559555, 92.593300, 255.0f }, { 249.055954, 121.437630, 92.938835, 255.0f }, { 249.443542, 123.321060, 93.336372, 255.0f }, { 249.809479, 125.208572, 93.784668, 255.0f }, { 250.154999, 127.099136, 94.282166, 255.0f }, { 250.481140, 128.992004, 94.827873, 255.0f }, { 250.788666, 130.886398, 95.420486, 255.0f }, { 251.078613, 132.781815, 96.057983, 255.0f }, { 251.351715, 134.677734, 96.739601, 255.0f }, { 251.608505, 136.573410, 97.463547, 255.0f }, { 251.849731, 138.468826, 98.228554, 255.0f }, { 252.075928, 140.363724, 99.033073, 255.0f }, { 252.287567, 142.257614, 99.876106, 255.0f }, { 252.485184, 144.150482, 100.756111, 255.0f }, { 252.672104, 146.040024, 101.672073, 255.0f }, { 252.847290, 147.927277, 102.622452, 255.0f }, { 253.009979, 149.813004, 103.606247, 255.0f }, { 253.160172, 151.697205, 104.622162, 255.0f }, { 253.298126, 153.580124, 105.669449, 255.0f }, { 253.427673, 155.459213, 106.746315, 255.0f }, { 253.548798, 157.334732, 107.852249, 255.0f }, { 253.658188, 159.209259, 108.986237, 255.0f }, { 253.756119, 161.082474, 110.147499, 255.0f }, { 253.847397, 162.951889, 111.334785, 255.0f }, { 253.931549, 164.817719, 112.547058, 255.0f }, { 254.004486, 166.683044, 113.784317, 255.0f }, { 254.066956, 168.547104, 115.045807, 255.0f }, { 254.127899, 170.405273, 116.328957, 255.0f }, { 254.177612, 172.262955, 117.635071, 255.0f }, { 254.215881, 174.121140, 118.964134, 255.0f }, { 254.254639, 175.972427, 120.311806, 255.0f }, { 254.282425, 177.824005, 121.681412, 255.0f }, { 254.299774, 179.675812, 123.071922, 255.0f }, { 254.317871, 181.521240, 124.479271, 255.0f }, { 254.324509, 183.367691, 125.907532, 255.0f }, { 254.324509, 185.212631, 127.354141, 255.0f }, { 254.321945, 187.053970, 128.817581, 255.0f }, { 254.307678, 188.896866, 130.300659, 255.0f }, { 254.293152, 190.735153, 131.799042, 255.0f }, { 254.270203, 192.573456, 133.315521, 255.0f }, { 254.239838, 194.411484, 134.849350, 255.0f }, { 254.208984, 196.245697, 136.397461, 255.0f }, { 254.165375, 198.082733, 137.964935, 255.0f }, { 254.125610, 199.914124, 139.544418, 255.0f }, { 254.074097, 201.747589, 141.142242, 255.0f }, { 254.021317, 203.578735, 142.754105, 255.0f }, { 253.962662, 
205.409378, 144.381500, 255.0f }, { 253.898392, 207.240021, 146.024475, 255.0f }, { 253.833115, 209.068130, 147.680695, 255.0f }, { 253.758408, 210.898254, 149.353760, 255.0f }, { 253.686996, 212.724319, 151.038284, 255.0f }, { 253.603622, 214.553680, 152.740662, 255.0f }, { 253.526611, 216.377701, 154.452484, 255.0f }, { 253.435837, 218.206299, 156.182907, 255.0f }, { 253.353973, 220.029053, 157.921249, 255.0f }, { 253.258362, 221.856125, 159.678192, 255.0f }, { 253.171906, 223.677841, 161.442795, 255.0f }, { 253.072189, 225.504150, 163.225235, 255.0f }, { 252.982681, 227.324860, 165.014587, 255.0f }, { 252.880447, 229.149887, 166.821503, 255.0f }, { 252.789658, 230.969559, 168.633789, 255.0f }, { 252.687149, 232.793320, 170.462646, 255.0f }, { 252.595352, 234.612503, 172.297119, 255.0f }, { 252.494629, 236.434982, 174.146133, 255.0f }, { 252.402817, 238.253891, 176.000488, 255.0f }, { 252.305664, 240.074860, 177.867340, 255.0f }, { 252.214630, 241.894012, 179.740067, 255.0f }, { 252.122833, 243.714203, 181.621719, 255.0f }, { 252.033585, 245.533890, 183.510498, 255.0f }, { 251.948410, 247.353058, 185.404633, 255.0f }, { 251.861206, 249.174271, 187.306671, 255.0f }, { 251.783691, 250.993439, 189.210510, 255.0f }, { 251.698502, 252.816681, 191.123535, 255.0f }, { 53.065502, 42.406498, 134.945999, 255.0f }, { 53.320499, 43.885498, 137.980499, 255.0f }, { 53.575500, 45.364502, 141.014999, 255.0f }, { 53.779499, 46.843498, 144.074997, 255.0f }, { 53.958000, 48.322502, 147.160492, 255.0f }, { 54.085499, 49.827000, 150.246002, 255.0f }, { 54.162003, 51.331497, 153.331497, 255.0f }, { 54.187500, 52.836002, 156.442505, 255.0f }, { 54.136501, 54.366001, 159.579010, 255.0f }, { 54.008999, 55.896000, 162.715500, 255.0f }, { 53.830498, 57.451500, 165.877502, 255.0f }, { 53.524502, 59.032501, 169.039490, 255.0f }, { 53.141998, 60.613499, 172.201508, 255.0f }, { 52.606503, 62.220001, 175.388992, 255.0f }, { 51.968998, 63.826496, 178.576492, 255.0f }, { 51.153000, 65.484001, 181.789490, 255.0f }, { 50.183998, 67.116005, 185.002502, 255.0f }, { 48.985500, 68.799004, 188.215500, 255.0f }, { 47.608501, 70.482002, 191.428497, 255.0f }, { 45.951000, 72.215996, 194.667007, 255.0f }, { 44.064003, 74.000999, 197.931000, 255.0f }, { 41.845501, 75.862503, 201.194992, 255.0f }, { 39.295502, 77.826004, 204.433502, 255.0f }, { 36.388500, 79.865997, 207.697495, 255.0f }, { 33.022499, 82.033501, 210.859497, 255.0f }, { 29.248499, 84.303001, 213.868500, 255.0f }, { 25.143000, 86.623505, 216.622498, 255.0f }, { 20.808001, 88.892998, 218.994003, 255.0f }, { 16.473000, 91.085999, 220.932007, 255.0f }, { 12.291000, 93.100502, 222.411011, 255.0f }, { 8.389501, 94.961998, 223.507507, 255.0f }, { 5.431500, 96.695999, 224.298004, 255.0f }, { 3.468000, 98.251503, 224.782501, 255.0f }, { 2.193000, 99.730499, 225.088501, 255.0f }, { 1.530000, 101.107498, 225.241501, 255.0f }, { 1.300500, 102.433495, 225.266998, 255.0f }, { 1.377000, 103.682999, 225.190491, 255.0f }, { 1.708500, 104.881500, 225.037491, 255.0f }, { 2.269500, 106.054497, 224.807999, 255.0f }, { 2.958000, 107.176498, 224.527512, 255.0f }, { 3.774000, 108.273003, 224.221497, 255.0f }, { 4.692000, 109.343994, 223.864502, 255.0f }, { 5.686500, 110.389503, 223.456497, 255.0f }, { 6.732000, 111.435005, 223.048508, 255.0f }, { 7.803000, 112.455002, 222.589508, 255.0f }, { 8.899500, 113.449501, 222.130508, 255.0f }, { 10.047000, 114.444000, 221.645996, 255.0f }, { 11.143499, 115.413002, 221.136002, 255.0f }, { 12.163500, 116.382004, 220.625992, 255.0f }, { 13.106999, 
117.351006, 220.115997, 255.0f }, { 13.999500, 118.320000, 219.580505, 255.0f }, { 14.841001, 119.263504, 219.019501, 255.0f }, { 15.606000, 120.207001, 218.484009, 255.0f }, { 16.320002, 121.150505, 217.923004, 255.0f }, { 16.983000, 122.094002, 217.387512, 255.0f }, { 17.569500, 123.037498, 216.826508, 255.0f }, { 18.105001, 123.981003, 216.265503, 255.0f }, { 18.589499, 124.924500, 215.730011, 255.0f }, { 19.023001, 125.893501, 215.194504, 255.0f }, { 19.405500, 126.836998, 214.658997, 255.0f }, { 19.711500, 127.806007, 214.149002, 255.0f }, { 19.941000, 128.800507, 213.639008, 255.0f }, { 20.119501, 129.769501, 213.154495, 255.0f }, { 20.247002, 130.789505, 212.695496, 255.0f }, { 20.272499, 131.809494, 212.262009, 255.0f }, { 20.221500, 132.855011, 211.854004, 255.0f }, { 20.094000, 133.900497, 211.471497, 255.0f }, { 19.838999, 135.022507, 211.139999, 255.0f }, { 19.481998, 136.144501, 210.885010, 255.0f }, { 19.023001, 137.291992, 210.655502, 255.0f }, { 18.462002, 138.490494, 210.451492, 255.0f }, { 17.799000, 139.714508, 210.298492, 255.0f }, { 17.034000, 140.938492, 210.196503, 255.0f }, { 16.218000, 142.213501, 210.094498, 255.0f }, { 15.299999, 143.488495, 210.043503, 255.0f }, { 14.331000, 144.763504, 209.967010, 255.0f }, { 13.336499, 146.038513, 209.890488, 255.0f }, { 12.342000, 147.313507, 209.813995, 255.0f }, { 11.347500, 148.563004, 209.686508, 255.0f }, { 10.404000, 149.787003, 209.533493, 255.0f }, { 9.486000, 151.011002, 209.329498, 255.0f }, { 8.721001, 152.184006, 209.049011, 255.0f }, { 8.083500, 153.306000, 208.742996, 255.0f }, { 7.548000, 154.402496, 208.360504, 255.0f }, { 7.114500, 155.473511, 207.927002, 255.0f }, { 6.757500, 156.493500, 207.442490, 255.0f }, { 6.502500, 157.488007, 206.906998, 255.0f }, { 6.324000, 158.457001, 206.320496, 255.0f }, { 6.196500, 159.375000, 205.682999, 255.0f }, { 6.094500, 160.267502, 204.994507, 255.0f }, { 6.043500, 161.134506, 204.255005, 255.0f }, { 5.992500, 161.976013, 203.489990, 255.0f }, { 5.941500, 162.792007, 202.673996, 255.0f }, { 5.890500, 163.582504, 201.858002, 255.0f }, { 5.865000, 164.347504, 200.965500, 255.0f }, { 5.839500, 165.087006, 200.072998, 255.0f }, { 5.788500, 165.826508, 199.154999, 255.0f }, { 5.788500, 166.540497, 198.211502, 255.0f }, { 5.916000, 167.228989, 197.242508, 255.0f }, { 6.069000, 167.917511, 196.247986, 255.0f }, { 6.273000, 168.580490, 195.228012, 255.0f }, { 6.706500, 169.243500, 194.182495, 255.0f }, { 7.191000, 169.906494, 193.136993, 255.0f }, { 7.803000, 170.544006, 192.065994, 255.0f }, { 8.618999, 171.155991, 190.995010, 255.0f }, { 9.511500, 171.793488, 189.873001, 255.0f }, { 10.659000, 172.405502, 188.750992, 255.0f }, { 11.908501, 172.991989, 187.629013, 255.0f }, { 13.158000, 173.604004, 186.481506, 255.0f }, { 14.637000, 174.190491, 185.308502, 255.0f }, { 16.039499, 174.777008, 184.135498, 255.0f }, { 17.646000, 175.363495, 182.911499, 255.0f }, { 19.252499, 175.924500, 181.712997, 255.0f }, { 20.910000, 176.485504, 180.488998, 255.0f }, { 22.669500, 177.046494, 179.239502, 255.0f }, { 24.378000, 177.607498, 177.964493, 255.0f }, { 26.290501, 178.143005, 176.689499, 255.0f }, { 28.152000, 178.678497, 175.388992, 255.0f }, { 30.090000, 179.213989, 174.088501, 255.0f }, { 32.078999, 179.749512, 172.762497, 255.0f }, { 34.042500, 180.259506, 171.436493, 255.0f }, { 36.159000, 180.769501, 170.059494, 255.0f }, { 38.224503, 181.279495, 168.707993, 255.0f }, { 40.417500, 181.789490, 167.305496, 255.0f }, { 42.610500, 182.274002, 165.928497, 255.0f }, { 44.828999, 
182.783997, 164.500504, 255.0f }, { 47.149502, 183.242996, 163.072510, 255.0f }, { 49.418999, 183.727493, 161.619003, 255.0f }, { 51.841499, 184.186493, 160.165497, 255.0f }, { 54.264000, 184.645493, 158.686508, 255.0f }, { 56.711998, 185.104492, 157.207504, 255.0f }, { 59.262001, 185.512512, 155.728500, 255.0f }, { 61.786503, 185.945999, 154.223999, 255.0f }, { 64.438499, 186.353989, 152.694000, 255.0f }, { 67.090500, 186.761993, 151.189499, 255.0f }, { 69.742500, 187.144501, 149.659500, 255.0f }, { 72.547501, 187.527008, 148.129501, 255.0f }, { 75.301498, 187.884003, 146.599487, 255.0f }, { 78.132004, 188.215500, 145.069504, 255.0f }, { 81.013496, 188.547012, 143.565002, 255.0f }, { 83.869499, 188.852997, 142.034988, 255.0f }, { 86.827499, 189.133499, 140.556000, 255.0f }, { 89.760002, 189.414001, 139.051498, 255.0f }, { 92.692497, 189.668991, 137.598007, 255.0f }, { 95.701500, 189.873001, 136.144501, 255.0f }, { 98.659500, 190.076996, 134.716492, 255.0f }, { 101.643005, 190.255508, 133.339493, 255.0f }, { 104.626495, 190.408493, 131.962494, 255.0f }, { 107.558998, 190.561508, 130.636505, 255.0f }, { 110.516998, 190.663498, 129.335999, 255.0f }, { 113.398499, 190.791000, 128.035492, 255.0f }, { 116.305496, 190.867493, 126.786003, 255.0f }, { 119.136002, 190.918503, 125.561996, 255.0f }, { 121.966499, 190.969498, 124.363495, 255.0f }, { 124.746002, 191.020508, 123.190498, 255.0f }, { 127.500000, 191.020508, 122.042999, 255.0f }, { 130.202988, 191.045990, 120.895500, 255.0f }, { 132.906006, 191.045990, 119.798996, 255.0f }, { 135.532501, 191.020508, 118.702499, 255.0f }, { 138.159012, 190.995010, 117.631493, 255.0f }, { 140.734512, 190.969498, 116.560501, 255.0f }, { 143.284500, 190.918503, 115.540497, 255.0f }, { 145.809006, 190.867493, 114.495003, 255.0f }, { 148.307999, 190.791000, 113.500504, 255.0f }, { 150.781509, 190.714508, 112.505997, 255.0f }, { 153.229492, 190.638000, 111.537003, 255.0f }, { 155.626495, 190.561508, 110.542496, 255.0f }, { 158.023499, 190.459503, 109.598999, 255.0f }, { 160.395004, 190.357498, 108.655495, 255.0f }, { 162.740997, 190.229996, 107.711998, 255.0f }, { 165.061508, 190.127991, 106.793999, 255.0f }, { 167.382004, 190.000504, 105.875999, 255.0f }, { 169.651505, 189.873001, 104.958000, 255.0f }, { 171.921005, 189.745499, 104.065506, 255.0f }, { 174.165009, 189.592499, 103.172997, 255.0f }, { 176.408997, 189.464996, 102.280502, 255.0f }, { 178.602005, 189.311996, 101.388000, 255.0f }, { 180.820496, 189.158997, 100.520996, 255.0f }, { 182.987991, 189.005997, 99.653999, 255.0f }, { 185.155502, 188.827499, 98.787003, 255.0f }, { 187.322998, 188.674500, 97.919998, 255.0f }, { 189.464996, 188.496002, 97.053001, 255.0f }, { 191.581497, 188.317505, 96.211502, 255.0f }, { 193.697998, 188.139008, 95.344498, 255.0f }, { 195.814499, 187.986008, 94.502998, 255.0f }, { 197.905502, 187.781998, 93.661499, 255.0f }, { 199.996506, 187.603500, 92.794502, 255.0f }, { 202.061996, 187.425003, 91.952995, 255.0f }, { 204.127487, 187.246506, 91.111504, 255.0f }, { 206.167496, 187.068008, 90.244499, 255.0f }, { 208.233002, 186.889511, 89.403000, 255.0f }, { 210.272995, 186.711014, 88.536003, 255.0f }, { 212.287491, 186.532501, 87.668999, 255.0f }, { 214.327499, 186.353989, 86.802002, 255.0f }, { 216.341995, 186.175491, 85.935005, 255.0f }, { 218.356506, 185.996994, 85.068001, 255.0f }, { 220.371002, 185.843994, 84.150002, 255.0f }, { 222.360001, 185.691010, 83.257500, 255.0f }, { 224.348999, 185.537994, 82.339500, 255.0f }, { 226.363510, 185.410507, 81.421501, 255.0f }, { 
228.326996, 185.283005, 80.478004, 255.0f }, { 230.315994, 185.181000, 79.483498, 255.0f }, { 232.305008, 185.104492, 78.488998, 255.0f }, { 234.268494, 185.028000, 77.468994, 255.0f }, { 236.231995, 185.028000, 76.398003, 255.0f }, { 238.195496, 185.028000, 75.301498, 255.0f }, { 240.133499, 185.104492, 74.128494, 255.0f }, { 242.071503, 185.232010, 72.904503, 255.0f }, { 243.958511, 185.461502, 71.604004, 255.0f }, { 245.794510, 185.767502, 70.227005, 255.0f }, { 247.554001, 186.226501, 68.748001, 255.0f }, { 249.211502, 186.812988, 67.167000, 255.0f }, { 250.690506, 187.552490, 65.534996, 255.0f }, { 251.990997, 188.445007, 63.852001, 255.0f }, { 253.011002, 189.490494, 62.143501, 255.0f }, { 253.776001, 190.638000, 60.511497, 255.0f }, { 254.311508, 191.862000, 58.905003, 255.0f }, { 254.643005, 193.111511, 57.400497, 255.0f }, { 254.770508, 194.411987, 55.972500, 255.0f }, { 254.745010, 195.712494, 54.595501, 255.0f }, { 254.617493, 197.013000, 53.295002, 255.0f }, { 254.388000, 198.339005, 52.070999, 255.0f }, { 254.082001, 199.639496, 50.872498, 255.0f }, { 253.725006, 200.940002, 49.699501, 255.0f }, { 253.291504, 202.240494, 48.577503, 255.0f }, { 252.807007, 203.515503, 47.506500, 255.0f }, { 252.297012, 204.815994, 46.435501, 255.0f }, { 251.761490, 206.116501, 45.389999, 255.0f }, { 251.200485, 207.391510, 44.369999, 255.0f }, { 250.613998, 208.692001, 43.350002, 255.0f }, { 250.027496, 209.992493, 42.355499, 255.0f }, { 249.441010, 211.292999, 41.361000, 255.0f }, { 248.854492, 212.593506, 40.366501, 255.0f }, { 248.267990, 213.919510, 39.372002, 255.0f }, { 247.681503, 215.245499, 38.377499, 255.0f }, { 247.146011, 216.597000, 37.357502, 255.0f }, { 246.636002, 217.973999, 36.337498, 255.0f }, { 246.177002, 219.376495, 35.317501, 255.0f }, { 245.768997, 220.804489, 34.246498, 255.0f }, { 245.386505, 222.257996, 33.175499, 255.0f }, { 245.080490, 223.737000, 32.078999, 255.0f }, { 244.799988, 225.266998, 30.982500, 255.0f }, { 244.621490, 226.822510, 29.860500, 255.0f }, { 244.494003, 228.429001, 28.712999, 255.0f }, { 244.442993, 230.060989, 27.591000, 255.0f }, { 244.468506, 231.744003, 26.418001, 255.0f }, { 244.570496, 233.452499, 25.244999, 255.0f }, { 244.774506, 235.237503, 24.072001, 255.0f }, { 245.055008, 237.048004, 22.873499, 255.0f }, { 245.412003, 238.884003, 21.675001, 255.0f }, { 245.845505, 240.796494, 20.451000, 255.0f }, { 246.380997, 242.709000, 19.201500, 255.0f }, { 246.967499, 244.672501, 17.926500, 255.0f }, { 247.605011, 246.661499, 16.600500, 255.0f }, { 248.267990, 248.675995, 15.223500, 255.0f }, { 248.956497, 250.690506, 13.719001, 255.0f }, { 12.847666, 7.599765, 134.633636, 255.0f }, { 16.201681, 7.248630, 135.946625, 255.0f }, { 19.215014, 6.937530, 137.191788, 255.0f }, { 21.986610, 6.661875, 138.377777, 255.0f }, { 24.576645, 6.417075, 139.511261, 255.0f }, { 27.024900, 6.198795, 140.598846, 255.0f }, { 29.356621, 6.006780, 141.644348, 255.0f }, { 31.595264, 5.833890, 142.652878, 255.0f }, { 33.757156, 5.675790, 143.628754, 255.0f }, { 35.853767, 5.530185, 144.574554, 255.0f }, { 37.894787, 5.394270, 145.493317, 255.0f }, { 39.887356, 5.266005, 146.386581, 255.0f }, { 41.837849, 5.143605, 147.256882, 255.0f }, { 43.751369, 5.025030, 148.105530, 255.0f }, { 45.632248, 4.909260, 148.933762, 255.0f }, { 47.484314, 4.794765, 149.743134, 255.0f }, { 49.310368, 4.680270, 150.534149, 255.0f }, { 51.113476, 4.565010, 151.307816, 255.0f }, { 52.895924, 4.447710, 152.064926, 255.0f }, { 54.659248, 4.328115, 152.805939, 255.0f }, { 56.405235, 4.206735, 
153.531174, 255.0f }, { 58.135666, 4.081785, 154.241074, 255.0f }, { 59.852325, 3.953010, 154.935959, 255.0f }, { 61.555977, 3.819645, 155.616043, 255.0f }, { 63.248161, 3.681945, 156.281342, 255.0f }, { 64.929886, 3.539910, 156.931839, 255.0f }, { 66.601662, 3.393540, 157.567307, 255.0f }, { 68.264267, 3.242580, 158.188232, 255.0f }, { 69.918701, 3.087795, 158.794113, 255.0f }, { 71.565239, 2.929440, 159.384705, 255.0f }, { 73.204376, 2.768025, 159.960236, 255.0f }, { 74.836891, 2.604315, 160.519958, 255.0f }, { 76.463020, 2.438055, 161.064117, 255.0f }, { 78.083549, 2.270010, 161.591965, 255.0f }, { 79.698463, 2.100945, 162.103500, 255.0f }, { 81.308281, 1.931880, 162.598206, 255.0f }, { 82.913254, 1.763325, 163.075562, 255.0f }, { 84.513626, 1.596555, 163.535583, 255.0f }, { 86.109161, 1.432590, 163.977493, 255.0f }, { 87.700874, 1.272705, 164.401047, 255.0f }, { 89.288246, 1.117410, 164.805984, 255.0f }, { 90.871544, 0.968490, 165.191544, 255.0f }, { 92.451012, 0.826965, 165.557480, 255.0f }, { 94.026909, 0.694620, 165.903259, 255.0f }, { 95.598740, 0.572475, 166.228378, 255.0f }, { 97.166985, 0.462570, 166.532349, 255.0f }, { 98.731667, 0.365670, 166.815140, 255.0f }, { 100.292519, 0.284070, 167.075745, 255.0f }, { 101.849800, 0.219045, 167.313919, 255.0f }, { 103.403267, 0.172890, 167.529129, 255.0f }, { 104.952896, 0.147135, 167.721146, 255.0f }, { 106.498711, 0.143820, 167.889450, 255.0f }, { 108.040695, 0.164730, 168.033783, 255.0f }, { 109.578346, 0.211905, 168.153381, 255.0f }, { 111.112167, 0.287385, 168.248245, 255.0f }, { 112.641655, 0.392700, 168.317596, 255.0f }, { 114.167068, 0.530400, 168.361206, 255.0f }, { 115.687637, 0.702525, 168.379044, 255.0f }, { 117.203865, 0.911370, 168.370636, 255.0f }, { 118.715248, 1.158975, 168.335449, 255.0f }, { 120.221535, 1.447890, 168.273743, 255.0f }, { 121.722725, 1.779900, 168.184998, 255.0f }, { 123.218552, 2.157300, 168.069229, 255.0f }, { 124.709030, 2.582385, 167.926163, 255.0f }, { 126.193634, 3.057450, 167.755569, 255.0f }, { 127.672890, 3.584025, 167.557434, 255.0f }, { 129.145767, 4.164915, 167.331512, 255.0f }, { 130.612534, 4.802415, 167.078293, 255.0f }, { 132.072922, 5.498565, 166.797791, 255.0f }, { 133.526413, 6.255660, 166.489746, 255.0f }, { 134.973022, 7.075485, 166.154434, 255.0f }, { 136.412750, 7.960335, 165.792084, 255.0f }, { 137.845352, 8.912250, 165.403198, 255.0f }, { 139.270035, 9.933270, 164.987564, 255.0f }, { 140.687332, 10.999681, 164.545639, 255.0f }, { 142.096970, 12.069406, 164.077957, 255.0f }, { 143.498184, 13.143975, 163.584793, 255.0f }, { 144.891251, 14.223391, 163.066635, 255.0f }, { 146.276154, 15.307140, 162.524002, 255.0f }, { 147.652405, 16.395479, 161.957123, 255.0f }, { 149.019699, 17.487646, 161.367065, 255.0f }, { 150.378342, 18.583891, 160.754044, 255.0f }, { 151.727798, 19.683449, 160.118835, 255.0f }, { 153.067825, 20.786579, 159.462219, 255.0f }, { 154.398682, 21.892771, 158.784943, 255.0f }, { 155.720078, 23.002020, 158.087509, 255.0f }, { 157.032059, 24.113819, 157.370697, 255.0f }, { 158.334335, 25.228170, 156.635529, 255.0f }, { 159.626678, 26.344561, 155.882782, 255.0f }, { 160.909348, 27.463245, 155.113174, 255.0f }, { 162.182053, 28.583462, 154.327271, 255.0f }, { 163.444550, 29.705462, 153.526581, 255.0f }, { 164.697357, 30.828991, 152.711090, 255.0f }, { 165.940231, 31.953796, 151.882339, 255.0f }, { 167.172897, 33.079872, 151.040833, 255.0f }, { 168.395370, 34.206718, 150.187607, 255.0f }, { 169.607895, 35.334332, 149.323410, 255.0f }, { 170.810471, 36.462963, 148.449265, 
255.0f }, { 172.003113, 37.591846, 147.565445, 255.0f }, { 173.185806, 38.721241, 146.673187, 255.0f }, { 174.358292, 39.850891, 145.773300, 255.0f }, { 175.521088, 40.980793, 144.866272, 255.0f }, { 176.674194, 42.110954, 143.953110, 255.0f }, { 177.817612, 43.241112, 143.034348, 255.0f }, { 178.951096, 44.371277, 142.110474, 255.0f }, { 180.075394, 45.501434, 141.182541, 255.0f }, { 181.189987, 46.631340, 140.251022, 255.0f }, { 182.295166, 47.761246, 139.316193, 255.0f }, { 183.391159, 48.890892, 138.379059, 255.0f }, { 184.478210, 50.020290, 137.440155, 255.0f }, { 185.555847, 51.149433, 136.499710, 255.0f }, { 186.624817, 52.278316, 135.558258, 255.0f }, { 187.684845, 53.406944, 134.616547, 255.0f }, { 188.736465, 54.535320, 133.675079, 255.0f }, { 189.779160, 55.663441, 132.733627, 255.0f }, { 190.813690, 56.791306, 131.792679, 255.0f }, { 191.839554, 57.918915, 130.852997, 255.0f }, { 192.857529, 59.046524, 129.914352, 255.0f }, { 193.867310, 60.173878, 128.977463, 255.0f }, { 194.869217, 61.300980, 128.042130, 255.0f }, { 195.862946, 62.428337, 127.108574, 255.0f }, { 196.849289, 63.555435, 126.177315, 255.0f }, { 197.827972, 64.682793, 125.248604, 255.0f }, { 198.799026, 65.809891, 124.322441, 255.0f }, { 199.762665, 66.937500, 123.399094, 255.0f }, { 200.718903, 68.065109, 122.478287, 255.0f }, { 201.668030, 69.192970, 121.560028, 255.0f }, { 202.609985, 70.321350, 120.644836, 255.0f }, { 203.545074, 71.450233, 119.732193, 255.0f }, { 204.473038, 72.579636, 118.822601, 255.0f }, { 205.394089, 73.709534, 117.915825, 255.0f }, { 206.308258, 74.840210, 117.011848, 255.0f }, { 207.216064, 75.971642, 116.111191, 255.0f }, { 208.116714, 77.103836, 115.213081, 255.0f }, { 209.011002, 78.237061, 114.318031, 255.0f }, { 209.898651, 79.371552, 113.425529, 255.0f }, { 210.779938, 80.507072, 112.535583, 255.0f }, { 211.654587, 81.643860, 111.648178, 255.0f }, { 212.522614, 82.781921, 110.763329, 255.0f }, { 213.384247, 83.921776, 109.880775, 255.0f }, { 214.239532, 85.062897, 109.001030, 255.0f }, { 215.088409, 86.205811, 108.123314, 255.0f }, { 215.930939, 87.350502, 107.247643, 255.0f }, { 216.766830, 88.497246, 106.374016, 255.0f }, { 217.596344, 89.646011, 105.502167, 255.0f }, { 218.419479, 90.796829, 104.632111, 255.0f }, { 219.236237, 91.949944, 103.763840, 255.0f }, { 220.046387, 93.105347, 102.897346, 255.0f }, { 220.849899, 94.263298, 102.032135, 255.0f }, { 221.646759, 95.424057, 101.168190, 255.0f }, { 222.437271, 96.587364, 100.305527, 255.0f }, { 223.220871, 97.753487, 99.443878, 255.0f }, { 223.997864, 98.922661, 98.583000, 255.0f }, { 224.767975, 100.094894, 97.723389, 255.0f }, { 225.531189, 101.270447, 96.864304, 255.0f }, { 226.287506, 102.449310, 96.005966, 255.0f }, { 227.036697, 103.631493, 95.148148, 255.0f }, { 227.778748, 104.817238, 94.290840, 255.0f }, { 228.513397, 106.006561, 93.433784, 255.0f }, { 229.240921, 107.199959, 92.576988, 255.0f }, { 229.960785, 108.397186, 91.720444, 255.0f }, { 230.673248, 109.598236, 90.863892, 255.0f }, { 231.378082, 110.803619, 90.007355, 255.0f }, { 232.074997, 112.013336, 89.150551, 255.0f }, { 232.764008, 113.227394, 88.294006, 255.0f }, { 233.445114, 114.445786, 87.436951, 255.0f }, { 234.117798, 115.668762, 86.579895, 255.0f }, { 234.782074, 116.896332, 85.722328, 255.0f }, { 235.438171, 118.129005, 84.864258, 255.0f }, { 236.085373, 119.366264, 84.005920, 255.0f }, { 236.723892, 120.608620, 83.147087, 255.0f }, { 237.353485, 121.856087, 82.287735, 255.0f }, { 237.974167, 123.108902, 81.427872, 255.0f }, { 238.585663, 
124.366562, 80.567764, 255.0f }, { 239.187454, 125.630081, 79.706627, 255.0f }, { 239.779816, 126.898712, 78.845238, 255.0f }, { 240.362488, 128.172943, 77.983078, 255.0f }, { 240.935226, 129.452789, 77.120422, 255.0f }, { 241.498001, 130.738251, 76.257492, 255.0f }, { 242.050339, 132.029572, 75.393806, 255.0f }, { 242.592728, 133.326752, 74.530128, 255.0f }, { 243.124130, 134.629807, 73.665169, 255.0f }, { 243.644852, 135.938705, 72.799950, 255.0f }, { 244.154587, 137.253754, 71.934479, 255.0f }, { 244.653122, 138.574905, 71.068756, 255.0f }, { 245.140686, 139.902191, 70.202774, 255.0f }, { 245.616776, 141.235580, 69.336792, 255.0f }, { 246.081116, 142.575089, 68.470810, 255.0f }, { 246.533493, 143.920990, 67.605087, 255.0f }, { 246.974136, 145.273499, 66.738853, 255.0f }, { 247.402283, 146.632141, 65.872879, 255.0f }, { 247.817932, 147.997406, 65.007408, 255.0f }, { 248.221069, 149.369064, 64.142700, 255.0f }, { 248.611481, 150.747070, 63.278507, 255.0f }, { 248.989136, 152.131729, 62.415585, 255.0f }, { 249.353271, 153.523010, 61.553684, 255.0f }, { 249.704422, 154.920670, 60.693314, 255.0f }, { 250.041779, 156.324951, 59.834732, 255.0f }, { 250.365631, 157.735855, 58.978188, 255.0f }, { 250.675446, 159.153412, 58.123936, 255.0f }, { 250.970749, 160.578094, 57.271725, 255.0f }, { 251.251755, 162.009155, 56.422577, 255.0f }, { 251.517975, 163.447083, 55.576740, 255.0f }, { 251.769653, 164.891418, 54.735237, 255.0f }, { 252.006287, 166.342865, 53.897820, 255.0f }, { 252.227646, 167.800964, 53.065502, 255.0f }, { 252.433426, 169.265686, 52.239044, 255.0f }, { 252.623657, 170.737289, 51.418713, 255.0f }, { 252.798080, 172.215530, 50.605515, 255.0f }, { 252.956177, 173.700638, 49.800228, 255.0f }, { 253.097961, 175.192657, 49.003349, 255.0f }, { 253.223160, 176.691284, 48.216419, 255.0f }, { 253.331284, 178.196548, 47.440453, 255.0f }, { 253.422562, 179.708954, 46.675964, 255.0f }, { 253.496262, 181.227997, 45.924736, 255.0f }, { 253.552628, 182.753662, 45.188042, 255.0f }, { 253.590866, 184.286209, 44.467155, 255.0f }, { 253.611023, 185.825638, 43.763607, 255.0f }, { 253.613052, 187.371704, 43.079189, 255.0f }, { 253.596222, 188.924408, 42.415424, 255.0f }, { 253.560532, 190.483719, 41.774353, 255.0f }, { 253.505951, 192.049927, 41.158020, 255.0f }, { 253.432007, 193.622513, 40.568459, 255.0f }, { 253.337906, 195.202240, 40.007206, 255.0f }, { 253.223419, 196.788605, 39.476040, 255.0f }, { 253.088776, 198.381577, 38.978024, 255.0f }, { 252.933731, 199.980942, 38.515709, 255.0f }, { 252.758286, 201.586929, 38.091137, 255.0f }, { 252.561951, 203.199051, 37.706852, 255.0f }, { 252.344681, 204.817276, 37.364895, 255.0f }, { 252.105240, 206.442642, 37.066036, 255.0f }, { 251.843353, 208.074387, 36.812565, 255.0f }, { 251.559799, 209.712250, 36.607033, 255.0f }, { 251.255066, 211.355728, 36.450977, 255.0f }, { 250.927917, 213.005325, 36.344639, 255.0f }, { 250.576523, 214.662064, 36.287266, 255.0f }, { 250.203461, 216.323898, 36.281143, 255.0f }, { 249.809219, 217.990845, 36.325516, 255.0f }, { 249.388718, 219.665161, 36.416042, 255.0f }, { 248.947571, 221.344086, 36.554504, 255.0f }, { 248.482971, 223.028610, 36.735554, 255.0f }, { 247.995148, 224.718750, 36.955364, 255.0f }, { 247.485916, 226.413483, 37.209343, 255.0f }, { 246.952957, 228.113815, 37.488571, 255.0f }, { 246.399094, 229.818497, 37.785900, 255.0f }, { 245.825363, 231.527252, 38.089352, 255.0f }, { 245.228653, 233.241364, 38.382599, 255.0f }, { 244.615387, 234.958786, 38.649330, 255.0f }, { 243.986038, 236.678772, 38.864296, 255.0f 
}, { 243.343185, 238.401535, 38.994858, 255.0f }, { 242.690140, 240.126114, 38.995876, 255.0f }, { 242.033493, 241.850922, 38.805389, 255.0f }, { 241.383499, 243.573456, 38.333637, 255.0f }, { 240.758759, 245.288589, 37.449554, 255.0f }, { 240.183487, 246.990448, 35.943779, 255.0f }, { 239.703827, 248.665283, 33.488132, 255.0f }, { 48.437252, 18.298800, 59.203350, 255.0f }, { 49.681648, 21.264450, 66.679947, 255.0f }, { 50.887802, 24.219900, 74.011200, 255.0f }, { 52.058250, 27.162600, 81.202194, 255.0f }, { 53.193001, 30.095100, 88.247849, 255.0f }, { 54.292049, 33.014851, 95.150703, 255.0f }, { 55.355400, 35.921852, 101.908195, 255.0f }, { 56.383049, 38.818649, 108.522896, 255.0f }, { 57.375000, 41.702702, 114.994804, 255.0f }, { 58.331253, 44.576553, 121.323906, 255.0f }, { 59.251801, 47.437649, 127.510201, 255.0f }, { 60.134098, 50.285999, 133.551147, 255.0f }, { 60.983250, 53.124153, 139.449295, 255.0f }, { 61.796700, 55.949551, 145.202087, 255.0f }, { 62.574448, 58.762203, 150.812103, 255.0f }, { 63.316502, 61.564651, 156.279312, 255.0f }, { 64.022850, 64.354347, 161.603699, 255.0f }, { 64.690948, 67.133850, 166.785309, 255.0f }, { 65.325897, 69.900604, 171.821548, 255.0f }, { 65.925148, 72.654602, 176.715012, 255.0f }, { 66.488701, 75.398399, 181.463089, 255.0f }, { 67.014000, 78.129448, 186.068405, 255.0f }, { 67.506149, 80.850296, 190.530899, 255.0f }, { 67.962601, 83.558395, 194.850601, 255.0f }, { 68.380798, 86.253754, 199.027496, 255.0f }, { 68.765854, 88.938904, 203.059052, 255.0f }, { 69.112656, 91.611298, 206.947800, 255.0f }, { 69.426300, 94.273506, 210.691208, 255.0f }, { 69.701698, 96.920403, 214.294357, 255.0f }, { 69.943947, 99.559654, 217.752151, 255.0f }, { 70.147949, 102.183601, 221.064606, 255.0f }, { 70.318802, 104.797348, 224.236801, 255.0f }, { 70.451393, 107.400902, 227.263641, 255.0f }, { 70.550850, 109.991699, 230.147705, 255.0f }, { 70.612053, 112.569748, 232.886398, 255.0f }, { 70.637550, 115.137596, 235.484848, 255.0f }, { 70.629906, 117.690147, 237.937943, 255.0f }, { 70.584000, 120.235046, 240.245697, 255.0f }, { 70.502396, 122.767204, 242.413208, 255.0f }, { 70.387650, 125.286606, 244.435349, 255.0f }, { 70.234650, 127.793251, 246.314697, 255.0f }, { 70.045952, 130.289703, 248.051254, 255.0f }, { 69.821548, 132.775955, 249.642456, 255.0f }, { 69.546150, 135.251999, 251.075562, 255.0f }, { 69.120300, 137.738251, 252.271500, 255.0f }, { 68.538895, 140.237244, 253.222656, 255.0f }, { 67.809601, 142.746460, 253.936646, 255.0f }, { 66.942596, 145.265854, 254.421158, 255.0f }, { 65.948097, 147.792908, 254.683792, 255.0f }, { 64.833748, 150.322510, 254.734802, 255.0f }, { 63.612301, 152.854660, 254.579254, 255.0f }, { 62.288849, 155.389343, 254.227356, 255.0f }, { 60.878700, 157.924057, 253.686752, 255.0f }, { 59.384399, 160.453659, 252.965103, 255.0f }, { 57.823799, 162.978149, 252.070053, 255.0f }, { 56.199451, 165.497543, 251.011795, 255.0f }, { 54.524097, 168.009308, 249.795456, 255.0f }, { 52.805401, 170.508301, 248.428650, 255.0f }, { 51.053551, 172.997101, 246.924164, 255.0f }, { 49.281300, 175.470596, 245.284500, 255.0f }, { 47.493752, 177.926239, 243.519897, 255.0f }, { 45.703651, 180.366592, 241.640549, 255.0f }, { 43.918652, 182.783997, 239.651550, 255.0f }, { 42.148949, 185.181000, 237.560547, 255.0f }, { 40.402199, 187.555054, 235.377747, 255.0f }, { 38.691151, 189.903595, 233.110809, 255.0f }, { 37.023449, 192.221542, 230.764801, 255.0f }, { 35.409302, 194.511459, 228.352509, 255.0f }, { 33.858902, 196.770752, 225.878998, 255.0f }, { 32.379902, 
198.994354, 223.354492, 255.0f }, { 30.985050, 201.184799, 220.781540, 255.0f }, { 29.679449, 203.337006, 218.175446, 255.0f }, { 28.475851, 205.450943, 215.538757, 255.0f }, { 27.381901, 207.521545, 212.884201, 255.0f }, { 26.410349, 209.551346, 210.214355, 255.0f }, { 25.566299, 211.535263, 207.541946, 255.0f }, { 24.862499, 213.470703, 204.872101, 255.0f }, { 24.306601, 215.360260, 202.212463, 255.0f }, { 23.911350, 217.196259, 199.573196, 255.0f }, { 23.681849, 218.981247, 196.962006, 255.0f }, { 23.630850, 220.712708, 194.386505, 255.0f }, { 23.765999, 222.388046, 191.854355, 255.0f }, { 24.100050, 224.002213, 189.375748, 255.0f }, { 24.638100, 225.557709, 186.955811, 255.0f }, { 25.392899, 227.052002, 184.602158, 255.0f }, { 26.372099, 228.480011, 182.324997, 255.0f }, { 27.578249, 229.862106, 180.027451, 255.0f }, { 29.003698, 231.216156, 177.610062, 255.0f }, { 30.635700, 232.542160, 175.083008, 255.0f }, { 32.469151, 233.837555, 172.448853, 255.0f }, { 34.491299, 235.102356, 169.717804, 255.0f }, { 36.697052, 236.334000, 166.892395, 255.0f }, { 39.073650, 237.535049, 163.985397, 255.0f }, { 41.613453, 238.702942, 160.999359, 255.0f }, { 44.311348, 239.835144, 157.941895, 255.0f }, { 47.152050, 240.934204, 154.818146, 255.0f }, { 50.130451, 241.997559, 151.638290, 255.0f }, { 53.236351, 243.025208, 148.407455, 255.0f }, { 56.462101, 244.014603, 145.130707, 255.0f }, { 59.794952, 244.965759, 141.815704, 255.0f }, { 63.232349, 245.878647, 138.472656, 255.0f }, { 66.758995, 246.750748, 135.101547, 255.0f }, { 70.372353, 247.584610, 131.715149, 255.0f }, { 74.057098, 248.377655, 128.318558, 255.0f }, { 77.808151, 249.127350, 124.916855, 255.0f }, { 81.615303, 249.833710, 121.517700, 255.0f }, { 85.468353, 250.496689, 118.128754, 255.0f }, { 89.359650, 251.116348, 114.755096, 255.0f }, { 93.281555, 251.690109, 111.404396, 255.0f }, { 97.223846, 252.217957, 108.084305, 255.0f }, { 101.178902, 252.699905, 104.799896, 255.0f }, { 105.133949, 253.133408, 101.556297, 255.0f }, { 109.083900, 253.518448, 98.366249, 255.0f }, { 113.018555, 253.855042, 95.229752, 255.0f }, { 116.927696, 254.140656, 92.157005, 255.0f }, { 120.806252, 254.375259, 89.155647, 255.0f }, { 124.641449, 254.561401, 86.230804, 255.0f }, { 128.423111, 254.691452, 83.387550, 255.0f }, { 132.146103, 254.770508, 80.636093, 255.0f }, { 135.800247, 254.793442, 77.981552, 255.0f }, { 139.377899, 254.762848, 75.431557, 255.0f }, { 142.866302, 254.676147, 72.988647, 255.0f }, { 146.260361, 254.533356, 70.665596, 255.0f }, { 149.547302, 254.334442, 68.464943, 255.0f }, { 152.722046, 254.076889, 66.396896, 255.0f }, { 155.774399, 253.760712, 64.463997, 255.0f }, { 158.694153, 253.383301, 62.676453, 255.0f }, { 161.473648, 252.947250, 61.039352, 255.0f }, { 164.123108, 252.447449, 59.557800, 255.0f }, { 166.754700, 251.876251, 58.229248, 255.0f }, { 169.391403, 251.236191, 57.043499, 255.0f }, { 172.028091, 250.527313, 55.998001, 255.0f }, { 174.659698, 249.749542, 55.085102, 255.0f }, { 177.288742, 248.905502, 54.299702, 255.0f }, { 179.910141, 248.000244, 53.631599, 255.0f }, { 182.521347, 247.031250, 53.078251, 255.0f }, { 185.119797, 245.998489, 52.632000, 255.0f }, { 187.705505, 244.909653, 52.285198, 255.0f }, { 190.273346, 243.762146, 52.035301, 255.0f }, { 192.823349, 242.558563, 51.874649, 255.0f }, { 195.350403, 241.298843, 51.793049, 255.0f }, { 197.857056, 239.988144, 51.790501, 255.0f }, { 200.335648, 238.626450, 51.856800, 255.0f }, { 202.786194, 237.213745, 51.984299, 255.0f }, { 205.206146, 235.752609, 52.170448, 
255.0f }, { 207.595505, 234.245544, 52.407600, 255.0f }, { 209.949142, 232.695145, 52.690651, 255.0f }, { 212.264542, 231.098862, 53.009403, 255.0f }, { 214.539154, 229.464310, 53.361301, 255.0f }, { 216.775497, 227.786407, 53.738701, 255.0f }, { 218.963409, 226.070251, 54.136501, 255.0f }, { 221.107941, 224.318390, 54.547050, 255.0f }, { 223.201492, 222.530853, 54.965252, 255.0f }, { 225.244049, 220.710159, 55.383450, 255.0f }, { 227.235611, 218.856293, 55.793999, 255.0f }, { 229.168503, 216.971848, 56.196899, 255.0f }, { 231.042755, 215.059357, 56.579399, 255.0f }, { 232.858337, 213.118805, 56.936398, 255.0f }, { 234.610199, 211.155289, 57.262798, 255.0f }, { 236.298294, 209.163742, 57.553501, 255.0f }, { 237.917542, 207.151794, 57.800850, 255.0f }, { 239.467957, 205.119446, 57.997200, 255.0f }, { 240.946960, 203.066696, 58.139999, 255.0f }, { 242.349442, 200.998657, 58.219051, 255.0f }, { 243.678009, 198.912750, 58.231800, 255.0f }, { 244.924942, 196.811554, 58.168049, 255.0f }, { 246.092850, 194.697601, 58.022701, 255.0f }, { 247.174042, 192.573456, 57.790649, 255.0f }, { 248.173645, 190.439087, 57.466801, 255.0f }, { 249.081451, 188.297104, 57.040951, 255.0f }, { 249.900009, 186.150009, 56.510548, 255.0f }, { 250.636948, 183.957001, 55.890900, 255.0f }, { 251.299957, 181.687500, 55.207500, 255.0f }, { 251.891556, 179.341507, 54.462898, 255.0f }, { 252.414307, 176.924103, 53.659649, 255.0f }, { 252.865662, 174.440399, 52.800297, 255.0f }, { 253.250702, 171.890396, 51.887402, 255.0f }, { 253.566895, 169.284302, 50.926048, 255.0f }, { 253.814255, 166.619553, 49.921349, 255.0f }, { 253.997864, 163.906342, 48.870750, 255.0f }, { 254.117706, 161.142151, 47.781898, 255.0f }, { 254.171249, 158.337158, 46.657352, 255.0f }, { 254.163605, 155.491348, 45.497101, 255.0f }, { 254.092194, 152.607300, 44.308800, 255.0f }, { 253.962158, 149.692642, 43.092449, 255.0f }, { 253.768356, 146.749954, 41.850601, 255.0f }, { 253.518448, 143.784302, 40.590900, 255.0f }, { 253.207352, 140.795700, 39.313351, 255.0f }, { 252.840149, 137.791794, 38.020500, 255.0f }, { 252.416855, 134.777710, 36.714901, 255.0f }, { 251.937454, 131.750854, 35.401653, 255.0f }, { 251.404495, 128.721451, 34.085850, 255.0f }, { 250.818008, 125.692047, 32.764950, 255.0f }, { 250.175400, 122.665199, 31.446600, 255.0f }, { 249.484360, 119.645996, 30.133350, 255.0f }, { 248.739746, 116.637001, 28.827749, 255.0f }, { 247.946701, 113.640755, 27.532351, 255.0f }, { 247.105194, 110.667450, 26.249701, 255.0f }, { 246.215256, 107.714554, 24.984900, 255.0f }, { 245.276855, 104.787155, 23.740499, 255.0f }, { 244.292557, 101.892899, 22.519051, 255.0f }, { 243.264908, 99.031799, 21.323099, 255.0f }, { 242.191345, 96.208954, 20.157749, 255.0f }, { 241.071899, 93.426903, 19.025551, 255.0f }, { 239.914200, 90.693298, 17.929049, 255.0f }, { 238.710587, 88.008148, 16.870800, 255.0f }, { 237.468750, 85.379097, 15.855901, 255.0f }, { 236.188660, 82.806152, 14.884351, 255.0f }, { 234.867752, 80.296951, 13.961249, 255.0f }, { 233.508591, 77.851501, 13.091700, 255.0f }, { 232.111206, 75.477448, 12.275701, 255.0f }, { 230.680649, 73.174805, 11.515800, 255.0f }, { 229.214401, 70.951202, 10.819650, 255.0f }, { 227.709900, 68.801544, 10.182150, 255.0f }, { 226.162048, 66.687599, 9.570150, 255.0f }, { 224.568298, 64.601700, 8.978550, 255.0f }, { 222.926102, 62.541302, 8.407350, 255.0f }, { 221.238007, 60.511497, 7.859100, 255.0f }, { 219.501450, 58.509750, 7.331250, 255.0f }, { 217.718994, 56.533501, 6.826350, 255.0f }, { 215.888107, 54.587852, 6.341850, 255.0f }, 
{ 214.011292, 52.667702, 5.877750, 255.0f }, { 212.088593, 50.775600, 5.434050, 255.0f }, { 210.117447, 48.914101, 5.013300, 255.0f }, { 208.100388, 47.078098, 4.612950, 255.0f }, { 206.037460, 45.270153, 4.233000, 255.0f }, { 203.926041, 43.490250, 3.876000, 255.0f }, { 201.768753, 41.738400, 3.536850, 255.0f }, { 199.563004, 40.017151, 3.223200, 255.0f }, { 197.311340, 38.321400, 2.927400, 255.0f }, { 195.013809, 36.653698, 2.654550, 255.0f }, { 192.667801, 35.014050, 2.402100, 255.0f }, { 190.273346, 33.399899, 2.170050, 255.0f }, { 187.835556, 31.816351, 1.960950, 255.0f }, { 185.349304, 30.260851, 1.772250, 255.0f }, { 182.814606, 28.733400, 1.603950, 255.0f }, { 180.233994, 27.233999, 1.456050, 255.0f }, { 177.607498, 25.760099, 1.331100, 255.0f }, { 174.935104, 24.316801, 1.226550, 255.0f }, { 172.214249, 22.899000, 1.144950, 255.0f }, { 169.444946, 21.511801, 1.081200, 255.0f }, { 166.629761, 20.150101, 1.040400, 255.0f }, { 163.768646, 18.819000, 1.022550, 255.0f }, { 160.859100, 17.513401, 1.022550, 255.0f }, { 157.903641, 16.235851, 1.045500, 255.0f }, { 154.902313, 14.988900, 1.088850, 255.0f }, { 151.852493, 13.767449, 1.155150, 255.0f }, { 148.756805, 12.574050, 1.239300, 255.0f }, { 145.612656, 11.408700, 1.348950, 255.0f }, { 142.422607, 10.271399, 1.476450, 255.0f }, { 139.186646, 9.162150, 1.626900, 255.0f }, { 135.902252, 8.080951, 1.797750, 255.0f }, { 132.571945, 7.027800, 1.989000, 255.0f }, { 129.193207, 6.002700, 2.200650, 255.0f }, { 125.768547, 5.005650, 2.435250, 255.0f }, { 122.298004, 4.036650, 2.690250, 255.0f }, { 68.086021, 1.242870, 84.000824, 255.0f }, { 68.470055, 2.449275, 85.533882, 255.0f }, { 68.835724, 3.729375, 87.051643, 255.0f }, { 69.182777, 5.085210, 88.553596, 255.0f }, { 69.511475, 6.518565, 90.038712, 255.0f }, { 69.821289, 8.031735, 91.507515, 255.0f }, { 70.112755, 9.626760, 92.958466, 255.0f }, { 70.385605, 11.262586, 94.391823, 255.0f }, { 70.639595, 12.837720, 95.807320, 255.0f }, { 70.874954, 14.362620, 97.203705, 255.0f }, { 71.091705, 15.846974, 98.580963, 255.0f }, { 71.289330, 17.298180, 99.938835, 255.0f }, { 71.468086, 18.721334, 101.276566, 255.0f }, { 71.627975, 20.121284, 102.593895, 255.0f }, { 71.768730, 21.501600, 103.890564, 255.0f }, { 71.890625, 22.864830, 105.165825, 255.0f }, { 71.993385, 24.213524, 106.419411, 255.0f }, { 72.077286, 25.549978, 107.650803, 255.0f }, { 72.142044, 26.875216, 108.860008, 255.0f }, { 72.188210, 28.191013, 110.046265, 255.0f }, { 72.215233, 29.498400, 111.209328, 255.0f }, { 72.223396, 30.798136, 112.348923, 255.0f }, { 72.212685, 32.091240, 113.464798, 255.0f }, { 72.183357, 33.378227, 114.556458, 255.0f }, { 72.135422, 34.659599, 115.623878, 255.0f }, { 72.068863, 35.936131, 116.666832, 255.0f }, { 71.983955, 37.207561, 117.685051, 255.0f }, { 71.881187, 38.474655, 118.678268, 255.0f }, { 71.760063, 39.737671, 119.646255, 255.0f }, { 71.621338, 40.996605, 120.589241, 255.0f }, { 71.465019, 42.251717, 121.506989, 255.0f }, { 71.291374, 43.502743, 122.399239, 255.0f }, { 71.100632, 44.749950, 123.266235, 255.0f }, { 70.893059, 45.993584, 124.107735, 255.0f }, { 70.669167, 47.233143, 124.923988, 255.0f }, { 70.429474, 48.468868, 125.715256, 255.0f }, { 70.173706, 49.700775, 126.481277, 255.0f }, { 69.902634, 50.928852, 127.222305, 255.0f }, { 69.616531, 52.152599, 127.938858, 255.0f }, { 69.316139, 53.372265, 128.630661, 255.0f }, { 69.001732, 54.587593, 129.298264, 255.0f }, { 68.673538, 55.798588, 129.942123, 255.0f }, { 68.331841, 57.004993, 130.562042, 255.0f }, { 67.977898, 58.206810, 
131.158997, 255.0f }, { 67.611977, 59.403782, 131.732742, 255.0f }, { 67.234062, 60.595901, 132.284302, 255.0f }, { 66.845192, 61.782928, 132.813431, 255.0f }, { 66.445602, 62.965111, 133.321136, 255.0f }, { 66.036072, 64.141937, 133.807678, 255.0f }, { 65.617111, 65.313156, 134.273560, 255.0f }, { 65.189476, 66.479263, 134.719574, 255.0f }, { 64.753426, 67.639771, 135.145660, 255.0f }, { 64.309464, 68.794662, 135.552643, 255.0f }, { 63.858379, 69.943947, 135.941269, 255.0f }, { 63.400394, 71.087624, 136.311768, 255.0f }, { 62.936806, 72.225441, 136.664963, 255.0f }, { 62.467861, 73.357124, 137.001297, 255.0f }, { 61.993813, 74.483459, 137.321579, 255.0f }, { 61.515434, 75.603676, 137.625793, 255.0f }, { 61.033230, 76.718025, 137.915222, 255.0f }, { 60.547455, 77.826515, 138.189865, 255.0f }, { 60.059128, 78.929390, 138.450729, 255.0f }, { 59.568764, 80.026138, 138.698074, 255.0f }, { 59.076870, 81.117027, 138.932678, 255.0f }, { 58.583443, 82.202057, 139.155029, 255.0f }, { 58.089508, 83.281471, 139.365646, 255.0f }, { 57.595062, 84.355278, 139.565063, 255.0f }, { 57.100872, 85.423470, 139.753525, 255.0f }, { 56.607197, 86.486053, 139.931763, 255.0f }, { 56.114532, 87.543282, 140.100327, 255.0f }, { 55.623154, 88.595154, 140.259689, 255.0f }, { 55.133549, 89.641426, 140.409882, 255.0f }, { 54.645988, 90.682846, 140.551926, 255.0f }, { 54.160725, 91.719170, 140.686050, 255.0f }, { 53.678265, 92.750389, 140.812531, 255.0f }, { 53.198868, 93.776756, 140.932129, 255.0f }, { 52.722778, 94.798294, 141.044830, 255.0f }, { 52.250267, 95.815231, 141.150925, 255.0f }, { 51.781063, 96.827583, 141.250870, 255.0f }, { 51.315945, 97.835854, 141.344971, 255.0f }, { 50.854652, 98.839790, 141.433716, 255.0f }, { 50.397179, 99.839645, 141.517105, 255.0f }, { 49.944298, 100.835419, 141.595367, 255.0f }, { 49.495502, 101.827362, 141.669067, 255.0f }, { 49.051037, 102.815742, 141.738190, 255.0f }, { 48.610905, 103.800560, 141.802689, 255.0f }, { 48.175365, 104.782051, 141.863129, 255.0f }, { 47.743908, 105.760223, 141.919479, 255.0f }, { 47.316780, 106.735352, 141.972015, 255.0f }, { 46.893990, 107.707664, 142.020721, 255.0f }, { 46.475281, 108.676918, 142.065613, 255.0f }, { 46.060394, 109.643623, 142.106903, 255.0f }, { 45.649845, 110.607780, 142.144653, 255.0f }, { 45.242867, 111.569382, 142.179062, 255.0f }, { 44.839455, 112.528946, 142.209686, 255.0f }, { 44.439869, 113.486221, 142.236969, 255.0f }, { 44.043346, 114.441704, 142.260666, 255.0f }, { 43.649879, 115.395149, 142.281067, 255.0f }, { 43.259727, 116.346809, 142.297653, 255.0f }, { 42.872131, 117.296936, 142.310913, 255.0f }, { 42.487335, 118.245544, 142.320343, 255.0f }, { 42.104836, 119.192863, 142.325958, 255.0f }, { 41.724377, 120.138916, 142.327744, 255.0f }, { 41.346207, 121.083687, 142.325699, 255.0f }, { 40.969578, 122.027702, 142.319321, 255.0f }, { 40.594467, 122.970436, 142.308609, 255.0f }, { 40.220894, 123.912659, 142.293320, 255.0f }, { 39.848850, 124.854118, 142.273682, 255.0f }, { 39.477825, 125.794815, 142.249191, 255.0f }, { 39.107822, 126.735001, 142.219620, 255.0f }, { 38.739090, 127.674667, 142.184692, 255.0f }, { 38.371380, 128.614105, 142.144653, 255.0f }, { 38.004944, 129.552994, 142.098755, 255.0f }, { 37.639786, 130.491913, 142.047485, 255.0f }, { 37.275902, 131.430313, 141.989868, 255.0f }, { 36.913544, 132.368713, 141.925858, 255.0f }, { 36.552464, 133.307129, 141.855225, 255.0f }, { 36.193428, 134.245514, 141.777710, 255.0f }, { 35.836678, 135.183655, 141.693039, 255.0f }, { 35.482483, 136.122055, 141.600983, 255.0f 
}, { 35.131348, 137.060455, 141.501038, 255.0f }, { 34.784039, 137.999115, 141.393158, 255.0f }, { 34.441830, 138.937515, 141.277390, 255.0f }, { 34.104465, 139.876419, 141.152954, 255.0f }, { 33.773220, 140.815079, 141.019577, 255.0f }, { 33.448860, 141.754257, 140.877045, 255.0f }, { 33.132915, 142.693405, 140.725327, 255.0f }, { 32.825893, 143.632584, 140.563400, 255.0f }, { 32.529842, 144.571991, 140.391785, 255.0f }, { 32.245514, 145.511414, 140.209457, 255.0f }, { 31.975470, 146.451080, 140.016922, 255.0f }, { 31.720724, 147.390503, 139.813171, 255.0f }, { 31.483065, 148.330185, 139.598480, 255.0f }, { 31.264530, 149.269608, 139.372040, 255.0f }, { 31.066904, 150.209030, 139.133865, 255.0f }, { 30.892740, 151.148438, 138.883453, 255.0f }, { 30.744074, 152.087616, 138.620804, 255.0f }, { 30.623459, 153.026520, 138.345154, 255.0f }, { 30.533190, 153.965164, 138.057007, 255.0f }, { 30.475559, 154.903320, 137.755585, 255.0f }, { 30.452866, 155.840958, 137.440399, 255.0f }, { 30.468164, 156.778336, 137.111465, 255.0f }, { 30.523245, 157.714951, 136.768478, 255.0f }, { 30.620655, 158.651047, 136.411240, 255.0f }, { 30.762690, 159.586151, 136.039429, 255.0f }, { 30.951900, 160.520462, 135.653122, 255.0f }, { 31.189560, 161.454025, 135.251495, 255.0f }, { 31.478220, 162.386292, 134.834564, 255.0f }, { 31.818899, 163.317566, 134.402344, 255.0f }, { 32.213127, 164.247284, 133.954300, 255.0f }, { 32.662186, 165.175995, 133.490204, 255.0f }, { 33.167088, 166.102921, 133.010040, 255.0f }, { 33.728340, 167.028564, 132.513550, 255.0f }, { 34.346458, 167.952179, 132.000488, 255.0f }, { 35.021442, 168.874268, 131.470612, 255.0f }, { 35.753551, 169.794037, 130.923889, 255.0f }, { 36.542267, 170.712036, 130.359818, 255.0f }, { 37.387077, 171.627747, 129.778671, 255.0f }, { 38.287743, 172.540894, 129.180191, 255.0f }, { 39.242973, 173.451767, 128.563873, 255.0f }, { 40.252003, 174.360077, 127.929924, 255.0f }, { 41.314079, 175.265579, 127.277893, 255.0f }, { 42.427666, 176.168274, 126.608009, 255.0f }, { 43.591740, 177.067917, 125.919762, 255.0f }, { 44.805283, 177.964493, 125.213409, 255.0f }, { 46.066517, 178.857513, 124.488197, 255.0f }, { 47.374664, 179.747208, 123.744614, 255.0f }, { 48.727951, 180.633331, 122.982422, 255.0f }, { 50.125607, 181.515884, 122.201355, 255.0f }, { 51.565845, 182.394363, 121.401421, 255.0f }, { 53.047649, 183.268753, 120.582619, 255.0f }, { 54.570000, 184.139084, 119.744942, 255.0f }, { 56.131622, 185.004791, 118.887634, 255.0f }, { 57.731232, 185.866440, 118.011192, 255.0f }, { 59.367825, 186.722977, 117.115639, 255.0f }, { 61.040371, 187.574951, 116.200439, 255.0f }, { 62.747849, 188.422058, 115.266121, 255.0f }, { 64.489243, 189.263794, 114.312424, 255.0f }, { 66.263535, 190.100449, 113.339088, 255.0f }, { 68.069962, 190.931503, 112.346115, 255.0f }, { 69.907997, 191.756943, 111.333260, 255.0f }, { 71.776634, 192.576767, 110.300766, 255.0f }, { 73.674858, 193.390472, 109.248627, 255.0f }, { 75.602142, 194.198044, 108.176865, 255.0f }, { 77.557739, 194.999512, 107.085464, 255.0f }, { 79.540871, 195.794617, 105.974426, 255.0f }, { 81.551292, 196.583069, 104.843758, 255.0f }, { 83.587982, 197.364899, 103.693199, 255.0f }, { 85.650673, 198.139587, 102.522499, 255.0f }, { 87.738876, 198.907394, 101.332153, 255.0f }, { 89.851799, 199.667816, 100.122177, 255.0f }, { 91.988953, 200.420822, 98.892563, 255.0f }, { 94.149567, 201.166428, 97.643074, 255.0f }, { 96.333649, 201.904160, 96.374443, 255.0f }, { 98.540413, 202.634216, 95.085930, 255.0f }, { 100.769371, 203.356125, 
93.778030, 255.0f }, { 103.020256, 204.070129, 92.450760, 255.0f }, { 105.292816, 204.775452, 91.103592, 255.0f }, { 107.586540, 205.472366, 89.737045, 255.0f }, { 109.900665, 206.160614, 88.351379, 255.0f }, { 112.234932, 206.840179, 86.946587, 255.0f }, { 114.588837, 207.510849, 85.522926, 255.0f }, { 116.961876, 208.172562, 84.080383, 255.0f }, { 119.353516, 208.824860, 82.619492, 255.0f }, { 121.763519, 209.468216, 81.139725, 255.0f }, { 124.191635, 210.101898, 79.641853, 255.0f }, { 126.636826, 210.725891, 78.126137, 255.0f }, { 129.099106, 211.340439, 76.592308, 255.0f }, { 131.577957, 211.945282, 75.041145, 255.0f }, { 134.072891, 212.540207, 73.472389, 255.0f }, { 136.583359, 213.125168, 71.886543, 255.0f }, { 139.108627, 213.699936, 70.284630, 255.0f }, { 141.648422, 214.264771, 68.666656, 255.0f }, { 144.201996, 214.819656, 67.033630, 255.0f }, { 146.768570, 215.364334, 65.385826, 255.0f }, { 149.347885, 215.898544, 63.723736, 255.0f }, { 151.938950, 216.422821, 62.048897, 255.0f }, { 154.541473, 216.936905, 60.361557, 255.0f }, { 157.154724, 217.440796, 58.663258, 255.0f }, { 159.777649, 217.934479, 56.955013, 255.0f }, { 162.410004, 218.418213, 55.238098, 255.0f }, { 165.050522, 218.891998, 53.514553, 255.0f }, { 167.698715, 219.355850, 51.785908, 255.0f }, { 170.353760, 219.809738, 50.054714, 255.0f }, { 173.014709, 220.254211, 48.323265, 255.0f }, { 175.680710, 220.689240, 46.594875, 255.0f }, { 178.350830, 221.114838, 44.872604, 255.0f }, { 181.023987, 221.531509, 43.160534, 255.0f }, { 183.699692, 221.939255, 41.463768, 255.0f }, { 186.376694, 222.338577, 39.787395, 255.0f }, { 189.053940, 222.729507, 38.138054, 255.0f }, { 191.730423, 223.112503, 36.523140, 255.0f }, { 194.405106, 223.488129, 34.951317, 255.0f }, { 197.077255, 223.856339, 33.432796, 255.0f }, { 199.745331, 224.217667, 31.978275, 255.0f }, { 202.408798, 224.572891, 30.601274, 255.0f }, { 205.066406, 224.921722, 29.316074, 255.0f }, { 207.716888, 225.265213, 28.138485, 255.0f }, { 210.359711, 225.603607, 27.085335, 255.0f }, { 212.993851, 225.937393, 26.174730, 255.0f }, { 215.618057, 226.267105, 25.424009, 255.0f }, { 218.231552, 226.593262, 24.850260, 255.0f }, { 220.833313, 226.916351, 24.468016, 255.0f }, { 223.422836, 227.236877, 24.288752, 255.0f }, { 225.999100, 227.555359, 24.320372, 255.0f }, { 228.561600, 227.872086, 24.565426, 255.0f }, { 231.109299, 228.188034, 25.021875, 255.0f }, { 233.641708, 228.503204, 25.682835, 255.0f }, { 236.157028, 228.819153, 26.538105, 255.0f }, { 238.655533, 229.135345, 27.573404, 255.0f }, { 241.137177, 229.452835, 28.773689, 255.0f }, { 243.601486, 229.771576, 30.122641, 255.0f }, { 246.047974, 230.092361, 31.604954, 255.0f }, { 248.476334, 230.415451, 33.204826, 255.0f }, { 250.886337, 230.741089, 34.908733, 255.0f }, { 253.278229, 231.070038, 36.703678, 255.0f }, }; // pointers to colormap palette arrays static float4* colormapPalettesGPU = NULL; static float4* colormapPalettesCPU = NULL; // cudaColormapInit cudaError_t cudaColormapInit() { if( colormapPalettesGPU != NULL ) return cudaSuccess; // already initialized // allocate memory const size_t numMaps = COLORMAP_VIRIDIS_INVERTED + 1; const size_t mapSize = sizeof(float4) * 256; const size_t memSize = mapSize * numMaps; if( CUDA_FAILED(cudaMalloc((void**)&colormapPalettesGPU, memSize)) ) return cudaErrorMemoryAllocation; if( CUDA_FAILED(cudaMallocHost((void**)&colormapPalettesCPU, memSize)) ) return cudaErrorMemoryAllocation; // copy palettes to pinned memory memcpy(colormapPalettesCPU, colormapPalettes, 
memSize/2); // create inverted palettes for( uint32_t c=0; c < numMaps/2; c++ ) for( uint32_t n=0; n < 256; n++ ) colormapPalettesCPU[((numMaps/2+c)*256)+n] = colormapPalettes[c*256+255-n]; // copy palettes to GPU if( CUDA_FAILED(cudaMemcpy(colormapPalettesGPU, colormapPalettesCPU, memSize, cudaMemcpyHostToDevice)) ) return cudaErrorInvalidMemcpyDirection; return cudaSuccess; } // cudaColormapFree cudaError_t cudaColormapFree() { if( colormapPalettesGPU != NULL ) { CUDA(cudaFree(colormapPalettesGPU)); colormapPalettesGPU = NULL; } if( colormapPalettesCPU != NULL ) { CUDA(cudaFreeHost(colormapPalettesCPU)); colormapPalettesCPU = NULL; } return cudaSuccess; } // cudaColormapPalette float4* cudaColormapPalette( cudaColormapType colormap ) { if( colormap > COLORMAP_VIRIDIS_INVERTED ) return NULL; if( CUDA_FAILED(cudaColormapInit()) ) return NULL; return colormapPalettesGPU + (colormap * 256); } // gpuColormapPalette template<typename T, cudaFilterMode filter> __global__ void gpuColormapPalette( float4* palette, float* input, int input_width, int input_height, T* output, int output_width, int output_height, float multiplier, float min_value ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if( x >= output_width || y >= output_height ) return; const float pixel = cudaFilterPixel<filter>(input, x, y, input_width, input_height, output_width, output_height); const float value = fmaxf(fminf((pixel - min_value) * multiplier, 255.0f), 0.0f); // __saturatef(pixel - min_value) * 255.0f; output[y * output_width + x] = cast_vec<T>(palette[(int)value]); } // gpuColormapFlow template<typename T, cudaFilterMode filter, cudaDataFormat format> __global__ void gpuColormapFlow( float2* input, int input_width, int input_height, T* output, int output_width, int output_height, float max_value ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if( x >= output_width || y >= output_height ) return; const float2 pixel = cudaFilterPixel<filter, format>(input, x, y, input_width, input_height, output_width, output_height); const float2 value = pixel / max_value; const float3 color = make_float3(1.0f + value.x, 1.0f - 0.5 * (value.x + value.y), 1.0f + value.y) * 255.0f; /*const float4 color = make_float4(__saturatef(1.0f + value.y), __saturatef(1.0f - 0.5 * (value.x + value.y)), __saturatef(1.0f + value.x), 1.0f); output[y * output_width + x] = color * 255.0f;*/ const float3 clamped = clamp(color, 0.0f, 255.0f); output[y * output_width + x] = make_vec<T>(clamped.x, clamped.y, clamped.z, 255.0f); } // gpuColormapNone template<typename T, cudaFilterMode filter, cudaDataFormat format> __global__ void gpuColormapNone( T* input, int input_width, int input_height, T* output, int output_width, int output_height ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if( x >= output_width || y >= output_height ) return; output[y * output_width + x] = cudaFilterPixel<filter, format>(input, x, y, input_width, input_height, output_width, output_height); } // cudaColormap cudaError_t cudaColormap( float* input, size_t input_width, size_t input_height, void* output, size_t output_width, size_t output_height, const float2& input_range, cudaDataFormat input_format, imageFormat output_format, cudaColormapType colormap, cudaFilterMode filter, cudaStream_t stream ) { if( !input || !output ) return cudaErrorInvalidDevicePointer; if( input_width == 0 || output_width == 0 
|| input_height == 0 || output_height == 0 ) return cudaErrorInvalidValue; if( colormap > COLORMAP_NONE ) return cudaErrorNotYetImplemented; if( input_width == output_width && input_height == output_height ) filter = FILTER_POINT; // palettized colormaps if( colormap <= COLORMAP_VIRIDIS_INVERTED ) { // get the pointer to the colormap float4* palette = cudaColormapPalette(colormap); if( !palette ) return cudaErrorMemoryAllocation; // calculate the multiplier to map from input_range -> [0,255] const float multiplier = 255.0f / (input_range.y - input_range.x); // launch kernel const dim3 blockDim(8, 8); const dim3 gridDim(iDivUp(output_width,blockDim.x), iDivUp(output_height,blockDim.y)); #define colormapKernelFilter(type, filterMode) \ gpuColormapPalette<type, filterMode><<<gridDim, blockDim, 0, stream>>>( \ palette, input, input_width, input_height, \ (type*)output, output_width, output_height, \ multiplier, input_range.x); #define colormapKernel(type) \ { \ if( filter == FILTER_POINT ) \ colormapKernelFilter(type, FILTER_POINT) \ else if( filter == FILTER_LINEAR ) \ colormapKernelFilter(type, FILTER_LINEAR) \ } if( output_format == IMAGE_RGB8 ) colormapKernel(uchar3) else if( output_format == IMAGE_RGBA8 ) colormapKernel(uchar4) else if( output_format == IMAGE_RGB32F ) colormapKernel(float3) else if( output_format == IMAGE_RGBA32F ) colormapKernel(float4) else { imageFormatErrorMsg(LOG_CUDA, "cudaColormap()", output_format); return cudaErrorInvalidValue; } } else if( colormap == COLORMAP_FLOW ) // parametric flow field { // get the maximum absolute value const float max_value = fmaxf(fabs(input_range.x), fabs(input_range.y)); // launch kernel const dim3 blockDim(8, 8); const dim3 gridDim(iDivUp(output_width,blockDim.x), iDivUp(output_height,blockDim.y)); #define flowKernelFilter(type, filterMode, layout) \ gpuColormapFlow<type, filterMode, layout><<<gridDim, blockDim, 0, stream>>>( \ (float2*)input, input_width, input_height, \ (type*)output, output_width, output_height, \ max_value); #define flowKernel(type) \ { \ if( filter == FILTER_POINT ) \ { \ if( input_format == FORMAT_CHW ) \ flowKernelFilter(type, FILTER_POINT, FORMAT_CHW) \ else if( input_format == FORMAT_HWC ) \ flowKernelFilter(type, FILTER_POINT, FORMAT_HWC) \ } \ else if( filter == FILTER_LINEAR ) \ { \ if( input_format == FORMAT_CHW ) \ flowKernelFilter(type, FILTER_LINEAR, FORMAT_CHW) \ else if( input_format == FORMAT_HWC ) \ flowKernelFilter(type, FILTER_LINEAR, FORMAT_HWC) \ } \ } if( output_format == IMAGE_RGB8 ) flowKernel(uchar3) else if( output_format == IMAGE_RGBA8 ) flowKernel(uchar4) else if( output_format == IMAGE_RGB32F ) flowKernel(float3) else if( output_format == IMAGE_RGBA32F ) flowKernel(float4) else { imageFormatErrorMsg(LOG_CUDA, "cudaColormap(COLORMAP_FLOW)", output_format); return cudaErrorInvalidValue; } } else if( colormap == COLORMAP_NONE ) { // launch kernel const dim3 blockDim(8, 8); const dim3 gridDim(iDivUp(output_width,blockDim.x), iDivUp(output_height,blockDim.y)); #define noneKernelFilter(type, filterMode, layout) \ gpuColormapNone<type, filterMode, layout><<<gridDim, blockDim, 0, stream>>>( \ (type*)input, input_width, input_height, \ (type*)output, output_width, output_height); #define noneKernel(type) \ { \ if( filter == FILTER_POINT ) \ { \ if( input_format == FORMAT_CHW ) \ noneKernelFilter(type, FILTER_POINT, FORMAT_CHW) \ else if( input_format == FORMAT_HWC ) \ noneKernelFilter(type, FILTER_POINT, FORMAT_HWC) \ } \ else if( filter == FILTER_LINEAR ) \ { \ if( input_format == FORMAT_CHW ) 
\ noneKernelFilter(type, FILTER_LINEAR, FORMAT_CHW) \ else if( input_format == FORMAT_HWC ) \ noneKernelFilter(type, FILTER_LINEAR, FORMAT_HWC) \ } \ } if( output_format == IMAGE_GRAY8 ) noneKernel(uchar) else if( output_format == IMAGE_GRAY32F ) noneKernel(float) /*else if( output_format == IMAGE_RGB8 ) noneKernel(uchar3) else if( output_format == IMAGE_RGBA8 ) noneKernel(uchar4)*/ else if( output_format == IMAGE_RGB32F ) noneKernel(float3) else if( output_format == IMAGE_RGBA32F ) noneKernel(float4) else { LogError(LOG_CUDA "cudaColormap(COLORMAP_NONE) -- unsupported image format (%s)\n", imageFormatToStr(output_format)); LogError(LOG_CUDA " supported formats are:\n"); LogError(LOG_CUDA " * gray8\n"); LogError(LOG_CUDA " * gray32f\n"); //LogError(LOG_CUDA " * rgb8\n"); //LogError(LOG_CUDA " * rgba8\n"); LogError(LOG_CUDA " * rgb32f\n"); LogError(LOG_CUDA " * rgba32f\n"); return cudaErrorInvalidValue; } } return CUDA(cudaGetLastError()); } // cudaColormap cudaError_t cudaColormap( float* input, void* output, size_t width, size_t height, const float2& input_range, cudaDataFormat input_format, imageFormat output_format, cudaColormapType colormap, cudaStream_t stream) { return cudaColormap(input, width, height, output, width, height, input_range, input_format, output_format, colormap, FILTER_POINT, stream); }
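// ----------------------------------------------------------------------------
// Usage sketch (added for illustration, not part of the original file).
// It shows one plausible way to call the cudaColormap() overload above to
// colorize a single-channel float field into an RGBA8 image.  The image
// dimensions, the [0,10] input range and COLORMAP_VIRIDIS are hypothetical
// choices -- the code above only names COLORMAP_VIRIDIS_INVERTED explicitly,
// so the non-inverted enum value is assumed to exist alongside it.  The
// input_format argument is ignored by the palettized path, so FORMAT_HWC is
// passed only as a placeholder.
// ----------------------------------------------------------------------------
cudaError_t exampleColorizeField( float* field, uchar4* rgbaOut,
                                  size_t width, size_t height,
                                  cudaStream_t stream )
{
	// map field values in [0,10] onto the 256-entry palette
	const float2 range = make_float2(0.0f, 10.0f);

	return cudaColormap(field, width, height,
	                    rgbaOut, width, height,
	                    range, FORMAT_HWC,
	                    IMAGE_RGBA8, COLORMAP_VIRIDIS,
	                    FILTER_POINT, stream);
}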
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> /*HIP error wraper*/ static void HIP_ERROR(hipError_t err) { if (err != hipSuccess) { printf("HIP ERROR: %s, exiting\n", hipGetErrorString(err)); exit(-1); } } /*constants*/ #define EPS_0 8.85418782e-12 // F/m, vacuum permittivity #define K 1.38065e-23 // J/K, Boltzmann constant #define ME 9.10938215e-31 // kg, electron mass #define QE 1.602176565e-19 // C, elementary charge #define AMU 1.660538921e-27 // kg, atomic mass unit #define EV_TO_K 11604.52 // 1eV in Kelvin, QE/K /*simulation parameters, these could come from an input file*/ #define PLASMA_DEN 1e16 // plasma density to load #define NUM_IONS 500000 // number of ions #define NUM_ELECTRONS 500000 // number of electrons #define DX 1e-4 // cell spacing #define NC 100 // number of cells #define NUM_TS 1000 // number of time steps #define DT 1e-11 // time step size #define ELECTRON_TEMP 3.0 // electron temperature in eV #define ION_TEMP 1.0 // ion temperature in eV /*domain parameters, set here so can access from GPU*/ #define X0 0 /*mesh origin*/ #define XL NC* DX /*domain length*/ #define XMAX (X0 + XL) /*domain max position*/ const int THREADS_PER_BLOCK = 256; /* Data structure to hold domain information*/ struct Domain { const int ni = NC + 1; /*number of nodes*/ const double x0 = X0; const double dx = DX; const double xl = XL; const double xmax = XMAX; /*data structures*/ double* phi; /*potential*/ double* ef; /*electric field on the cpu*/ double* rho; /*charge density*/ float* ndi; /*ion density on the CPU*/ float* nde; /*electron density on the CPU*/ }; /* Data structure for particle storage **/ struct Particle { double x; /*position*/ double v; /*velocity*/ bool alive; /*flag to avoid removing particles*/ }; /* Data structure to hold species information*/ struct Species { double mass; /*particle mass in kg*/ double charge; /*particle charge in Coulomb*/ double spwt; /*species specific weight*/ int np; /*number of particles*/ int np_alloc; /*size of the allocated data array*/ Particle* part; /*array holding particles on the CPU*/ }; /** FUNCTION PROTOTYPES **/ double rnd(); double SampleVel(double v_th); void ScatterSpecies(Species* species, Particle* species_part_gpu, float* den, float* den_gpu); void ComputeRho(Species* ions, Species* electrons); bool SolvePotential(double* phi, double* rho); bool SolvePotentialDirect(double* phi, double* rho); void ComputeEF(double* phi, double* ef, double* ef_gpu); void PushSpecies(Species* species, Particle* species_part_gpu, double* ef); void RewindSpecies(Species* species, Particle* species_part_gpu, double* ef); void AddParticle(Species* species, double x, double v); __device__ double XtoL(double pos); __device__ double gather(double lc, const double* field); __device__ void scatter(double lc, float value, float* field); void WriteResults(int ts); /* GLOBAL VARIABLES */ Domain domain; FILE* file_res; /* --------- main -------------*/ int main(int argc, char* argv[]) { int p; int ts; // time step domain.phi = new double[domain.ni]; // potential domain.rho = new double[domain.ni]; // charge density domain.ef = new double[domain.ni]; // electric field domain.nde = new float[domain.ni]; // number density of electrons domain.ndi = new float[domain.ni]; // number density of ions /*save pointers so we can write phi instead of domain.phi*/ double* phi = domain.phi; double* rho = domain.rho; double* ef = domain.ef; float* nde = domain.nde; float* ndi = domain.ndi; /*also 
allocate device memory */ float *nde_gpu, *ndi_gpu; HIP_ERROR(hipMalloc((void**)&nde_gpu, domain.ni * sizeof(float))); HIP_ERROR(hipMalloc((void**)&ndi_gpu, domain.ni * sizeof(float))); double* ef_gpu; HIP_ERROR(hipMalloc((void**)&ef_gpu, domain.ni * sizeof(double))); Particle *ions_part_gpu; HIP_ERROR(hipMalloc((void**)&ions_part_gpu, NUM_IONS * sizeof(Particle))); Particle *electrons_part_gpu; HIP_ERROR(hipMalloc((void**)&electrons_part_gpu, NUM_ELECTRONS * sizeof(Particle))); /*clear data*/ memset(phi, 0, sizeof(double) * domain.ni); /*variables to hold species data ions*/ Species ions; Species electrons; /*set material data*/ ions.mass = 16 * AMU; ions.charge = QE; ions.spwt = PLASMA_DEN * domain.xl / NUM_IONS; ions.np = 0; ions.np_alloc = NUM_IONS; ions.part = new Particle[NUM_IONS]; electrons.mass = ME; // electrons electrons.charge = -QE; electrons.spwt = PLASMA_DEN * domain.xl / NUM_ELECTRONS; electrons.np = 0; electrons.np_alloc = NUM_ELECTRONS; electrons.part = new Particle[NUM_ELECTRONS]; /*randomize RNG*/ srand(123); /*load uniformly spaced ions */ double delta_ions = domain.xl / NUM_IONS; double v_thi = sqrt(2 * K * ION_TEMP * EV_TO_K / ions.mass); for (p = 0; p < NUM_IONS; p++) { double x = domain.x0 + p * delta_ions; double v = SampleVel(v_thi); AddParticle(&ions, x, v); } /*load uniformly spaced electrons */ double delta_electrons = domain.xl / NUM_ELECTRONS; double v_the = sqrt(2 * K * ELECTRON_TEMP * EV_TO_K / electrons.mass); for (p = 0; p < NUM_ELECTRONS; p++) { double x = domain.x0 + p * delta_electrons; double v = SampleVel(v_the); AddParticle(&electrons, x, v); } /*copy particles to the GPU*/ HIP_ERROR(hipMemcpy(ions_part_gpu, ions.part, NUM_IONS * sizeof(Particle), hipMemcpyHostToDevice)); HIP_ERROR(hipMemcpy(electrons_part_gpu, electrons.part, NUM_ELECTRONS * sizeof(Particle), hipMemcpyHostToDevice)); /*compute number density*/ ScatterSpecies(&ions, ions_part_gpu, ndi, ndi_gpu); ScatterSpecies(&electrons, electrons_part_gpu, nde, nde_gpu); /*compute charge density and solve potential*/ ComputeRho(&ions, &electrons); SolvePotential(phi, rho); ComputeEF(phi, ef, ef_gpu); RewindSpecies(&ions, ions_part_gpu, ef_gpu); RewindSpecies(&electrons, electrons_part_gpu, ef_gpu); /*OUTPUT*/ file_res = fopen("result.dat", "w"); fprintf(file_res, "VARIABLES = x nde ndi rho phi ef\n"); WriteResults(0); clock_t start = clock(); // grab starting clock time /* MAIN LOOP*/ for (ts = 1; ts <= NUM_TS; ts++) { /*compute number density*/ ScatterSpecies(&ions, ions_part_gpu, ndi, ndi_gpu); ScatterSpecies(&electrons, electrons_part_gpu, nde, nde_gpu); ComputeRho(&ions, &electrons); SolvePotential(phi, rho); ComputeEF(phi, ef, ef_gpu); /*move particles*/ PushSpecies(&electrons, electrons_part_gpu, ef_gpu); PushSpecies(&ions, ions_part_gpu, ef_gpu); /*write diagnostics*/ if (ts % 25 == 0) { /*max phi*/ double max_phi = abs(phi[0]); for (int i = 0; i < domain.ni; i++) if (abs(phi[i]) > max_phi) max_phi = abs(phi[i]); printf("TS:%i\tnp_i:%d\tnp_e:%d\tdphi:%.3g\n", ts, ions.np, electrons.np, max_phi - phi[0]); } /*save data*/ if (ts % 1000 == 0) WriteResults(ts); } clock_t end = clock(); fclose(file_res); /*free up memory*/ delete phi; delete rho; delete ef; delete nde; delete ndi; hipFree(nde_gpu); hipFree(ndi_gpu); hipFree(ef_gpu); /*free particles*/ delete ions.part; delete electrons.part; hipFree(ions_part_gpu); hipFree(electrons_part_gpu); printf("Time per time step: %.3g ms\n", 1000 * (end - start) / (double)(CLOCKS_PER_SEC * NUM_TS)); /* call hipDeviceReset for correct profiling data*/ 
HIP_ERROR(hipDeviceReset()); return 0; } /***** HELPER FUNCTIONS *********************************************************/ /* random number generator for now using built-in but this is not adequate for real simulations*/ double rnd() { return rand() / (double)RAND_MAX; } /* samples random velocity from Maxwellian distribution using Birdsall's method*/ double SampleVel(double v_th) { const int M = 12; double sum = 0; for (int i = 0; i < M; i++) sum += rnd(); return sqrt(0.5) * v_th * (sum - M / 2.0) / sqrt(M / 12.0); } /* move particles*/ __global__ void scatterParticle(const Particle* __restrict particles, float*__restrict den, long N) { /*get particle id*/ long p = blockIdx.x * blockDim.x + threadIdx.x; if (p < N && particles[p].alive) { double lc = XtoL(particles[p].x); scatter(lc, 1.f, den); } } /*scatter particles of species to the mesh*/ void ScatterSpecies(Species* species, Particle* species_part_gpu, float* den, float* den_gpu) { /*initialize densities to zero*/ HIP_ERROR(hipMemset(den_gpu, 0, sizeof(float) * domain.ni)); int size = species->np_alloc; /*scatter particles to the mesh*/ int nblocks = 1 + size / THREADS_PER_BLOCK; hipLaunchKernelGGL(scatterParticle, dim3(nblocks), dim3(THREADS_PER_BLOCK), 0, 0, species_part_gpu, den_gpu, size); /*copy density back to CPU*/ HIP_ERROR(hipMemcpy(den, den_gpu, sizeof(float) * domain.ni, hipMemcpyDeviceToHost)); /*divide by cell volume*/ for (int i = 0; i < domain.ni; i++) den[i] *= species->spwt / domain.dx; /*only half cell at boundaries*/ den[0] *= 2.0; den[domain.ni - 1] *= 2.0; } /*adds new particle to the species, returns pointer to the newly added data*/ void AddParticle(Species* species, double x, double v) { /*abort the simulation if we ran out of space to store this particle*/ if (species->np > species->np_alloc - 1) { printf("Too many particles!\n"); exit(-1); } /*store position and velocity of this particle*/ species->part[species->np].x = x; species->part[species->np].v = v; species->part[species->np].alive = true; /*increment particle counter*/ species->np++; } /*computes charge density by adding ion and electron data*/ void ComputeRho(Species* ions, Species* electrons) { double* rho = domain.rho; for (int i = 0; i < domain.ni; i++) rho[i] = ions->charge * domain.ndi[i] + electrons->charge * domain.nde[i]; } /*Thomas algorithm for a tri-diagonal matrix*/ bool SolvePotentialDirect(double* x, double* rho) { /*set coefficients, this should be pre-computed*/ int ni = domain.ni; double dx2 = domain.dx * domain.dx; int i; double* a = new double[ni]; double* b = new double[ni]; double* c = new double[ni]; /*central difference on internal nodes*/ for (i = 1; i < ni - 1; i++) { a[i] = 1; b[i] = -2; c[i] = 1; } /*dirichlet b.c. on boundaries*/ a[0] = 0; b[0] = 1; c[0] = 0; a[ni - 1] = 0; b[ni - 1] = 1; c[ni - 1] = 0; /*multiply RHS*/ for (i = 1; i < domain.ni - 1; i++) x[i] = -rho[i] * dx2 / EPS_0; x[0] = 0; x[ni - 1] = 0; /* Modify the coefficients. */ c[0] /= b[0]; /* Division by zero risk. */ x[0] /= b[0]; /* Division by zero would imply a singular matrix. */ for (i = 1; i < ni; i++) { double id = (b[i] - c[i - 1] * a[i]); /* Division by zero risk. */ c[i] /= id; /* Last value calculated is redundant. */ x[i] = (x[i] - x[i - 1] * a[i]) / id; } /* Now back substitute. 
*/ for (i = ni - 2; i >= 0; i--) x[i] = x[i] - c[i] * x[i + 1]; return true; } /* solves potential using the Gauss Seidel Method, returns true if converged*/ bool SolvePotential(double* phi, double* rho) { double L2; double dx2 = domain.dx * domain.dx; /*precompute*/ /*initialize boundaries*/ phi[0] = phi[domain.ni - 1] = 0; /*solve potential, identical to lesson 2*/ for (int solver_it = 0; solver_it < 40000; solver_it++) { /*Gauss Seidel method, phi[i-1]-2*phi[i]+phi[i+1] = -dx^2*rho[i]/eps_0*/ for (int i = 1; i < domain.ni - 1; i++) { /*SOR*/ double g = 0.5 * (phi[i - 1] + phi[i + 1] + dx2 * rho[i] / EPS_0); phi[i] = phi[i] + 1.4 * (g - phi[i]); } /*check for convergence*/ if (solver_it % 25 == 0) { double sum = 0; for (int i = 1; i < domain.ni - 1; i++) { double R = -rho[i] / EPS_0 - (phi[i - 1] - 2 * phi[i] + phi[i + 1]) / dx2; sum += R * R; } L2 = sqrt(sum) / domain.ni; if (L2 < 1e-4) { return true; } } } printf("Gauss-Seidel solver failed to converge, L2=%.3g!\n", L2); return false; } /* computes electric field by differentiating potential*/ void ComputeEF(double* phi, double* ef, double* ef_gpu) { for (int i = 1; i < domain.ni - 1; i++) ef[i] = -(phi[i + 1] - phi[i - 1]) / (2 * domain.dx); // central difference /*one sided difference at boundaries*/ ef[0] = -(phi[1] - phi[0]) / domain.dx; ef[domain.ni - 1] = -(phi[domain.ni - 1] - phi[domain.ni - 2]) / domain.dx; /*copy to the gpu*/ HIP_ERROR(hipMemcpy(ef_gpu, ef, domain.ni * sizeof(double), hipMemcpyHostToDevice)); } /*GPU code to move particles*/ __global__ void pushParticle(Particle*__restrict particles, const double*__restrict ef, double qm, long N) { /*get particle id*/ long p = blockIdx.x * blockDim.x + threadIdx.x; if (p < N && particles[p].alive) { /*grab pointer to this particle*/ Particle* part = &particles[p]; /*compute particle node position*/ double lc = XtoL(part->x); /*gather electric field onto particle position*/ double part_ef = gather(lc, ef); /*advance velocity*/ part->v += DT * qm * part_ef; /*advance position*/ part->x += DT * part->v; /*remove particles leaving the domain*/ if (part->x < X0 || part->x >= XMAX) part->alive = false; } } /* moves particles of a single species, returns wall charge*/ void PushSpecies(Species* species, Particle* species_part_gpu, double* ef) { /*precompute q/m*/ double qm = species->charge / species->mass; int size = species->np_alloc; /*loop over particles*/ int nblocks = 1 + size / THREADS_PER_BLOCK; hipLaunchKernelGGL(pushParticle, dim3(nblocks), dim3(THREADS_PER_BLOCK), 0, 0, species_part_gpu, ef, qm, size); } /*GPU code to rewind particles*/ __global__ void rewindParticle(Particle*__restrict particles, const double*__restrict ef, double qm, long N) { /*get particle id*/ long p = blockIdx.x * blockDim.x + threadIdx.x; if (p < N && particles[p].alive) { /*grab pointer to this particle*/ Particle* part = &particles[p]; /*compute particle node position*/ double lc = XtoL(part->x); /*gather electric field onto particle position*/ double part_ef = gather(lc, ef); /*advance velocity*/ part->v -= 0.5 * DT * qm * part_ef; } } /* rewinds particle velocities by -0.5DT*/ void RewindSpecies(Species* species, Particle* species_part_gpu, double* ef) { /*precompute q/m*/ double qm = species->charge / species->mass; int size = species->np_alloc; /*loop over particles*/ int nblocks = 1 + size / THREADS_PER_BLOCK; hipLaunchKernelGGL(rewindParticle, dim3(nblocks), dim3(THREADS_PER_BLOCK), 0, 0, species_part_gpu, ef, qm, size); } /* converts physical coordinate to logical*/ __device__ double 
XtoL(double pos) { double li = (pos - 0) / DX; return li; } /* atomic scatter of scalar value onto a field at logical coordinate lc*/ __device__ void scatter(double lc, float value, float* field) { int i = (int)lc; float di = lc - i; atomicAdd(&(field[i]), value * (1 - di)); atomicAdd(&(field[i + 1]), value * (di)); } /* gathers field value at logical coordinate lc*/ __device__ double gather(double lc, const double* field) { int i = (int)lc; double di = lc - i; /*gather field value onto particle position*/ double val = field[i] * (1 - di) + field[i + 1] * (di); return val; } /* writes new zone to the results file*/ void WriteResults(int ts) { fprintf(file_res, "ZONE I=%d T=ZONE_%06d\n", domain.ni, ts); for (int i = 0; i < domain.ni; i++) { fprintf(file_res, "%g %g %g %g %g %g\n", i * domain.dx, domain.nde[i], domain.ndi[i], domain.rho[i], domain.phi[i], domain.ef[i]); } fflush(file_res); }
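/* ---------------------------------------------------------------------------
   Illustrative check (added for clarity, not part of the original file).
   SampleVel() above uses Birdsall's sum-of-12-uniforms method: for M = 12,
   (sum - M/2)/sqrt(M/12) is approximately a standard normal, so the returned
   velocity has zero mean and standard deviation v_th/sqrt(2) = sqrt(kT/m).
   The hypothetical host helper below samples the distribution and prints the
   measured moments so they can be compared against those expected values.
   --------------------------------------------------------------------------*/
void CheckMaxwellianSampler(double v_th, int num_samples)
{
    double sum = 0, sum_sq = 0;
    for (int s = 0; s < num_samples; s++)
    {
        double v = SampleVel(v_th);
        sum += v;
        sum_sq += v * v;
    }
    double mean = sum / num_samples;
    double sigma = sqrt(sum_sq / num_samples - mean * mean);
    printf("sampler: mean=%.3g (expect ~0), sigma=%.3g (expect %.3g)\n",
           mean, sigma, v_th / sqrt(2.0));
}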
#include "polydet.cu" #include "sturm.cu" #include "polyquotient.cu" #include "cheirality.cu" #include "essential_matrix_5pt.cu" //#include "essential_matrix_6pt.cu" // Declare constant memory (64KB maximum) __constant__ int c_num_points; __constant__ int c_num_test_points; __constant__ int c_ransac_num_test_points; __constant__ int c_ransac_num_iterations; __constant__ double c_inlier_threshold; // Function declarations __device__ int RandomInt(curandState* state, const int global_index, const int min_int, const int max_int); template<int n> __device__ void SelectSubset(const double *qs, const double *qps, curandState* state, const int global_index, Matches_n<n>& q, Matches_n<n>& qp); template<typename T> __device__ void ComputeError(const T *q, const T *qp, const Ematrix &E, T &error); /* * Initialise a state for each thread */ __global__ void SetupRandomState(const unsigned long long seed, curandState* state) { int global_index = threadIdx.x + blockDim.x * blockIdx.x; curand_init(seed, global_index, 0, &state[global_index]); } /* * Estimate the essential matrix, using the 5-point algorithm and RANSAC */ template <int subset_size> __global__ void EstimateEssentialMatrix(const double *qs, // Two sets of matching points const double *qps, // (flattened 2D arrays) curandState* state, // Random number generator state int *num_inliers, // Number of inliers per thread double (*essential_matrices)[3][3]) { // Essential matrices with greatest number of inliers int global_index = threadIdx.x + blockDim.x * blockIdx.x; int num_essential_matrices; Matches_n<subset_size> q, qp; Ematrix essential_matrix; Ematrix essential_matrix_set[10]; // RANSAC int best_num_inliers = 0; for (int i = 0; i < c_ransac_num_iterations; ++i) { // Generate hypothesis set SelectSubset<subset_size>(qs, qps, state, global_index, q, qp); // Compute up to 10 essential matrices using the 5 point algorithm compute_E_matrices_optimized(q, qp, essential_matrix_set, num_essential_matrices); /** // Polish all essential matrices const double PolishThreshold = 5.0e-24; const int MaxReps = 7; // Somewhat arbitrary bound for (int j = 0; j < num_essential_matrices; ++j) { double square_error = sq_error(essential_matrix_set[j], q, qp); if (square_error > PolishThreshold) polish_E(q, qp, essential_matrix_set[j], MaxReps); } // Find those that give correct cheirality int num_projection_matrices = num_essential_matrices; Pmatrix projection_matrix_set[10]; compute_P_matrices(q, qp, essential_matrix_set, (double *)0, projection_matrix_set, num_projection_matrices, subset_size); **/ // Test essential matrices in solution set // Choose the solution with the greatest number of inliers int best_num_inliers_subset = 0; int best_index = 0; for (int j = 0; j < num_essential_matrices; ++j) { // for (int j = 0; j < num_projection_matrices; ++j) { int inlier_count = 0; for (int k = 0; k < c_num_test_points; ++k) { double error; double q_test[3] = {qs[2 * k], qs[2 * k + 1], 1.0}; double qp_test[3] = {qps[2 * k], qps[2 * k + 1], 1.0}; ComputeError<double>(q_test, qp_test, essential_matrix_set[j], error); if (error <= c_inlier_threshold) { inlier_count++; } } if (inlier_count > best_num_inliers_subset) { best_num_inliers_subset = inlier_count; best_index = j; } } // Evaluate best essential matrix on full set of test matches int inlier_count = 0; for (int k = 0; k < c_ransac_num_test_points; ++k) { double error; double q_test[3] = {qs[2 * k], qs[2 * k + 1], 1.0}; double qp_test[3] = {qps[2 * k], qps[2 * k + 1], 1.0}; ComputeError<double>(q_test, qp_test, 
essential_matrix_set[best_index], error); if (error <= c_inlier_threshold) { inlier_count++; } } if (inlier_count > best_num_inliers) { best_num_inliers = inlier_count; memcpy(essential_matrix, &essential_matrix_set[best_index], 3 * 3 * sizeof(double)); } } // Copy to output num_inliers[global_index] = best_num_inliers; memcpy(&essential_matrices[global_index], essential_matrix, 3 * 3 * sizeof(double)); } /* * Estimate the essential matrix and camera matrix, using the 5-point algorithm and RANSAC */ template <int subset_size> __global__ void EstimateProjectionMatrix(const double *qs, // Two sets of matching points const double *qps, // (flattened 2D arrays) curandState* state, // Random number generator state int *num_inliers, // Number of inliers per thread double (*essential_matrices)[3][3], // Essential matrices with greatest number of inliers double (*projection_matrices)[3][4]){ // camera matrices with greates number of inliers int global_index = threadIdx.x + blockDim.x * blockIdx.x; int num_essential_matrices; Matches_n<subset_size> q, qp; Ematrix essential_matrix; Pmatrix projection_matrix; Ematrix essential_matrix_set[10]; // RANSAC int best_num_inliers = 0; for (int i = 0; i < c_ransac_num_iterations; ++i) { // Generate hyposthesis set SelectSubset<subset_size>(qs, qps, state, global_index, q, qp); // Compute up to 10 essential matrices using the 5 point algorithm compute_E_matrices_optimized(q, qp, essential_matrix_set, num_essential_matrices); /** // Polish all essential matrices const double PolishThreshold = 5.0e-24; const int MaxReps = 7; // Somewhat arbitrary bound for (int j = 0; j < num_essential_matrices; ++j) { double square_error = sq_error(essential_matrix_set[j], q, qp); if (square_error > PolishThreshold) polish_E(q, qp, essential_matrix_set[j], MaxReps); } **/ // Find those that give correct cheirality int num_projection_matrices = num_essential_matrices; Pmatrix projection_matrix_set[10]; compute_P_matrices(q, qp, essential_matrix_set, (double *)0, projection_matrix_set, num_projection_matrices, subset_size); // Test essential matrices in solution set // Choose the solution with the greatest number of inliers int best_num_inliers_subset = 0; int best_index = 0; // for (int j = 0; j < num_essential_matrices; ++j) { for (int j = 0; j < num_projection_matrices; ++j) { int inlier_count = 0; for (int k = 0; k < c_num_test_points; ++k) { double error; double q_test[3] = {qs[2 * k], qs[2 * k + 1], 1.0}; double qp_test[3] = {qps[2 * k], qps[2 * k + 1], 1.0}; ComputeError<double>(q_test, qp_test, essential_matrix_set[j], error); if (error <= c_inlier_threshold) { inlier_count++; } } if (inlier_count > best_num_inliers_subset) { best_num_inliers_subset = inlier_count; best_index = j; } } // Evaluate best essential matrix on full set of test matches int inlier_count = 0; for (int k = 0; k < c_ransac_num_test_points; ++k) { double error; double q_test[3] = {qs[2 * k], qs[2 * k + 1], 1.0}; double qp_test[3] = {qps[2 * k], qps[2 * k + 1], 1.0}; ComputeError<double>(q_test, qp_test, essential_matrix_set[best_index], error); if (error <= c_inlier_threshold) { inlier_count++; } } if (inlier_count > best_num_inliers) { best_num_inliers = inlier_count; memcpy(essential_matrix, &essential_matrix_set[best_index], 3 * 3 * sizeof(double)); memcpy(projection_matrix, &projection_matrix_set[best_index], 3 * 4 * sizeof(double)); } } // Copy to output num_inliers[global_index] = best_num_inliers; memcpy(&essential_matrices[global_index], essential_matrix, 3 * 3 * sizeof(double)); 
memcpy(&projection_matrices[global_index], projection_matrix, 3 * 4 * sizeof(double)); } /* * Compute Sampson distance given a pair of matched points and an essential matrix */ template<typename T> __device__ void ComputeError(const T *q, const T *qp, const Ematrix &E, T &error) { // Compute Ex T Ex[3]; for (int k = 0; k < 3; k++) { T sum = 0.0; for (int l = 0; l < 3; l++) { sum += E[k][l] * q[l]; Ex[k] = sum; } } // Compute x^TE T xE[3]; for (int k = 0; k < 3; k++) { T sum = 0.0; for (int l = 0; l < 3; l++) { sum += qp[l] * E[l][k]; xE[k] = sum; } } // Compute xEx T xEx = 0.0; for (int k = 0; k < 3; k++) { xEx += qp[k] * Ex[k]; } // Compute Sampson error T d = sqrt(Ex[0]*Ex[0]+Ex[1]*Ex[1]+xE[0]*xE[0]+xE[1]*xE[1]); error = xEx / d; if (error < 0.0) error = -error; } /* * Generate an integer in the range [min_int, max_int] */ __device__ int RandomInt(curandState* state, const int global_index, const int min_int, const int max_int) { // Generate a random float in (0,1) float random_float = curand_uniform(&state[global_index]); random_float *= (max_int - min_int + 0.999999f); random_float += min_int; return (int) truncf(random_float); } /* * Generate a random subset of qs and qps, each thread selects a different subset * Optimised for speed, no checking that elements are unique */ template<int n> __device__ void SelectSubset(const double* qs, const double* qps, curandState* state, const int global_index, Matches_n<n>& q, Matches_n<n>& qp) { for (int i = 0; i < n; ++i) { int index = RandomInt(state, global_index, 0, c_num_points - 1); for (int j = 0; j < 2; ++j) { q[i][j] = qs[2 * index + j]; qp[i][j] = qps[2 * index + j]; } q[i][2] = 1.0; qp[i][2] = 1.0; } } /* * Generate a random subset of qs and qps, each thread selects a different subset * Ensures all elements are unique */ /* template<int n> __device__ void SelectSubset(const double* qs, const double* qps, curandState* state, const bool random, const int global_index, Matches_n<n>& q, Matches_n<n>& qp) { // Generate index set int index; int index_set[n] = {0}; for (int i = 0; i < n; ++i) { bool index_in_set = true; while (index_in_set) { if (random) index = RandomInt(state, global_index, 0, c_num_points - 1); else index = (global_index + i) % c_num_points; // Debug, use sequential points index_in_set = false; for (int j = 0; j < i; ++j) { // Only look at previous entries if (index_set[j] == index) { index_in_set = true; } } if (!index_in_set) { index_set[i] = index; } } } for (int i = 0; i < n; ++i) { for (int j = 0; j < 2; ++j) { q[i][j] = qs[2 * index_set[i] + j]; qp[i][j] = qps[2 * index_set[i] + j]; } q[i][2] = 1.0; qp[i][2] = 1.0; } } */
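/*
 * Host-side launch sketch (added for illustration, not part of the original
 * file).  It shows one plausible way to drive the kernels above: upload the
 * RANSAC parameters into the __constant__ symbols, seed the per-thread RNG
 * state, and run the 5-point estimator (subset_size = 5).  The grid/block
 * sizes, parameter values and buffer handling are hypothetical; d_qs and
 * d_qps are assumed to hold num_points (x,y) pairs already resident on the
 * device, and the reduction over per-thread results is left out.
 */
void ExampleEstimateEssentialMatrix(const double* d_qs, const double* d_qps,
                                    int num_points)
{
  const int threads_per_block = 64;
  const int num_blocks = 8;
  const int num_threads = threads_per_block * num_blocks;

  // RANSAC parameters (hypothetical values)
  int num_test_points = (num_points < 200) ? num_points : 200;
  int ransac_num_test_points = num_points;
  int ransac_num_iterations = 50;
  double inlier_threshold = 1e-3;
  cudaMemcpyToSymbol(c_num_points, &num_points, sizeof(int));
  cudaMemcpyToSymbol(c_num_test_points, &num_test_points, sizeof(int));
  cudaMemcpyToSymbol(c_ransac_num_test_points, &ransac_num_test_points, sizeof(int));
  cudaMemcpyToSymbol(c_ransac_num_iterations, &ransac_num_iterations, sizeof(int));
  cudaMemcpyToSymbol(c_inlier_threshold, &inlier_threshold, sizeof(double));

  // Per-thread RNG state and per-thread outputs
  curandState* d_state;
  int* d_num_inliers;
  double (*d_essential_matrices)[3][3];
  cudaMalloc((void**)&d_state, num_threads * sizeof(curandState));
  cudaMalloc((void**)&d_num_inliers, num_threads * sizeof(int));
  cudaMalloc((void**)&d_essential_matrices, num_threads * sizeof(double[3][3]));

  SetupRandomState<<<num_blocks, threads_per_block>>>(1234ULL, d_state);
  EstimateEssentialMatrix<5><<<num_blocks, threads_per_block>>>(
      d_qs, d_qps, d_state, d_num_inliers, d_essential_matrices);
  cudaDeviceSynchronize();

  // ... copy d_num_inliers / d_essential_matrices back to the host and keep
  //     the matrix reported by the thread with the most inliers ...

  cudaFree(d_state);
  cudaFree(d_num_inliers);
  cudaFree(d_essential_matrices);
}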
#include "cupoch/geometry/boundingvolume.h" #include "cupoch/geometry/densegrid.inl" #include "cupoch/geometry/geometry_functor.h" #include "cupoch/geometry/intersection_test.h" #include "cupoch/geometry/occupancygrid.h" #include "cupoch/geometry/pointcloud.h" #include "cupoch/geometry/voxelgrid.h" #include "cupoch/utility/eigen.h" #include "cupoch/utility/platform.h" namespace cupoch { namespace geometry { namespace { struct extract_range_voxels_functor { extract_range_voxels_functor(const Eigen::Vector3i& extents, int resolution, const Eigen::Vector3i& min_bound) : extents_(extents), resolution_(resolution), min_bound_(min_bound){}; const Eigen::Vector3i extents_; const int resolution_; const Eigen::Vector3i min_bound_; __device__ int operator()(size_t idx) const { int x = idx / (extents_[1] * extents_[2]); int yz = idx % (extents_[1] * extents_[2]); int y = yz / extents_[2]; int z = yz % extents_[2]; Eigen::Vector3i gidx = min_bound_ + Eigen::Vector3i(x, y, z); return IndexOf(gidx, resolution_); } }; __device__ int VoxelTraversal(Eigen::Vector3i* voxels, int n_buffer, const Eigen::Vector3i& half_resolution, const Eigen::Vector3f& start, const Eigen::Vector3f& end, float voxel_size) { int n_voxels = 0; Eigen::Vector3f ray = end - start; float length = ray.norm(); if (length == 0) { return n_voxels; } ray /= length; Eigen::Vector3i current_voxel(floorf(start[0] / voxel_size), floorf(start[1] / voxel_size), floorf(start[2] / voxel_size)); Eigen::Vector3i last_voxel(floorf(end[0] / voxel_size), floorf(end[1] / voxel_size), floorf(end[2] / voxel_size)); float stepX = (ray[0] > 0) ? 1 : ((ray[0] < 0) ? -1 : 0); float stepY = (ray[1] > 0) ? 1 : ((ray[1] < 0) ? -1 : 0); float stepZ = (ray[2] > 0) ? 1 : ((ray[2] < 0) ? -1 : 0); float voxel_boundary_x = (current_voxel[0] + 0.5 * stepX) * voxel_size; float voxel_boundary_y = (current_voxel[1] + 0.5 * stepY) * voxel_size; float voxel_boundary_z = (current_voxel[2] + 0.5 * stepZ) * voxel_size; float tMaxX = (stepX != 0) ? (voxel_boundary_x - start[0]) / ray[0] : std::numeric_limits<float>::infinity(); float tMaxY = (stepY != 0) ? (voxel_boundary_y - start[1]) / ray[1] : std::numeric_limits<float>::infinity(); float tMaxZ = (stepZ != 0) ? (voxel_boundary_z - start[2]) / ray[2] : std::numeric_limits<float>::infinity(); float tDeltaX = (stepX != 0) ? voxel_size / fabs(ray[0]) : std::numeric_limits<float>::infinity(); float tDeltaY = (stepY != 0) ? voxel_size / fabs(ray[1]) : std::numeric_limits<float>::infinity(); float tDeltaZ = (stepZ != 0) ? 
voxel_size / fabs(ray[2]) : std::numeric_limits<float>::infinity(); voxels[n_voxels] = current_voxel + half_resolution; ++n_voxels; while (n_voxels < n_buffer) { if (tMaxX < tMaxY) { if (tMaxX < tMaxZ) { current_voxel[0] += stepX; tMaxX += tDeltaX; } else { current_voxel[2] += stepZ; tMaxZ += tDeltaZ; } } else { if (tMaxY < tMaxZ) { current_voxel[1] += stepY; tMaxY += tDeltaY; } else { current_voxel[2] += stepZ; tMaxZ += tDeltaZ; } } if (last_voxel == current_voxel) { break; } else { float dist_from_origin = min(min(tMaxX, tMaxY), tMaxZ); if (dist_from_origin > length) { break; } else { voxels[n_voxels] = current_voxel + half_resolution; ++n_voxels; } } } return n_voxels; } struct compute_voxel_traversal_functor { compute_voxel_traversal_functor(Eigen::Vector3i* voxels, int n_step, const Eigen::Vector3f& viewpoint, const Eigen::Vector3i& half_resolution, float voxel_size, const Eigen::Vector3f& origin) : voxels_(voxels), n_step_(n_step), viewpoint_(viewpoint), half_resolution_(half_resolution), voxel_size_(voxel_size), origin_(origin){}; Eigen::Vector3i* voxels_; const int n_step_; const Eigen::Vector3f viewpoint_; const Eigen::Vector3i half_resolution_; const float voxel_size_; const Eigen::Vector3f origin_; __device__ void operator()( const thrust::tuple<size_t, Eigen::Vector3f>& x) { const int idx = thrust::get<0>(x); const Eigen::Vector3f end = thrust::get<1>(x); VoxelTraversal(voxels_ + idx * n_step_, n_step_, half_resolution_, viewpoint_, end - origin_, voxel_size_); } }; void ComputeFreeVoxels(const utility::device_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float voxel_size, int resolution, Eigen::Vector3f& origin, int n_div, utility::device_vector<Eigen::Vector3i>& free_voxels) { if (points.empty()) return; size_t n_points = points.size(); size_t max_idx = resolution * resolution * resolution; Eigen::Vector3i half_resolution = Eigen::Vector3i::Constant(resolution / 2); free_voxels.resize( n_div * 3 * n_points, Eigen::Vector3i::Constant(geometry::INVALID_VOXEL_INDEX)); compute_voxel_traversal_functor func( thrust::raw_pointer_cast(free_voxels.data()), n_div * 3, viewpoint - origin, half_resolution, voxel_size, origin); thrust::for_each(enumerate_begin(points), enumerate_end(points), func); auto end1 = thrust::remove_if( free_voxels.begin(), free_voxels.end(), [max_idx] __device__(const Eigen::Vector3i& idx) -> bool { return idx[0] < 0 || idx[1] < 0 || idx[2] < 0 || idx[0] >= max_idx || idx[1] >= max_idx || idx[2] >= max_idx; }); free_voxels.resize(thrust::distance(free_voxels.begin(), end1)); thrust::sort(utility::exec_policy(0)->on(0), free_voxels.begin(), free_voxels.end()); auto end2 = thrust::unique(utility::exec_policy(0)->on(0), free_voxels.begin(), free_voxels.end()); free_voxels.resize(thrust::distance(free_voxels.begin(), end2)); } struct create_occupancy_voxels_functor { create_occupancy_voxels_functor(const Eigen::Vector3f& origin, const Eigen::Vector3i& half_resolution, float voxel_size) : origin_(origin), half_resolution_(half_resolution), voxel_size_(voxel_size){}; const Eigen::Vector3f origin_; const Eigen::Vector3i half_resolution_; const float voxel_size_; __device__ Eigen::Vector3i operator()( const thrust::tuple<Eigen::Vector3f, bool>& x) const { const Eigen::Vector3f& point = thrust::get<0>(x); bool hit_flag = thrust::get<1>(x); Eigen::Vector3f ref_coord = (point - origin_) / voxel_size_; return (hit_flag) ? 
Eigen::device_vectorize<float, 3, ::floor>(ref_coord) .cast<int>() + half_resolution_ : Eigen::Vector3i(INVALID_VOXEL_INDEX, INVALID_VOXEL_INDEX, INVALID_VOXEL_INDEX); ; } }; void ComputeOccupiedVoxels( const utility::device_vector<Eigen::Vector3f>& points, const utility::device_vector<bool> hit_flags, float voxel_size, int resolution, Eigen::Vector3f& origin, utility::device_vector<Eigen::Vector3i>& occupied_voxels) { occupied_voxels.resize(points.size()); size_t max_idx = resolution * resolution * resolution; Eigen::Vector3i half_resolution = Eigen::Vector3i::Constant(resolution / 2); create_occupancy_voxels_functor func(origin, half_resolution, voxel_size); thrust::transform(make_tuple_begin(points, hit_flags), make_tuple_end(points, hit_flags), occupied_voxels.begin(), func); auto end1 = thrust::remove_if( occupied_voxels.begin(), occupied_voxels.end(), [max_idx] __device__(const Eigen::Vector3i& idx) -> bool { return idx[0] < 0 || idx[1] < 0 || idx[2] < 0 || idx[0] >= max_idx || idx[1] >= max_idx || idx[2] >= max_idx; }); occupied_voxels.resize(thrust::distance(occupied_voxels.begin(), end1)); thrust::sort(utility::exec_policy(0)->on(0), occupied_voxels.begin(), occupied_voxels.end()); auto end2 = thrust::unique(utility::exec_policy(0)->on(0), occupied_voxels.begin(), occupied_voxels.end()); occupied_voxels.resize(thrust::distance(occupied_voxels.begin(), end2)); } struct add_occupancy_functor { add_occupancy_functor(OccupancyVoxel* voxels, int resolution, float clamping_thres_min, float clamping_thres_max, float prob_miss_log, float prob_hit_log, bool occupied) : voxels_(voxels), resolution_(resolution), clamping_thres_min_(clamping_thres_min), clamping_thres_max_(clamping_thres_max), prob_miss_log_(prob_miss_log), prob_hit_log_(prob_hit_log), occupied_(occupied){}; OccupancyVoxel* voxels_; const int resolution_; const float clamping_thres_min_; const float clamping_thres_max_; const float prob_miss_log_; const float prob_hit_log_; const bool occupied_; __device__ void operator()(const Eigen::Vector3i& voxel) { size_t idx = IndexOf(voxel, resolution_); float p = voxels_[idx].prob_log_; p = (isnan(p)) ? 0 : p; p += (occupied_) ? 
prob_hit_log_ : prob_miss_log_; voxels_[idx].prob_log_ = min(max(p, clamping_thres_min_), clamping_thres_max_); voxels_[idx].grid_index_ = voxel.cast<unsigned short>(); } }; } // namespace template class DenseGrid<OccupancyVoxel>; OccupancyGrid::OccupancyGrid() : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, 0.05, 512, Eigen::Vector3f::Zero()), min_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)), max_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)) {} OccupancyGrid::OccupancyGrid(float voxel_size, int resolution, const Eigen::Vector3f& origin) : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, voxel_size, resolution, origin), min_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)), max_bound_(Eigen::Vector3ui16::Constant(resolution_ / 2)) {} OccupancyGrid::~OccupancyGrid() {} OccupancyGrid::OccupancyGrid(const OccupancyGrid& other) : DenseGrid<OccupancyVoxel>(Geometry::GeometryType::OccupancyGrid, other), min_bound_(other.min_bound_), max_bound_(other.max_bound_), clamping_thres_min_(other.clamping_thres_min_), clamping_thres_max_(other.clamping_thres_max_), prob_hit_log_(other.prob_hit_log_), prob_miss_log_(other.prob_miss_log_), occ_prob_thres_log_(other.occ_prob_thres_log_), visualize_free_area_(other.visualize_free_area_) {} OccupancyGrid& OccupancyGrid::Clear() { DenseGrid::Clear(); min_bound_ = Eigen::Vector3ui16::Constant(resolution_ / 2); max_bound_ = Eigen::Vector3ui16::Constant(resolution_ / 2); return *this; } Eigen::Vector3f OccupancyGrid::GetMinBound() const { return (min_bound_.cast<int>() - Eigen::Vector3i::Constant(resolution_ / 2)) .cast<float>() * voxel_size_ + origin_; } Eigen::Vector3f OccupancyGrid::GetMaxBound() const { return (max_bound_.cast<int>() - Eigen::Vector3i::Constant(resolution_ / 2 - 1)) .cast<float>() * voxel_size_ + origin_; } bool OccupancyGrid::IsOccupied(const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return false; OccupancyVoxel voxel = voxels_[idx]; return !std::isnan(voxel.prob_log_) && voxel.prob_log_ > occ_prob_thres_log_; } bool OccupancyGrid::IsUnknown(const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return true; OccupancyVoxel voxel = voxels_[idx]; return std::isnan(voxel.prob_log_); } thrust::tuple<bool, OccupancyVoxel> OccupancyGrid::GetVoxel( const Eigen::Vector3f& point) const { auto idx = GetVoxelIndex(point); if (idx < 0) return thrust::make_tuple(false, OccupancyVoxel()); OccupancyVoxel voxel = voxels_[idx]; return thrust::make_tuple(!std::isnan(voxel.prob_log_), voxel); } template <typename Func> std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractBoundVoxels(Func check_func) const { Eigen::Vector3ui16 diff = max_bound_ - min_bound_ + Eigen::Vector3ui16::Ones(); auto out = std::make_shared<utility::device_vector<OccupancyVoxel>>(); out->resize(diff[0] * diff[1] * diff[2]); extract_range_voxels_functor func(diff.cast<int>(), resolution_, min_bound_.cast<int>()); auto end = thrust::copy_if( thrust::make_permutation_iterator( voxels_.begin(), thrust::make_transform_iterator( thrust::make_counting_iterator<size_t>(0), func)), thrust::make_permutation_iterator( voxels_.begin(), thrust::make_transform_iterator( thrust::make_counting_iterator(out->size()), func)), out->begin(), check_func); out->resize(thrust::distance(out->begin(), end)); return out; } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractKnownVoxels() const { auto check_fn = [th = occ_prob_thres_log_] 
__device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return !isnan(v.prob_log_); }; return ExtractBoundVoxels(check_fn); } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractFreeVoxels() const { auto check_fn = [th = occ_prob_thres_log_] __device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return !isnan(v.prob_log_) && v.prob_log_ <= th; }; return ExtractBoundVoxels(check_fn); } std::shared_ptr<utility::device_vector<OccupancyVoxel>> OccupancyGrid::ExtractOccupiedVoxels() const { auto check_fn = [th = occ_prob_thres_log_] __device__( const thrust::tuple<OccupancyVoxel>& x) { const OccupancyVoxel& v = thrust::get<0>(x); return !isnan(v.prob_log_) && v.prob_log_ > th; }; return ExtractBoundVoxels(check_fn); } OccupancyGrid& OccupancyGrid::Reconstruct(float voxel_size, int resolution) { DenseGrid::Reconstruct(voxel_size, resolution); return *this; } OccupancyGrid& OccupancyGrid::SetFreeArea(const Eigen::Vector3f& min_bound, const Eigen::Vector3f& max_bound) { const Eigen::Vector3i half_res = Eigen::Vector3i::Constant(resolution_ / 2); Eigen::Vector3i imin_bound = ((min_bound - origin_) / voxel_size_) .array() .floor() .matrix() .cast<int>() + half_res; Eigen::Vector3i imax_bound = ((max_bound - origin_) / voxel_size_) .array() .floor() .matrix() .cast<int>() + half_res; min_bound_ = imin_bound.array() .max(Eigen::Array3i(0, 0, 0)) .matrix() .cast<unsigned short>(); max_bound_ = imax_bound.array() .min(Eigen::Array3i(resolution_ - 1, resolution_ - 1, resolution_ - 1)) .matrix() .cast<unsigned short>(); Eigen::Vector3ui16 diff = max_bound_ - min_bound_ + Eigen::Vector3ui16::Ones(); extract_range_voxels_functor func(diff.cast<int>(), resolution_, min_bound_.cast<int>()); thrust::for_each( thrust::make_permutation_iterator( voxels_.begin(), thrust::make_transform_iterator( thrust::make_counting_iterator<size_t>(0), func)), thrust::make_permutation_iterator( voxels_.begin(), thrust::make_transform_iterator( thrust::make_counting_iterator<size_t>( diff[0] * diff[1] * diff[2]), func)), [pml = prob_miss_log_] __device__(geometry::OccupancyVoxel & v) { v.prob_log_ = (isnan(v.prob_log_)) ? 0 : v.prob_log_; v.prob_log_ += pml; }); return *this; } OccupancyGrid& OccupancyGrid::Insert( const utility::device_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float max_range) { if (points.empty()) return *this; utility::device_vector<Eigen::Vector3f> ranged_points(points.size()); utility::device_vector<float> ranged_dists(points.size()); utility::device_vector<bool> hit_flags(points.size()); thrust::transform( points.begin(), points.end(), make_tuple_begin(ranged_points, ranged_dists, hit_flags), [viewpoint, max_range] __device__(const Eigen::Vector3f& pt) { const Eigen::Vector3f pt_vp = pt - viewpoint; const float dist = pt_vp.norm(); const bool is_hit = max_range < 0 || dist <= max_range; const Eigen::Vector3f ranged_pt = (is_hit) ? pt : ((dist == 0) ? 
viewpoint : viewpoint + pt_vp / dist * max_range); return thrust::make_tuple( ranged_pt, (ranged_pt - viewpoint).array().abs().maxCoeff(), is_hit); }); float max_dist = *(thrust::max_element(ranged_dists.begin(), ranged_dists.end())); int n_div = int(std::ceil(max_dist / voxel_size_)); utility::device_vector<Eigen::Vector3i> free_voxels; utility::device_vector<Eigen::Vector3i> occupied_voxels; if (n_div > 0) { // comupute free voxels ComputeFreeVoxels(ranged_points, viewpoint, voxel_size_, resolution_, origin_, n_div + 1, free_voxels); } else { thrust::copy(points.begin(), points.end(), ranged_points.begin()); thrust::fill(hit_flags.begin(), hit_flags.end(), true); } // compute occupied voxels ComputeOccupiedVoxels(ranged_points, hit_flags, voxel_size_, resolution_, origin_, occupied_voxels); if (n_div > 0) { utility::device_vector<Eigen::Vector3i> free_voxels_res( free_voxels.size()); auto end = thrust::set_difference( free_voxels.begin(), free_voxels.end(), occupied_voxels.begin(), occupied_voxels.end(), free_voxels_res.begin()); free_voxels_res.resize(thrust::distance(free_voxels_res.begin(), end)); AddVoxels(free_voxels_res, false); } AddVoxels(occupied_voxels, true); return *this; } OccupancyGrid& OccupancyGrid::Insert( const utility::pinned_host_vector<Eigen::Vector3f>& points, const Eigen::Vector3f& viewpoint, float max_range) { utility::device_vector<Eigen::Vector3f> dev_points(points.size()); cudaSafeCall(cudaMemcpy( thrust::raw_pointer_cast(dev_points.data()), points.data(), points.size() * sizeof(Eigen::Vector3f), cudaMemcpyHostToDevice)); return Insert(dev_points, viewpoint, max_range); } OccupancyGrid& OccupancyGrid::Insert(const geometry::PointCloud& pointcloud, const Eigen::Vector3f& viewpoint, float max_range) { Insert(pointcloud.points_, viewpoint, max_range); return *this; } OccupancyGrid& OccupancyGrid::AddVoxel(const Eigen::Vector3i& voxel, bool occupied) { int idx = IndexOf(voxel, resolution_); size_t max_idx = resolution_ * resolution_ * resolution_; if (idx < 0 || idx >= max_idx) { utility::LogError( "[OccupancyGrid] a provided voxeld is not occupancy grid " "range."); return *this; } else { OccupancyVoxel org_ov = voxels_[idx]; if (std::isnan(org_ov.prob_log_)) org_ov.prob_log_ = 0.0; org_ov.prob_log_ += (occupied) ? prob_hit_log_ : prob_miss_log_; org_ov.prob_log_ = std::min(std::max(org_ov.prob_log_, clamping_thres_min_), clamping_thres_max_); org_ov.grid_index_ = voxel.cast<unsigned short>(); voxels_[idx] = org_ov; min_bound_ = min_bound_.array().min(org_ov.grid_index_.array()); max_bound_ = max_bound_.array().max(org_ov.grid_index_.array()); } return *this; } OccupancyGrid& OccupancyGrid::AddVoxels( const utility::device_vector<Eigen::Vector3i>& voxels, bool occupied) { if (voxels.empty()) return *this; Eigen::Vector3i fv = voxels.front(); Eigen::Vector3i bv = voxels.back(); Eigen::Vector3ui16 fvu = fv.cast<unsigned short>(); Eigen::Vector3ui16 bvu = bv.cast<unsigned short>(); min_bound_ = min_bound_.array().min(fvu.array()); min_bound_ = min_bound_.array().min(bvu.array()); max_bound_ = max_bound_.array().max(fvu.array()); max_bound_ = max_bound_.array().max(bvu.array()); add_occupancy_functor func(thrust::raw_pointer_cast(voxels_.data()), resolution_, clamping_thres_min_, clamping_thres_max_, prob_miss_log_, prob_hit_log_, occupied); thrust::for_each(voxels.begin(), voxels.end(), func); return *this; } } // namespace geometry } // namespace cupoch
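// -----------------------------------------------------------------------------
// Usage sketch (added for illustration, not part of the original source).
// It shows one plausible way to use the OccupancyGrid API defined above:
// integrate a point cloud observed from a sensor origin, then query a
// location.  The voxel size, resolution, max range and probe point are
// hypothetical values.
// -----------------------------------------------------------------------------
namespace cupoch {
namespace geometry {

inline bool ExampleInsertAndQuery(const PointCloud& scan,
                                  const Eigen::Vector3f& sensor_origin) {
    // 5cm voxels on a 512^3 grid centered at the world origin
    OccupancyGrid grid(0.05f, 512, Eigen::Vector3f::Zero());

    // points farther than 10m from the sensor only contribute free-space
    // updates along the ray (see the max_range handling in Insert() above)
    grid.Insert(scan, sensor_origin, 10.0f);

    // probe a point one meter in front of the sensor along +x
    const Eigen::Vector3f probe =
            sensor_origin + Eigen::Vector3f(1.0f, 0.0f, 0.0f);
    return grid.IsOccupied(probe);
}

}  // namespace geometry
}  // namespace cupoch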
the_stack
#pragma once

//////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////
// Notes:
//
// The following implementations are prototypes, do not exploit any reuse and
// currently have bad performance. Their purpose is to be a baseline for
// exploring the feasibility of single fused kernels that do not require any
// device memory.
//
// The general structure of the code is a 5-D loop which exhibits reuse
// across:
//   - tiles, batches for weights
//   - outputplanes for inputs
//   - inputplanes for outputs
//
// When implemented with memory reuse in mind, these kernels stress out the
// GPU resources.
// For UpdateOutput, the 2 stressing factors are:
//   - amount of shared memory required for a single block, which is given by:
//     InputPlUnroll x OutputPlUnroll x FFTSize / 2 x FFTSize x sizeof(Complex)
//     In practice, for FFT size 32x32, a 4 x 4 unroll of InputPlUnroll x
//     OutputPlUnroll requires 65K and does not fit in shared memory.
//     So there is a tradeoff between the amount of reuse, what fits into
//     shared memory, and precomputing weights in global memory.
//   - amount of registers used and the number of threads in a threadblock,
//     within the limit of the 65K registers.
//     A block has FFTSize x OutputPlUnroll x BatchUnroll threads.
//     By limiting the kernel to use only 128 registers for FFT 32, we can fit
//     32 x 2 x 8 threads in a block within the 64K register K40m budget.
//
// 32x32:
//   This kernel takes up a lot of registers; by squeezing them we get to
//   128 per thread, so we can only hope for 1 block per SM at any point.
//   We must enforce the constraints:
//     FFTSize, BatchUnroll, InputUnroll, OutputUnroll
//     FFTSize x BatchUnroll x OutputUnroll x 128 <= 65K
//     FFTSize x FFTSize / 2 x InputUnroll x OutputUnroll x 8 <= 40K
//
// 16x16:
//   This kernel can be squeezed within 64 registers per thread,
//   so we can only aim for 2 blocks per SM.
//   We must enforce the constraints:
//     FFTSize, BatchUnroll, InputUnroll, OutputUnroll
//     FFTSize x BatchUnroll x OutputUnroll x 64 <= 32K
//     FFTSize x FFTSize / 2 x InputUnroll x OutputUnroll x 8 <= 20K
//
// 8x8:
//   This kernel spills badly within 32 registers per thread...
//   But it does fit within 40 registers.
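//
// Worked example (added arithmetic only, plugging the unroll factors that are
// instantiated for updateOutput further below into the two budgets above):
//   32x32 (BatchUnroll=8, InputUnroll=4, OutputUnroll=2):
//     registers:  32 x 8 x 2 threads x 128 regs = 64K -> right at the budget
//     shared mem: 32 x 16 x 4 x 2 x 8B          = 32K -> fits in 40K
//   16x16 (BatchUnroll=8, InputUnroll=4, OutputUnroll=4):
//     registers:  16 x 8 x 4 threads x 64 regs  = 32K -> fits in 32K
//     shared mem: 16 x 8 x 4 x 4 x 8B           = 16K -> fits in 20K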
// ////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////// #include "cuda/Complex.cuh" #include "cuda/CudaUtils.cuh" #include "cuda/DeviceTensor.cuh" #include "cuda/fbfft/FBFFTCommon.cuh" #include "cuda/fbfft/FFT2D32.cuh" #include <cuda_runtime.h> #include <cassert> namespace facebook { namespace cuda { namespace fbfft { namespace detail { template <typename T, int Dims> struct TiledDeviceTensor { __host__ __device__ TiledDeviceTensor() : padL(-1), padU(-1), tensor() {} __host__ __device__ TiledDeviceTensor(DeviceTensor<T, Dims>& t, int leftPadding, int upPadding) : padL(leftPadding), padU(upPadding), tensor(t.data(), t.sizes(), t.strides()) {} int padL; int padU; DeviceTensor<T, Dims> tensor; }; template <typename T, int Dims, int NumTiles> struct TiledDeviceTensors { TiledDeviceTensor<T, Dims> inputs[NumTiles]; TiledDeviceTensor<T, Dims> weights[NumTiles]; TiledDeviceTensor<T, Dims> outputs[NumTiles]; }; template<int FFTSize> __device__ void updateOutputIteratedKernel( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, const DeviceTensor<float, 4> weight, size_t numTiles) { constexpr float invNorm = 1.0f / (FFTSize * FFTSize); const auto tileIndexStart = blockIdx.x; const auto inputPlanes = weight.getSize(1); const auto outputPlanes = weight.getSize(0); const auto batch = blockIdx.z * blockDim.z + threadIdx.z; const auto outputPl = blockIdx.y * blockDim.y + threadIdx.y; const auto tileIndex = tileIndexStart; const auto& input = ins[tileIndex]; auto& output = outs[tileIndex]; // Early exits if (batch >= input.tensor.getSize(0)) { return; } if (outputPl >= outputPlanes) { return; } Complex out[FFTSize / 2]; #pragma unroll for (int i = 0 ; i < FFTSize / 2; ++i) { out[i] = Complex(0.0f); } for (int inputPl = 0; inputPl < inputPlanes; ++inputPl) { Complex wei[FFTSize / 2]; #pragma unroll for (int i = 0 ; i < FFTSize; i += 2) { float f1 = inBounds(i, threadIdx.x, 0, 0, weight) ? weight [outputPl][inputPl][i - 0][threadIdx.x - 0].ldg() : 0.0f; float f2 = inBounds(i + 1, threadIdx.x, 0, 0, weight) ? weight [outputPl][inputPl][i + 1 - 0][threadIdx.x - 0].ldg() : 0.0f; wei[i / 2] = Complex(f1, f2); } fbfft2DVerticalCoreForward<FFTSize>(wei); Complex inp[FFTSize / 2]; #pragma unroll for (int i = 0 ; i < FFTSize; i += 2) { float f1 = inBounds(i, threadIdx.x, input.padU, input.padL, input.tensor) ? input.tensor [batch][inputPl][i - input.padU][threadIdx.x - input.padL].ldg() : 0.0f; float f2 = inBounds(i + 1, threadIdx.x, input.padU, input.padL, input.tensor) ? 
input.tensor [batch][inputPl][i + 1 - input.padU][threadIdx.x - input.padL].ldg() : 0.0f; inp[i / 2] = Complex(f1, f2); } fbfft2DVerticalCoreForward<FFTSize>(inp); // First element packs 2 real into a complex, don't get fooled Complex tmpin, tmpwei; if (threadIdx.x > 0) { Complex otherinp = shfl(inp[0], FFTSize - threadIdx.x, FFTSize); Complex inp0 = Complex(0.5f) * (otherinp.conjugate() + inp[0]); Complex inpN2 = Complex(0.5f) * (otherinp.conjugate() - inp[0]).conjugate().transpose(); Complex otherwei = shfl(wei[0], FFTSize - threadIdx.x, FFTSize); Complex wei0 = Complex(0.5f) * (otherwei.conjugate() + wei[0]); Complex weiN2 = Complex(0.5f) * (otherwei.conjugate() - wei[0]).conjugate().transpose(); Complex out0 = inp0 * wei0.conjugate(); Complex outN2 = inpN2 * weiN2.conjugate(); out[0] += out0 + outN2.conjugate().transpose(); } else { out[0] += Complex(inp[0].re() * wei[0].re(), inp[0].im() * wei[0].im()); } #pragma unroll for (int i = 1; i < FFTSize / 2; ++i) { out[i] += inp[i] * wei[i].conjugate(); } } // inputPl fbfft2DVerticalCoreInverse<FFTSize, true>(out); // Write the results back to memory. // No need for conjugation as we know we have real results. #pragma unroll for (int i = 0 ; i < FFTSize; i += 2) { if (inBounds(i, threadIdx.x, output.padU, output.padL, output.tensor)) { output.tensor [batch][outputPl][i - output.padU][threadIdx.x - output.padL] = out[i / 2].re() * invNorm; } if (inBounds(i + 1, threadIdx.x, output.padU, output.padL, output.tensor)) { output.tensor [batch][outputPl][i + 1 - output.padU][threadIdx.x - output.padL] = out[i / 2].im() * invNorm; } } } __launch_bounds__(512, 1) // 128 registers __global__ void updateOutputIteratedKernel32( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, const DeviceTensor<float, 4> weight, size_t numTiles) { updateOutputIteratedKernel<32>( ins, outs, weight, numTiles); } __launch_bounds__(1024, 1) // 64 registers __global__ void updateOutputIteratedKernel16( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, const DeviceTensor<float, 4> weight, size_t numTiles) { updateOutputIteratedKernel<16>( ins, outs, weight, numTiles); } __launch_bounds__(425, 3) // 40 registers __global__ void updateOutputIteratedKernel8( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, const DeviceTensor<float, 4> weight, size_t numTiles) { updateOutputIteratedKernel<8>( ins, outs, weight, numTiles); } template<int FFTSize> __device__ void updateGradInputIteratedKernel( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, const DeviceTensor<float, 4> weight, size_t numTiles) { constexpr float invNorm = 1.0f / (FFTSize * FFTSize); const auto tileIndexStart = blockIdx.x; const auto inputPlanes = weight.getSize(1); const auto outputPlanes = weight.getSize(0); const auto batch = blockIdx.z * blockDim.z + threadIdx.z; const auto inputPl = blockIdx.y * blockDim.y + threadIdx.y; const auto tileIndex = tileIndexStart; auto& input = ins[tileIndex]; const auto& output = outs[tileIndex]; // Early exits if (batch >= input.tensor.getSize(0)) { return; } if (inputPl >= weight.getSize(1)) { return; } Complex inp[FFTSize / 2]; #pragma unroll for (int i = 0 ; i < FFTSize / 2; ++i) { inp[i] = Complex(0.0f); } for (int outputPl = 0; outputPl < outputPlanes; ++outputPl) { Complex wei[FFTSize / 2]; #pragma unroll for (int i = 0 ; i < FFTSize; i += 2) { float f1 = inBounds(i, threadIdx.x, 0, 0, weight) ? 
weight [outputPl][inputPl][i - 0][threadIdx.x - 0].ldg() : 0.0f; float f2 = inBounds(i + 1, threadIdx.x, 0, 0, weight) ? weight [outputPl][inputPl][i + 1 - 0][threadIdx.x - 0].ldg() : 0.0f; wei[i / 2] = Complex(f1, f2); } fbfft2DVerticalCoreForward<FFTSize>(wei); Complex out[FFTSize / 2]; #pragma unroll for (int i = 0 ; i < FFTSize; i += 2) { float f1 = inBounds(i, threadIdx.x, output.padU, output.padL, output.tensor) ? output.tensor [batch][outputPl][i - output.padU][threadIdx.x - output.padL].ldg() : 0.0f; float f2 = inBounds(i + 1, threadIdx.x, output.padU, output.padL, output.tensor) ? output.tensor [batch][outputPl][i + 1 - output.padU][threadIdx.x - output.padL].ldg() : 0.0f; out[i / 2] = Complex(f1, f2); } fbfft2DVerticalCoreForward<FFTSize>(out); // First element packs 2 real into a complex, don't get fooled Complex tmpin, tmpwei; if (threadIdx.x > 0) { Complex otherout = shfl(out[0], FFTSize - threadIdx.x, FFTSize); Complex out0 = Complex(0.5f) * (otherout.conjugate() + out[0]); Complex outN2 = Complex(0.5f) * (otherout.conjugate() - out[0]).conjugate().transpose(); Complex otherwei = shfl(wei[0], FFTSize - threadIdx.x, FFTSize); Complex wei0 = Complex(0.5f) * (otherwei.conjugate() + wei[0]); Complex weiN2 = Complex(0.5f) * (otherwei.conjugate() - wei[0]).conjugate().transpose(); Complex in0 = out0 * wei0; Complex inN2 = outN2 * weiN2; inp[0] += in0 + inN2.conjugate().transpose(); } else { inp[0] += Complex(out[0].re() * wei[0].re(), out[0].im() * wei[0].im()); } #pragma unroll for (int i = 1; i < FFTSize / 2; ++i) { inp[i] += out[i] * wei[i]; } } // inputPl fbfft2DVerticalCoreInverse<FFTSize, true>(inp); // Write the results back to memory. // No need for conjugation as we know we have real results. #pragma unroll for (int i = 0 ; i < FFTSize; i += 2) { if (inBounds(i, threadIdx.x, input.padU, input.padL, input.tensor)) { input.tensor [batch][inputPl][i - input.padU][threadIdx.x - input.padL] = inp[i / 2].re() * invNorm; } if (inBounds(i + 1, threadIdx.x, input.padU, input.padL, input.tensor)) { input.tensor [batch][inputPl][i + 1 - input.padU][threadIdx.x - input.padL] = inp[i / 2].im() * invNorm; } } } __launch_bounds__(512, 1) // 128 registers __global__ void updateGradInputIteratedKernel32( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, const DeviceTensor<float, 4> weight, size_t numTiles) { updateGradInputIteratedKernel<32>( ins, outs, weight, numTiles); } __launch_bounds__(512, 1) // 128 registers __global__ void updateGradInputIteratedKernel16( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, const DeviceTensor<float, 4> weight, size_t numTiles) { updateGradInputIteratedKernel<16>( ins, outs, weight, numTiles); } __launch_bounds__(1024, 1) // 64 registers __global__ void updateGradInputIteratedKernel8( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, const DeviceTensor<float, 4> weight, size_t numTiles) { updateGradInputIteratedKernel<8>( ins, outs, weight, numTiles); } template<int FFTSize> __device__ void accGradParametersIteratedKernel( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, DeviceTensor<float, 4> weight, size_t numTiles, float scale) { const float invNormScale = scale / (FFTSize * FFTSize); const auto inputPlanes = weight.getSize(1); const auto outputPlanes = weight.getSize(0); const auto inputPl = blockIdx.y * blockDim.y + threadIdx.y; const auto outputPl = blockIdx.z * blockDim.z + threadIdx.z; Complex wei[FFTSize / 2 + 1]; #pragma unroll for (int i = 0 ; i < FFTSize / 2; ++i) 
{ wei[i] = Complex(0.0f); } wei[FFTSize / 2] = Complex(0.0f); for (int tileIndex = 0; tileIndex < numTiles; ++tileIndex) { const auto& input = ins[tileIndex]; const auto& output = outs[tileIndex]; // Early exits if (inputPl >= inputPlanes) { return; } if (outputPl >= outputPlanes) { return; } const auto Batches = input.tensor.getSize(0); for (int batch = 0; batch < Batches; ++batch) { Complex out[FFTSize / 2 + 1]; #pragma unroll for (int i = 0 ; i < FFTSize; i += 2) { float f1 = inBounds(i, threadIdx.x, output.padU, output.padL, output.tensor) ? output.tensor [batch][outputPl][i - output.padU][threadIdx.x - output.padL].ldg() : 0.0f; float f2 = inBounds(i + 1, threadIdx.x, output.padU, output.padL, output.tensor) ? output.tensor [batch][outputPl][i + 1 - output.padU][threadIdx.x - output.padL].ldg() : 0.0f; out[i / 2] = Complex(f1, f2); } out[FFTSize / 2] = Complex(0.0f); fbfft2DVerticalCoreForward<FFTSize>(out); Complex inp[FFTSize / 2 + 1]; #pragma unroll for (int i = 0 ; i < FFTSize; i += 2) { float f1 = inBounds(i, threadIdx.x, input.padU, input.padL, input.tensor) ? input.tensor [batch][inputPl][i - input.padU][threadIdx.x - input.padL].ldg() : 0.0f; float f2 = inBounds(i + 1, threadIdx.x, input.padU, input.padL, input.tensor) ? input.tensor [batch][inputPl][i + 1 - input.padU][threadIdx.x - input.padL].ldg() : 0.0f; inp[i / 2] = Complex(f1, f2); } inp[FFTSize / 2] = Complex(0.0f); fbfft2DVerticalCoreForward<FFTSize>(inp); // First element packs 2 real into a complex, don't get fooled Complex tmpin, tmpwei; if (threadIdx.x > 0) { // Unpack Complex otherinp = shfl(inp[0], FFTSize - threadIdx.x, FFTSize); inp[FFTSize / 2] = Complex(0.5f) * (otherinp.conjugate() - inp[0]).conjugate().transpose(); inp[0] = Complex(0.5f) * (otherinp.conjugate() + inp[0]); // Unpack Complex otherout = shfl(out[0], FFTSize - threadIdx.x, FFTSize); out[FFTSize / 2] = Complex(0.5f) * (otherout.conjugate() - out[0]).conjugate().transpose(); out[0] = Complex(0.5f) * (otherout.conjugate() + out[0]); } else { inp[FFTSize / 2] = Complex(inp[0].im()); inp[0] = Complex(inp[0].re()); out[FFTSize / 2] = Complex(out[0].im()); out[0] = Complex(out[0].re()); } #pragma unroll for (int i = 0; i < FFTSize / 2 + 1; ++i) { wei[i] += inp[i] * out[i].conjugate(); } } // batches } // tileIndex fbfft2DVerticalCoreInverse<FFTSize, false>(wei); // Write the results back to memory. // No need for conjugation as we know we have real results. 
#pragma unroll for (int i = 0 ; i < FFTSize; i += 2) { if (inBounds(i, threadIdx.x, 0, 0, weight)) { weight[outputPl][inputPl][i - 0][threadIdx.x - 0] = wei[i / 2].re() * invNormScale; } if (inBounds(i + 1, threadIdx.x, 0, 0, weight)) { weight[outputPl][inputPl][i + 1 - 0][threadIdx.x - 0] = wei[i / 2].im() * invNormScale; } } } __launch_bounds__(512, 1) // 128 registers __global__ void accGradParametersIteratedKernel32( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, DeviceTensor<float, 4> weight, size_t numTiles, float scale) { accGradParametersIteratedKernel<32>( ins, outs, weight, numTiles, scale); } __launch_bounds__(512, 1) // 128 registers __global__ void accGradParametersIteratedKernel16( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, DeviceTensor<float, 4> weight, size_t numTiles, float scale) { accGradParametersIteratedKernel<16>( ins, outs, weight, numTiles, scale); } __launch_bounds__(1024, 1) // 64 registers __global__ void accGradParametersIteratedKernel8( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, DeviceTensor<float, 4> weight, size_t numTiles, float scale) { accGradParametersIteratedKernel<8>( ins, outs, weight, numTiles, scale); } #define INST_UPDATE_OUTPUT_ITERATED(FFTSIZE, BUNROLL, IUNROLL, OUNROLL) \ { \ dim3 blocks(numTiles, \ ceil(outputPlanes, OUNROLL), \ ceil(batchSize, BUNROLL)); \ dim3 threads(FFTSIZE, OUNROLL, BUNROLL); \ updateOutputIteratedKernel##FFTSIZE \ <<<blocks, threads, 0, s>>>(ins, \ outs, \ weight, \ numTiles); \ return true; \ } #define INST_UPDATE_GRAD_INPUT_ITERATED(FFTSIZE, BUNROLL, IUNROLL, OUNROLL) \ { \ dim3 blocks(numTiles, \ ceil(inputPlanes, IUNROLL), \ ceil(batchSize, BUNROLL)); \ dim3 threads(FFTSIZE, IUNROLL, BUNROLL); \ updateGradInputIteratedKernel##FFTSIZE \ <<<blocks, threads, 0, s>>>(ins, \ outs, \ weight, \ numTiles); \ return true; \ } #define INST_ACC_GRAD_PARAMETERS_ITERATED(FFTSIZE, BUNROLL, IUNROLL, OUNROLL) \ { \ dim3 blocks(1, /* Accumulate so must always be 1! 
*/ \ ceil(inputPlanes, IUNROLL), \ ceil(outputPlanes, OUNROLL)); \ dim3 threads(FFTSIZE, IUNROLL, OUNROLL); \ accGradParametersIteratedKernel##FFTSIZE \ <<<blocks, threads, 0, s>>>(ins, \ outs, \ weight, \ numTiles, \ scale); \ return true; \ } // A bit awkward but used directly from lua FFI typedef struct _FFTConvolutionPassFFI { static const int FFT_UpdateOutput = 0; static const int FFT_UpdateGradInput = 1; static const int FFT_AccGradParameters = 2; int pass; } FFTConvolutionPassFFI; template<int FFTSize> bool FFTIteratedConvolution( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, const DeviceTensor<float, 4> weight, FFTConvolutionPassFFI pass, float scale, int batchSize, size_t numTiles, cudaStream_t s); template<> bool FFTIteratedConvolution<8>( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, const DeviceTensor<float, 4> weight, FFTConvolutionPassFFI pass, float scale, int batchSize, size_t numTiles, cudaStream_t s) { const auto inputPlanes = weight.getSize(1); const auto outputPlanes = weight.getSize(0); // Don't forget to init your twiddles facebook::cuda::fbfft::initTwiddles(); constexpr int FFTSize = 8; if (pass.pass == pass.FFT_UpdateOutput) { CHECK_LE(1, numTiles); CHECK_LE(1, weight.getSize(0)); CHECK_LE(1, FFTSize); INST_UPDATE_OUTPUT_ITERATED(8, 4, 4, 4); } if (pass.pass == pass.FFT_UpdateGradInput) { CHECK_LE(1, numTiles); CHECK_LE(1, weight.getSize(0)); CHECK_LE(1, FFTSize); INST_UPDATE_GRAD_INPUT_ITERATED(8, 4, 4, 4); } if (pass.pass == pass.FFT_AccGradParameters) { CHECK_LE(1, weight.getSize(0)); CHECK_LE(1, weight.getSize(1)); CHECK_LE(1, FFTSize); INST_ACC_GRAD_PARAMETERS_ITERATED(8, 1, 4, 4); } return false; } template<> bool FFTIteratedConvolution<16>( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, const DeviceTensor<float, 4> weight, FFTConvolutionPassFFI pass, float scale, int batchSize, size_t numTiles, cudaStream_t s) { const auto inputPlanes = weight.getSize(1); const auto outputPlanes = weight.getSize(0); // Don't forget to init your twiddles facebook::cuda::fbfft::initTwiddles(); constexpr int FFTSize = 16; if (pass.pass == pass.FFT_UpdateOutput) { CHECK_LE(1, numTiles); CHECK_LE(1, weight.getSize(0)); CHECK_LE(1, FFTSize); INST_UPDATE_OUTPUT_ITERATED(16, 8, 4, 4); } if (pass.pass == pass.FFT_UpdateGradInput) { CHECK_LE(1, numTiles); CHECK_LE(1, weight.getSize(0)); CHECK_LE(1, FFTSize); INST_UPDATE_GRAD_INPUT_ITERATED(16, 8, 4, 4); } if (pass.pass == pass.FFT_AccGradParameters) { CHECK_LE(1, weight.getSize(0)); CHECK_LE(1, weight.getSize(1)); CHECK_LE(1, FFTSize); INST_ACC_GRAD_PARAMETERS_ITERATED(16, 1, 2, 2); } return false; } template<> bool FFTIteratedConvolution<32>( TiledDeviceTensor<float, 4>* ins, TiledDeviceTensor<float, 4>* outs, const DeviceTensor<float, 4> weight, FFTConvolutionPassFFI pass, float scale, int batchSize, size_t numTiles, cudaStream_t s) { const auto inputPlanes = weight.getSize(1); const auto outputPlanes = weight.getSize(0); // Don't forget to init your twiddles facebook::cuda::fbfft::initTwiddles(); constexpr int FFTSize = 32; if (pass.pass == pass.FFT_UpdateOutput) { CHECK_LE(1, numTiles); CHECK_LE(1, weight.getSize(0)); CHECK_LE(1, FFTSize); INST_UPDATE_OUTPUT_ITERATED(32, 8, 4, 2); } if (pass.pass == pass.FFT_UpdateGradInput) { CHECK_LE(1, numTiles); CHECK_LE(1, weight.getSize(0)); CHECK_LE(1, FFTSize); INST_UPDATE_GRAD_INPUT_ITERATED(32, 8, 2, 4); } if (pass.pass == pass.FFT_AccGradParameters) { CHECK_LE(1, weight.getSize(0)); CHECK_LE(1, weight.getSize(1)); 
CHECK_LE(1, FFTSize); INST_ACC_GRAD_PARAMETERS_ITERATED(32, 1, 2, 2); } return false; } #undef INST_UPDATE_OUTPUT_ITERATED #undef INST_UPDATE_GRAD_INPUT_ITERATED #undef INST_ACC_GRAD_PARAMETERS_ITERATED }}}}
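// ---------------------------------------------------------------------------
// Illustrative sketch (not part of fbfft): the INST_* macros above derive
// their launch configuration from the unroll factors. The helper below
// reproduces that arithmetic on the host for the updateOutput pass so the
// grid/block shapes can be inspected without launching anything. `ceil(a, b)`
// in the macros is assumed to be integer ceiling division, which is what
// ceilDiv() implements here.
namespace fbfft_launch_example {

constexpr int ceilDiv(int a, int b) { return (a + b - 1) / b; }

struct LaunchShape {
  dim3 blocks;
  dim3 threads;
};

// Mirrors INST_UPDATE_OUTPUT_ITERATED(FFTSIZE, BUNROLL, IUNROLL, OUNROLL):
//   blocks  = (numTiles, ceil(outputPlanes / OUNROLL), ceil(batchSize / BUNROLL))
//   threads = (FFTSIZE, OUNROLL, BUNROLL)
inline LaunchShape updateOutputLaunchShape(
    int fftSize, int bUnroll, int oUnroll,
    int numTiles, int outputPlanes, int batchSize) {
  LaunchShape s;
  s.blocks = dim3(numTiles,
                  ceilDiv(outputPlanes, oUnroll),
                  ceilDiv(batchSize, bUnroll));
  s.threads = dim3(fftSize, oUnroll, bUnroll);
  return s;
}

} // namespace fbfft_launch_example

// Example: updateOutputLaunchShape(32, 8, 2, numTiles, outputPlanes, batchSize)
// yields 32 x 2 x 8 = 512 threads per block, consistent with
// __launch_bounds__(512, 1) on updateOutputIteratedKernel32.
// ---------------------------------------------------------------------------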
the_stack
#include <boost/preprocessor.hpp> #include <collectives/ib_comm.hpp> #include <iostream> #include <sstream> #include <utils.cuh> #include <utils.hpp> namespace HugeCTR { #define MAX_AR_CHANNELS 31 IbComm::ARCollContext::ARCollContext(IbComm* comm) { size_t num_gpus = comm->num_gpus_; num_gpus_ = num_gpus; std::generate_n(std::back_inserter(ctx_), num_gpus, [] { return std::make_unique<ARCollContextPerGPU>(); }); // Read config params from env if (getenv("ONESHOT_NBLOCKS")) { cfg_nblocks_ = atoi(getenv("ONESHOT_NBLOCKS")); } if (getenv("ONESHOT_ALIGN_BLOCK")) { cfg_align_block_ = atoi(getenv("ONESHOT_ALIGN_BLOCK")); } if (getenv("ONESHOT_MIN_BLOCK")) { cfg_min_block_ = atoi(getenv("ONESHOT_MIN_BLOCK")); } if (getenv("ONESHOT_NCHANNELS")) { cfg_nchannels_ = atoi(getenv("ONESHOT_NCHANNELS")); } PROXY_ASSERT_MSG(cfg_nchannels_ <= MAX_AR_CHANNELS, "Max oneshot channels is 31"); PROXY_ASSERT(cfg_nblocks_ <= AR_MAX_BLOCKS); HCTR_LOG_S(INFO, ROOT) << "using oneshot nblocks: " << cfg_nblocks_ << std::endl; HCTR_LOG_S(INFO, ROOT) << "using oneshot nchannels: " << cfg_nchannels_ << std::endl; HCTR_LOG_S(INFO, ROOT) << "using oneshot min block: " << cfg_min_block_ << std::endl; } void IbComm::ARCollContext::update_size(size_t ar_size) { // calculate peerblock size PROXY_ASSERT_MSG((ar_size % (num_gpus_ * 16)) == 0, "AR size needs to be aligned to num_gpus*16"); ar_size_ = ar_size; blocksize_ = (cfg_nblocks_ - 1 + (cfg_align_block_ - 1 + ar_size) / cfg_align_block_) / cfg_nblocks_; blocksize_ *= cfg_align_block_; if (blocksize_ < cfg_min_block_) { blocksize_ = cfg_min_block_; } peer_blocklines_ = blocksize_ / sizeof(uint4) / num_gpus_; num_blocks_ = (ar_size + blocksize_ - 1) / blocksize_; PROXY_ASSERT(num_blocks_ <= AR_MAX_BLOCKS); } ARCollHandle IbComm::register_ar_coll() { ar_coll_ctx_.emplace_back(std::make_unique<ARCollContext>(this)); ARCollHandle coll_handle = (ARCollHandle)(ar_coll_ctx_.size() - 1); for (size_t g = 0; g < num_gpus_; g++) { M2PARCollInit coll_init_cmd_; coll_init_cmd_.coll_handle_ = coll_handle; coll_init_cmd_.cfg_nblocks_ = ar_coll_ctx_[coll_handle]->cfg_nblocks_; coll_init_cmd_.cfg_align_block_ = ar_coll_ctx_[coll_handle]->cfg_align_block_; coll_init_cmd_.cfg_min_block_ = ar_coll_ctx_[coll_handle]->cfg_min_block_; ARCollInitCmd cmd = std::make_pair(std::move(coll_init_cmd_), std::move(P2MNull())); proxy_cmd_->cmd_[g] = std::move(cmd); } proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); proxy_cmd_->reset(); return coll_handle; } template <> sharp_datatype IbComm::get_sharp_dtype<int>() { return SHARP_DTYPE_INT; } template <> sharp_datatype IbComm::get_sharp_dtype<uint32_t>() { return SHARP_DTYPE_UNSIGNED; } template <> sharp_datatype IbComm::get_sharp_dtype<__half>() { return SHARP_DTYPE_FLOAT_SHORT; } template <> sharp_datatype IbComm::get_sharp_dtype<float>() { return SHARP_DTYPE_FLOAT; } template <typename T> void IbComm::set_ar_coll_buf(ARCollHandle coll, void* ar_ptr, const size_t ar_size, size_t device_id) { PROXY_ASSERT(ar_size != 0); auto& coll_ctx = *ar_coll_ctx_[coll]; if (proxy_cmd_->cmd_[device_id].which() != 0) { HCTR_LOG_S(ERROR, WORLD) << "Proxy command is already populated. 
Don't mix up set API " << HCTR_LOCATION() << std::endl; exit(1); } proxy_cmd_->cmd_[device_id] = ARBufInitCmd(); ARBufInitCmd& cmd = boost::get<ARBufInitCmd>(proxy_cmd_->cmd_[device_id]); M2PARBufInit& buf_init = std::get<0>(cmd); auto& gpu_ctx = *coll_ctx.ctx_[device_id]; gpu_ctx.d_ar_ptr_ = ar_ptr; buf_init.coll_handle_ = coll; buf_init.d_ar_ptr_ = ar_ptr; buf_init.ar_size_ = ar_size; buf_init.sharp_dtype_ = get_sharp_dtype<T>(); buf_init.element_size_ = sizeof(T); if (coll_ctx.ar_size_ != 0) { PROXY_ASSERT(ar_size == coll_ctx.ar_size_); } coll_ctx.ar_size_ = ar_size; PROXY_ASSERT_MSG(((size_t)ar_ptr & 0xf) == 0, "AR pointer needs to aligned to 16B"); } template void IbComm::set_ar_coll_buf<__half>(ARCollHandle coll, void* ar_ptr, const size_t ar_size, size_t device_id); template void IbComm::set_ar_coll_buf<float>(ARCollHandle coll, void* ar_ptr, const size_t ar_size, size_t device_id); template void IbComm::set_ar_coll_buf<uint32_t>(ARCollHandle coll, void* ar_ptr, const size_t ar_size, size_t device_id); #define MAX_LOCAL_RANKS 32 #define TOTAL_FLAGS (2 * MAX_LOCAL_RANKS + MAX_AR_CHANNELS) void IbComm::register_ar_coll_buf(ARCollHandle coll) { auto& coll_ctx = ar_coll_ctx_[coll]; proxy_cmd_->post_command(); proxy_cmd_->wait_for_completion(); // Allocations for (size_t g = 0; g < num_gpus_; g++) { HCTR_LIB_THROW(cudaSetDevice(device_list_[g])); auto& gpu_ctx = *coll_ctx->ctx_[g]; gpu_ctx.buf_ = GeneralBuffer2<CudaAllocator>::create(); gpu_ctx.buf_->reserve({num_gpus_}, &gpu_ctx.d_peer_ptrs_); gpu_ctx.buf_->reserve({1}, &gpu_ctx.d_coll_cmd_); gpu_ctx.buf_->reserve({TOTAL_FLAGS}, &gpu_ctx.d_flags_); gpu_ctx.buf_->reserve({num_gpus_}, &gpu_ctx.d_flags_ptrs_); gpu_ctx.buf_->allocate(); HCTR_LIB_THROW(cudaMemset(gpu_ctx.buf_->get_ptr(), 0, gpu_ctx.buf_->get_size_in_bytes())); } // Get proxy output std::vector<void*> h_peer_ptrs(num_gpus_); std::vector<void*> h_peer_flag_ptrs(num_gpus_); for (size_t g = 0; g < num_gpus_; g++) { auto& gpu_ctx = *coll_ctx->ctx_[g]; h_peer_ptrs[g] = gpu_ctx.d_ar_ptr_; h_peer_flag_ptrs[g] = gpu_ctx.d_flags_.get_ptr(); ARBufInitCmd& proxy_cmd = boost::get<ARBufInitCmd>(proxy_cmd_->cmd_[g]); auto& buf_init_out = std::get<1>(proxy_cmd); gpu_ctx.h_rs_cmd_ = buf_init_out.h_rs_cmd_; gpu_ctx.d_ag_cmd_ = buf_init_out.d_ag_cmd_; } for (size_t g = 0; g < num_gpus_; g++) { auto& gpu_ctx = *coll_ctx->ctx_[g]; HCTR_LIB_THROW(cudaSetDevice(device_list_[g])); HCTR_LIB_THROW(cudaMemcpy(gpu_ctx.d_peer_ptrs_.get_ptr(), h_peer_ptrs.data(), num_gpus_ * sizeof(void*), cudaMemcpyHostToDevice)); HCTR_LIB_THROW(cudaMemcpy(gpu_ctx.d_flags_ptrs_.get_ptr(), h_peer_flag_ptrs.data(), num_gpus_ * sizeof(size_t*), cudaMemcpyHostToDevice)); } coll_ctx->update_size(coll_ctx->ar_size_); proxy_cmd_->reset(); } void IbComm::update_size(ARCollHandle coll, const size_t ar_size) { auto& ctx = ar_coll_ctx_[coll]; PROXY_ASSERT_MSG(ar_size < ctx->ar_size_, "updated AR size must be less than init size"); for (size_t g = 0; g < num_gpus_; g++) { proxy_cmd_->cmd_[g] = ARUpdateSizeCmd(); auto& cmd = boost::get<ARUpdateSizeCmd>(proxy_cmd_->cmd_[g]); auto& m2p_cmd = std::get<0>(cmd); m2p_cmd.ar_size_ = ar_size; m2p_cmd.coll_handle_ = coll; } proxy_cmd_->post_command(); ctx->update_size(ar_size); proxy_cmd_->wait_for_completion(); proxy_cmd_->reset(); } // TODO: rs sync threads is max(SMS + 1, RANKS) #define AR_MAX_THREADS 1024 #define AR_BARRIER_FLAG_OFFSET 0 #define RS_SM_SYNC_OFFSET (RANKS) #define AG_RANK_BCAST_OFFSET (RANKS + MAX_AR_CHANNELS) #define UNROLL 6 #define RS_SYNC_THREADS 32 // MAX of 
AR_CHANNELS + 1 and RANKS #define AR_WORKER_THREADS (blockDim.x - RS_SYNC_THREADS) template <int RANKS, typename T> static __global__ void __launch_bounds__(AR_MAX_THREADS) all_reduce_cuda(void** __restrict__ d_peer_ptrs, const int numlines, size_t* d_coll_cmd_, size_t* h_rs_cmd_, size_t* d_ag_cmd_, size_t** flags, const int peerblocklines, const int numblocks, const int device_id) { // Do a barrier across all ranks volatile size_t* my_flag = flags[device_id]; size_t base_count = *d_coll_cmd_; if (threadIdx.x < RANKS) { if (blockIdx.x == 0) { size_t* rem_flag = flags[threadIdx.x]; rem_flag[AR_BARRIER_FLAG_OFFSET + device_id] = (base_count + 1); } while (my_flag[AR_BARRIER_FLAG_OFFSET + threadIdx.x] < (base_count + 1)) { } } if (threadIdx.x < RS_SYNC_THREADS) { __syncthreads(); // Post barrier and init sync /* sync across SMs and write a single RS complete flag to host */ for (int nblock = 0; nblock < numblocks; nblock++) { asm volatile("bar.sync 1, %0;" ::"r"(AR_WORKER_THREADS + RS_SYNC_THREADS)); size_t flag_count = (nblock + base_count + 1); if (threadIdx.x == 0) { __threadfence(); if (blockIdx.x > 0) { my_flag[RS_SM_SYNC_OFFSET + blockIdx.x] = flag_count; } } else if (blockIdx.x == 0) { if (threadIdx.x < gridDim.x) { while (((volatile size_t*)my_flag)[RS_SM_SYNC_OFFSET + threadIdx.x] < flag_count) { } } } if (blockIdx.x == 0) { asm volatile("bar.sync 2, %0;" ::"r"(RS_SYNC_THREADS)); if (threadIdx.x == 0) { *h_rs_cmd_ = flag_count; } } } /* All gather flag broadcast to all ranks */ size_t cachedflag = base_count; if ((blockIdx.x == 0) && (threadIdx.x < RANKS)) { while (cachedflag < base_count + numblocks) { size_t newflag = *(volatile size_t*)(d_ag_cmd_); if (newflag == cachedflag) continue; cachedflag = newflag; size_t* rem_flag = flags[threadIdx.x]; rem_flag[AG_RANK_BCAST_OFFSET + device_id] = cachedflag; // HCTR_LOG(INFO, WORLD, "Wrote flag from %d: %llu %x\n", device_id, cachedflag, // d_peer_ptrs[device_id]); } } } else { constexpr int basethread = RS_SYNC_THREADS; const int warp = blockIdx.x + (threadIdx.x >> 5); uint4* remote_ptr[RANKS]; for (int r = 0; r < RANKS; r++) { remote_ptr[r] = reinterpret_cast<uint4*>(d_peer_ptrs[(r + device_id + warp) % RANKS]); } uint4* my_ptr = reinterpret_cast<uint4*>(d_peer_ptrs[device_id]); __syncthreads(); // Post barrier and init sync int blocklineoffset = 0; while (blocklineoffset < numlines) { /* reduce scatter */ const int remainder = min(numlines - blocklineoffset, peerblocklines * RANKS); const int blocklines = remainder / RANKS; // Assumption: numlines is divisible by RANKS const int blockstart = blocklineoffset + blocklines * device_id; const int myThreadIdx = threadIdx.x - basethread; for (int line = blockIdx.x * AR_WORKER_THREADS + myThreadIdx; line < blocklines; line += AR_WORKER_THREADS * gridDim.x) { uint4 val[RANKS]; #pragma unroll for (int i = 0; i < RANKS; i++) { val[i] = remote_ptr[i][blockstart + line]; } uint4 sum = val[0]; T* s = reinterpret_cast<T*>(&sum); #pragma unroll for (int i = 1; i < RANKS; i++) { T* v = reinterpret_cast<T*>(&val[i]); #pragma unroll for (int j = 0; j < sizeof(uint4) / sizeof(T); j++) { s[j] += v[j]; } } my_ptr[blockstart + line] = sum; } asm volatile("bar.sync 1, %0;" ::"r"(AR_WORKER_THREADS + RS_SYNC_THREADS)); blocklineoffset += peerblocklines * RANKS; } // Reduce scatter { /* All gather */ const int nwarps = ((AR_WORKER_THREADS) >> 5) / (RANKS - 1); const int myblockDim = nwarps << 5; const int mywarp = ((threadIdx.x - basethread) >> 5) / (RANKS - 1); const int maxthreadIdx = myblockDim * (RANKS - 1) 
+ basethread; const int mydest = (device_id + 1 + ((threadIdx.x - basethread) >> 5) % (RANKS - 1)) & (RANKS - 1); const int mythreadIdx = (mywarp << 5) + (threadIdx.x & 31); volatile size_t* flag = (volatile size_t*)&(my_flag[AG_RANK_BCAST_OFFSET + mydest]); uint4* dest_ptr = remote_ptr[((RANKS << 10) + mydest - device_id - warp) % RANKS]; blocklineoffset = 0; int gather_count = (base_count + 1); while (blocklineoffset < numlines) { const int remainder = min(numlines - blocklineoffset, peerblocklines * RANKS); const int blocklines = remainder / RANKS; const int blockstart = blocklineoffset; uint4* myptr = &my_ptr[blockstart + blocklines * mydest]; uint4* peerptr = &dest_ptr[blockstart + blocklines * mydest]; if (threadIdx.x < maxthreadIdx) { const int start_elem = mythreadIdx + myblockDim * blockIdx.x; const int end_elem = max(start_elem, blocklines); const int aligned_elem = ((end_elem - start_elem) / (myblockDim * gridDim.x * UNROLL)) * (myblockDim * gridDim.x * UNROLL); const int end_aligned = start_elem + aligned_elem; if (mythreadIdx == 0) { while (*flag < gather_count) { } // HCTR_LOG(INFO, WORLD, "Gather flag received %llu %d %d %d %d %d %d %x\n", *flag, // device_id, blockstart, blocklines, numlines, remainder, mydest, dest_ptr); gather_count++; } asm volatile("bar.sync %0, %1;" ::"r"(3 + mydest), "r"(myblockDim)); for (int line = start_elem; line < end_aligned; line += myblockDim * gridDim.x * UNROLL) { uint4 val[UNROLL]; #pragma unroll for (int i = 0; i < UNROLL; i++) { val[i] = peerptr[line + i * myblockDim * gridDim.x]; } #pragma unroll for (int i = 0; i < UNROLL; i++) { myptr[line + i * myblockDim * gridDim.x] = val[i]; } } for (int line = end_aligned; line < end_elem; line += myblockDim * gridDim.x) { myptr[line] = peerptr[line]; } } blocklineoffset += peerblocklines * RANKS; } } // All-gather } if ((threadIdx.x == 0) && (blockIdx.x == 0)) { *d_coll_cmd_ = (base_count + numblocks); } } template <int RANKS, typename T> void IbComm::all_reduce(ARCollHandle coll, cudaStream_t stream, size_t device_id) const { auto& ctx = ar_coll_ctx_[coll]; auto& gpu_ctx = ctx->ctx_[device_id]; auto warps = max(RANKS, AR_MAX_THREADS / 32); int numlines = ctx->ar_size_ / sizeof(uint4); int device_id_int = static_cast<int>(device_id); all_reduce_cuda<RANKS, T><<<ctx->cfg_nchannels_, warps * 32, 0, stream>>>( gpu_ctx->d_peer_ptrs_.get_ptr(), numlines, // number of 16B lines gpu_ctx->d_coll_cmd_.get_ptr(), gpu_ctx->h_rs_cmd_, gpu_ctx->d_ag_cmd_, gpu_ctx->d_flags_ptrs_.get_ptr(), ctx->peer_blocklines_, ctx->num_blocks_, device_id_int); } #define SUPPORTED_AR_RANKS (2)(4)(8)(16) template <typename T> void IbComm::all_reduce(ARCollHandle coll, cudaStream_t stream, size_t device_id) { #define SWITCHER(r, data, p) \ if (p == num_gpus_) { \ return all_reduce<p, T>(coll, stream, device_id); \ } BOOST_PP_SEQ_FOR_EACH(SWITCHER, "", SUPPORTED_AR_RANKS) #undef SWITCHER PROXY_ASSERT_MSG(false, "Unsupported number of local GPU"); } #define AR_METHOD(r, data, p) \ template void IbComm::all_reduce<p, __half>(ARCollHandle, cudaStream_t, size_t) const; \ template void IbComm::all_reduce<p, float>(ARCollHandle, cudaStream_t, size_t) const; \ template void IbComm::all_reduce<p, uint32_t>(ARCollHandle, cudaStream_t, size_t) const; BOOST_PP_SEQ_FOR_EACH(AR_METHOD, "", SUPPORTED_AR_RANKS) #undef AR_METHOD template void IbComm::all_reduce<__half>(ARCollHandle, cudaStream_t, size_t); template void IbComm::all_reduce<float>(ARCollHandle, cudaStream_t, size_t); template void IbComm::all_reduce<uint32_t>(ARCollHandle, 
cudaStream_t, size_t); } // namespace HugeCTR #endif
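// ---------------------------------------------------------------------------
// Illustrative sketch (not part of HugeCTR): the block sizing performed in
// ARCollContext::update_size() is easier to follow with concrete numbers. The
// helper below repeats the same arithmetic on the host; the config values and
// the 8-GPU / 64 MiB example in the trailing comment are made up for the
// walkthrough, not defaults taken from the library.
#include <cstddef>

namespace ar_sizing_example {

struct BlockSizing {
  size_t blocksize;
  size_t peer_blocklines;
  size_t num_blocks;
};

inline BlockSizing compute(size_t ar_size, size_t num_gpus, size_t nblocks,
                           size_t align_block, size_t min_block) {
  // blocksize = ceil(ceil(ar_size / align_block) / nblocks) * align_block,
  // then clamped from below, exactly as in ARCollContext::update_size().
  size_t blocksize =
      ((nblocks - 1 + (align_block - 1 + ar_size) / align_block) / nblocks) *
      align_block;
  if (blocksize < min_block) blocksize = min_block;
  BlockSizing s;
  s.blocksize = blocksize;
  s.peer_blocklines = blocksize / 16 / num_gpus;         // 16B == sizeof(uint4)
  s.num_blocks = (ar_size + blocksize - 1) / blocksize;  // must be <= AR_MAX_BLOCKS
  return s;
}

}  // namespace ar_sizing_example

// Example: ar_size = 64 MiB shared by 8 GPUs, nblocks = 8, align_block = 1024,
// min_block = 16384:
//   blocksize       = ceil(ceil(64 MiB / 1024) / 8) * 1024 = 8 MiB
//   peer_blocklines = 8 MiB / 16 / 8                       = 65536 uint4 lines
//   num_blocks      = ceil(64 MiB / 8 MiB)                 = 8
// ---------------------------------------------------------------------------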
the_stack
#include <thrust/scan.h> #include <thrust/sort.h> #include <thrust/device_vector.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/binary_search.h> #include <thrust/execution_policy.h> #include <thrust/sequence.h> #include <thrust/iterator/transform_iterator.h> #include <cub/thread/thread_search.cuh> #include "cunumeric/cuda_help.h" // above this threshold segment sort will be performed // by cub::DeviceSegmentedRadixSort instead of thrust::(stable_)sort // with tuple keys (not available for complex) #define SEGMENT_THRESHOLD_RADIX_SORT 400 namespace cunumeric { template <LegateTypeCode CODE> struct support_cub : std::true_type { }; template <> struct support_cub<LegateTypeCode::COMPLEX64_LT> : std::false_type { }; template <> struct support_cub<LegateTypeCode::COMPLEX128_LT> : std::false_type { }; template <LegateTypeCode CODE, std::enable_if_t<support_cub<CODE>::value>* = nullptr> void local_sort(const legate_type_of<CODE>* values_in, legate_type_of<CODE>* values_out, const int64_t* indices_in, int64_t* indices_out, const size_t volume, const size_t sort_dim_size, const bool stable, // cub sort is always stable cudaStream_t stream) { using VAL = legate_type_of<CODE>; // fallback to thrust approach as segmented radix sort is not suited for small segments if (volume == sort_dim_size || sort_dim_size > SEGMENT_THRESHOLD_RADIX_SORT) { cub_local_sort(values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stream); } else { thrust_local_sort( values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stable, stream); } } template <LegateTypeCode CODE, std::enable_if_t<!support_cub<CODE>::value>* = nullptr> void local_sort(const legate_type_of<CODE>* values_in, legate_type_of<CODE>* values_out, const int64_t* indices_in, int64_t* indices_out, const size_t volume, const size_t sort_dim_size, const bool stable, cudaStream_t stream) { using VAL = legate_type_of<CODE>; thrust_local_sort( values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stable, stream); } // auto align to multiples of 16 bytes auto get_16b_aligned = [](auto bytes) { return std::max<size_t>(16, (bytes + 15) / 16 * 16); }; auto get_16b_aligned_count = [](auto count, auto element_bytes) { return (get_16b_aligned(count * element_bytes) + element_bytes - 1) / element_bytes; }; // increase number of columns computed per block as long as either // 1. we have more threads in block than elements in row // OR // 2. 
a) block still large enough to handle full row // AND // b) We end up with too many blocks in y-direction otherwise size_t compute_cols_per_block(size_t row_size, size_t col_size) { size_t result = 1; while (result < THREADS_PER_BLOCK && (row_size * result < THREADS_PER_BLOCK || (row_size * result <= THREADS_PER_BLOCK * 16 && col_size / result > 256))) { result *= 2; } return result; } // create a launchconfig for 2d data copy kernels with coalesced rows // the y direction identifies the row to compute // in x-direction all threads are responsible for all columns of a single row // the heuristic ensures that // * every thread is assigned at leat 1 (except y-grid edge) and at most 32 elements std::tuple<dim3, dim3> generate_launchconfig_for_2d_copy(size_t row_size, size_t col_size) { int cols_per_block = compute_cols_per_block(row_size, col_size); dim3 block_shape = dim3(THREADS_PER_BLOCK / cols_per_block, cols_per_block); const size_t num_blocks_y = (col_size + block_shape.y - 1) / block_shape.y; const size_t num_blocks_x = ((row_size + 31) / 32 + block_shape.x - 1) / block_shape.x; dim3 grid_shape = dim3(num_blocks_x, num_blocks_y); return std::make_tuple(grid_shape, block_shape); } ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename VAL> struct SegmentSample { VAL value; size_t segment; int32_t rank; size_t position; }; template <typename VAL> struct SortPiece { Buffer<VAL> values; Buffer<int64_t> indices; size_t size; }; template <typename VAL> struct SegmentMergePiece { Buffer<size_t> segments; Buffer<VAL> values; Buffer<int64_t> indices; size_t size; }; template <typename VAL> struct SegmentSampleComparator : public thrust::binary_function<SegmentSample<VAL>, SegmentSample<VAL>, bool> { __host__ __device__ bool operator()(const SegmentSample<VAL>& lhs, const SegmentSample<VAL>& rhs) const { if (lhs.segment != rhs.segment) { return lhs.segment < rhs.segment; } else { // special case for unused samples if (lhs.rank < 0 || rhs.rank < 0) { return rhs.rank < 0 && lhs.rank >= 0; } if (lhs.value != rhs.value) { return lhs.value < rhs.value; } else if (lhs.rank != rhs.rank) { return lhs.rank < rhs.rank; } else { return lhs.position < rhs.position; } } } }; template <typename VAL> __global__ static void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) extract_split_positions_segments(const VAL* data, const size_t segment_size_l, const SegmentSample<VAL>* samples, const size_t num_segments_l, const size_t num_samples_per_segment, const size_t num_usable_samples_per_segment, size_t* split_positions, const size_t num_splitters, const size_t my_sort_rank) { const 
size_t splitter_idx_g = blockIdx.x * blockDim.x + threadIdx.x; if (splitter_idx_g >= num_splitters) return; const size_t num_splitters_per_segment = num_splitters / num_segments_l; const size_t splitter_pos = splitter_idx_g % num_splitters_per_segment; const size_t splitter_segment = splitter_idx_g / num_splitters_per_segment; const size_t index = (splitter_pos + 1) * num_usable_samples_per_segment / (num_splitters_per_segment + 1) - 1; const SegmentSample<VAL> splitter = samples[splitter_segment * num_samples_per_segment + index]; // now perform search on data to receive position *after* last element to be // part of the package for my_sort_rank splitter_idx_g const size_t offset = splitter_segment * segment_size_l; if (my_sort_rank > splitter.rank) { // position of the last position with smaller value than splitter.value + 1 split_positions[splitter_idx_g] = cub::LowerBound(data + offset, segment_size_l, splitter.value) + offset; } else if (my_sort_rank < splitter.rank) { // position of the first position with value larger than splitter.value split_positions[splitter_idx_g] = cub::UpperBound(data + offset, segment_size_l, splitter.value) + offset; } else { split_positions[splitter_idx_g] = splitter.position + 1; } } __global__ static void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) compute_send_dimensions(const size_t segment_size_l, size_t* size_send, const size_t num_segments_l, const size_t num_segments_l_aligned, const size_t* split_positions, const size_t num_sort_ranks, const size_t num_send_parts) { const size_t send_part = blockIdx.x * blockDim.x + threadIdx.x; if (send_part >= num_send_parts) return; const size_t rank = send_part / num_segments_l; const size_t segment = send_part % num_segments_l; size_t start_position = (rank > 0) ? split_positions[segment * (num_sort_ranks - 1) + rank - 1] : (segment_size_l * segment); size_t end_position = (rank < num_sort_ranks - 1) ? split_positions[segment * (num_sort_ranks - 1) + rank] : ((segment + 1) * segment_size_l); size_t size = end_position - start_position; size_send[rank * num_segments_l_aligned + segment] = size; } // A stateful callback functor that maintains a running prefix to be applied // during consecutive scan operations. struct BlockPrefixCallbackOp { // Running prefix size_t running_total; // Constructor __device__ BlockPrefixCallbackOp(size_t running_total) : running_total(running_total) {} // Callback operator to be entered by the first warp of threads in the block. // Thread-0 is responsible for returning a value for seeding the block-wide scan. __device__ size_t operator()(size_t block_aggregate) { size_t old_prefix = running_total; running_total += block_aggregate; return old_prefix; } }; __global__ static void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) compute_scan_per_rank(size_t* segment_blocks, size_t* size_send, size_t num_segments_l, const size_t num_segments_l_aligned) { assert(blockDim.x == THREADS_PER_BLOCK); // Specialize BlockScan for a 1D block of THREADS_PER_BLOCK threads on type size_t typedef cub::BlockScan<size_t, THREADS_PER_BLOCK> BlockScan; // Allocate shared memory for BlockScan __shared__ typename BlockScan::TempStorage temp_storage; // now we have 1 block per rank! 
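  // Each block scans its rank's row of size_send in THREADS_PER_BLOCK-sized
  // chunks, writing the exclusive prefix sums of the per-segment send sizes
  // into segment_blocks; thread 0 finally stores the row total in the extra
  // slot size_send[rank * num_segments_l_aligned + num_segments_l].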
const size_t rank = blockIdx.x; const size_t threadId = threadIdx.x; // Initialize running total BlockPrefixCallbackOp prefix_op(0); // Have the block iterate over segments of items for (int block_offset = 0; block_offset < num_segments_l; block_offset += THREADS_PER_BLOCK) { size_t thread_data = 0; // Load a segment of consecutive items that are blocked across threads if (block_offset + threadId < num_segments_l) { thread_data = size_send[rank * num_segments_l_aligned + block_offset + threadId]; } // Collectively compute the block-wide exclusive prefix sum BlockScan(temp_storage).ExclusiveSum(thread_data, thread_data, prefix_op); __syncthreads(); // Store scanned items to output segment if (block_offset + threadId < num_segments_l) { segment_blocks[rank * num_segments_l + block_offset + threadId] = thread_data; } } // also store sum of all in last element if (threadId == 0) { size_send[rank * num_segments_l_aligned + num_segments_l] = prefix_op(0); } } __global__ static void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) initialize_segment_start_positions(const size_t* start_positions, const size_t num_segments_l, size_t* segment_ids, const size_t num_segment_ids) { const size_t segment_idx = blockIdx.x * blockDim.x + threadIdx.x; if (segment_idx >= num_segments_l) return; unsigned long long int* ptr = (unsigned long long int*)segment_ids; const size_t position = start_positions[segment_idx]; if (position < num_segment_ids) atomicAdd(&(ptr[position]), (unsigned long long int)1l); } __global__ static void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) extract_segment_sizes(const size_t* segments, const size_t size, int64_t* segments_diff, const size_t num_segments_l, const size_t segments_size_l) { const size_t segment_idx = blockIdx.x * blockDim.x + threadIdx.x; if (segment_idx >= num_segments_l) return; if (num_segments_l == 1) { segments_diff[segment_idx] = size - segments_size_l; } else { const size_t position = cub::LowerBound(segments, size, segment_idx); const size_t next_position = cub::LowerBound(segments + position, size - position, segment_idx + 1) + position; const size_t segment_size = next_position - position; segments_diff[segment_idx] = segment_size - segments_size_l; } } template <typename VAL> __global__ static void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) copy_data_to_merge_buffers(const Buffer<size_t> segment_blocks_ptr, const Buffer<size_t> size_send, const Buffer<VAL> source_values, Buffer<VAL*> target_values, const size_t num_segments_l, const size_t num_segments_l_aligned, const size_t segment_size_l, const size_t my_rank, const size_t num_sort_ranks) { const size_t thread_offset = blockIdx.x * blockDim.x + threadIdx.x; const size_t threadgroup_size = blockDim.x * gridDim.x; const size_t segment_id = blockIdx.y * blockDim.y + threadIdx.y; if (segment_id >= num_segments_l) return; size_t source_offset = segment_size_l * segment_id; for (int r = 0; r < num_sort_ranks; ++r) { size_t target_offset = segment_blocks_ptr[r * num_segments_l + segment_id]; size_t local_size = size_send[r * num_segments_l_aligned + segment_id]; for (size_t pos = thread_offset; pos < local_size; pos += threadgroup_size) { target_values[r][target_offset + pos] = source_values[source_offset + pos]; } source_offset += local_size; } } template <typename VAL> __global__ static void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) combine_buffers_no_sort(const Buffer<VAL*> source_values, const Buffer<size_t> target_offsets, Buffer<VAL> target_values, const size_t 
merged_size, const size_t num_sort_ranks) { const size_t thread_offset = blockIdx.x * blockDim.x + threadIdx.x; const size_t threadgroup_size = blockDim.x * gridDim.x; const size_t rank_id = blockIdx.y * blockDim.y + threadIdx.y; if (rank_id >= num_sort_ranks) return; size_t target_offset = target_offsets[rank_id]; size_t local_size = (rank_id == num_sort_ranks - 1) ? (merged_size - target_offset) : (target_offsets[rank_id + 1] - target_offset); for (size_t pos = thread_offset; pos < local_size; pos += threadgroup_size) { target_values[target_offset + pos] = source_values[rank_id][pos]; } } template <typename VAL> __global__ static void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) copy_data_to_rebalance_buffers(const Buffer<int64_t> segment_diff_pos, const Buffer<int64_t> send_left, const Buffer<int64_t> send_right, const Buffer<int64_t> send_left_pos, const Buffer<int64_t> send_right_pos, const Buffer<VAL> source_values, const size_t source_size, Buffer<VAL> target_left_values, Buffer<VAL> target_right_values, const size_t num_segments_l, const size_t segment_size_l, const size_t my_rank, const size_t num_sort_ranks) { const size_t thread_offset = blockIdx.x * blockDim.x + threadIdx.x; const size_t threadgroup_size = blockDim.x * gridDim.x; const size_t segment_id = blockIdx.y * blockDim.y + threadIdx.y; if (segment_id >= num_segments_l) return; // copy left { int64_t send_left_size = send_left[segment_id]; if (send_left_size > 0) { size_t source_start = segment_size_l * segment_id + segment_diff_pos[segment_id]; size_t target_start = send_left_pos[segment_id]; for (size_t pos = thread_offset; pos < send_left_size; pos += threadgroup_size) { target_left_values[target_start + pos] = source_values[source_start + pos]; } } } // copy right { int64_t send_right_size = send_right[segment_id]; if (send_right_size > 0) { size_t source_end = (segment_id < num_segments_l - 1) ? (segment_size_l * (segment_id + 1) + segment_diff_pos[segment_id + 1]) : source_size; size_t source_start = source_end - send_right_size; size_t target_start = send_right_pos[segment_id]; for (size_t pos = thread_offset; pos < send_right_size; pos += threadgroup_size) { target_right_values[target_start + pos] = source_values[source_start + pos]; } } } } template <typename VAL> __global__ static void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) merge_rebalanced_buffers(const Buffer<int64_t> segment_diff_pos, const Buffer<int64_t> send_left, const Buffer<int64_t> send_right, const Buffer<int64_t> recv_left_pos, const Buffer<int64_t> recv_right_pos, const Buffer<VAL> source_values, const size_t source_size, const Buffer<VAL> recv_left_values, const Buffer<VAL> recv_right_values, VAL* target_values, const size_t num_segments_l, const size_t segment_size_l, const size_t my_rank, const size_t num_sort_ranks) { const size_t thread_offset = blockIdx.x * blockDim.x + threadIdx.x; const size_t threadgroup_size = blockDim.x * gridDim.x; const size_t segment_id = blockIdx.y * blockDim.y + threadIdx.y; if (segment_id >= num_segments_l) return; size_t target_offset = segment_id * segment_size_l; size_t source_start = segment_size_l * segment_id + segment_diff_pos[segment_id]; size_t source_end = (segment_id < num_segments_l - 1) ? 
(segment_size_l * (segment_id + 1) + segment_diff_pos[segment_id + 1]) : source_size; int64_t recv_left_size = send_left[segment_id] * -1; int64_t recv_right_size = send_right[segment_id] * -1; if (recv_left_size < 0) source_start -= recv_left_size; if (recv_right_size < 0) source_end += recv_right_size; // copy from left { if (recv_left_size > 0) { size_t recv_left_start = (recv_left_pos[segment_id] * -1); for (size_t pos = thread_offset; pos < recv_left_size; pos += threadgroup_size) { target_values[target_offset + pos] = recv_left_values[recv_left_start + pos]; } target_offset += recv_left_size; } } // copy main part { int64_t size = source_end - source_start; if (size > 0) { for (size_t pos = thread_offset; pos < size; pos += threadgroup_size) { target_values[target_offset + pos] = source_values[source_start + pos]; } target_offset += size; } } // copy from right { if (recv_right_size > 0) { size_t recv_right_start = (recv_right_pos[segment_id] * -1); for (size_t pos = thread_offset; pos < recv_right_size; pos += threadgroup_size) { target_values[target_offset + pos] = recv_right_values[recv_right_start + pos]; } target_offset += recv_right_size; } } #ifdef DEBUG_CUNUMERIC assert(target_offset == (segment_id + 1) * segment_size_l); #endif } template <typename VAL> __global__ static void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) extract_samples_segment(const VAL* data, const size_t num_segments_l, SegmentSample<VAL>* samples, const size_t num_samples_per_segment_l, const size_t segment_size_l, const size_t offset, const size_t num_sort_ranks, const size_t sort_rank) { const size_t sample_idx = blockIdx.x * blockDim.x + threadIdx.x; const size_t num_samples_l = num_samples_per_segment_l * num_segments_l; if (sample_idx >= num_samples_l) return; const size_t segment_id_l = sample_idx / num_samples_per_segment_l; const size_t segment_sample_idx = sample_idx % num_samples_per_segment_l; const size_t sample_index = offset + sample_idx; if (num_samples_per_segment_l < segment_size_l) { const size_t index = segment_id_l * segment_size_l + (segment_sample_idx + 1) * segment_size_l / num_samples_per_segment_l - 1; samples[sample_index].value = data[index]; samples[sample_index].rank = sort_rank; samples[sample_index].segment = segment_id_l; samples[sample_index].position = index; } else { // edge case where num_samples_l > volume if (segment_sample_idx < segment_size_l) { const size_t index = segment_id_l * segment_size_l + segment_sample_idx; samples[sample_index].value = data[index]; samples[sample_index].rank = sort_rank; samples[sample_index].segment = segment_id_l; samples[sample_index].position = index; } else { samples[sample_index].rank = -1; // not populated samples[sample_index].segment = segment_id_l; } } } // transpose from CUDA SDK #define BLOCK_DIM 16 __global__ void transpose(int64_t* odata, int64_t* idata, int width, int height) { __shared__ int64_t block[BLOCK_DIM][BLOCK_DIM + 1]; // read the matrix tile into shared memory unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x; unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y; if ((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } __syncthreads(); // write the transposed matrix tile to global memory xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x; yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y; if ((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = 
block[threadIdx.x][threadIdx.y]; } } struct subtract : public thrust::unary_function<int64_t, int64_t> { const int64_t constant_; subtract(int64_t constant) : constant_(constant) {} __host__ __device__ int64_t operator()(const int64_t& input) const { return input - constant_; } }; struct positive_value : public thrust::unary_function<int64_t, int64_t> { __host__ __device__ int64_t operator()(const int64_t& x) const { return x > 0 ? x : 0; } }; struct negative_value : public thrust::unary_function<int64_t, int64_t> { __host__ __device__ int64_t operator()(const int64_t& x) const { return x < 0 ? -x : 0; } }; struct positive_plus : public thrust::binary_function<int64_t, int64_t, int64_t> { __host__ __device__ int64_t operator()(const int64_t& lhs, const int64_t& rhs) const { return lhs > 0 ? (lhs + (rhs > 0 ? rhs : 0)) : (rhs > 0 ? rhs : 0); } }; struct negative_plus : public thrust::binary_function<int64_t, int64_t, int64_t> { __host__ __device__ int64_t operator()(const int64_t& lhs, const int64_t& rhs) const { return (lhs < 0 ? (lhs + (rhs < 0 ? rhs : 0)) : (rhs < 0 ? rhs : 0)); } }; struct modulusWithOffset : public thrust::binary_function<int64_t, int64_t, int64_t> { const size_t constant; modulusWithOffset(size_t _constant) : constant(_constant) {} __host__ __device__ int64_t operator()(const int64_t& lhs, const int64_t& rhs) const { return lhs % rhs + constant; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// template <LegateTypeCode CODE> SegmentMergePiece<legate_type_of<CODE>> merge_all_buffers( std::vector<SegmentMergePiece<legate_type_of<CODE>>>& merge_buffers, bool segmented, bool argsort, ThrustAllocator& alloc, cudaStream_t stream) { using VAL = legate_type_of<CODE>; // fallback to full sort for 1D and > 64 parts if (!segmented && merge_buffers.size() > 64) { SegmentMergePiece<VAL> result; // initialize target size_t merged_size = 0; size_t num_sort_ranks = merge_buffers.size(); Buffer<size_t> target_offsets = create_buffer<size_t>(num_sort_ranks, Memory::Z_COPY_MEM); // loop comparably small -> no init kernel for (int i = 0; i < num_sort_ranks; ++i) { target_offsets[i] = merged_size; merged_size += merge_buffers[i].size; } result.values = create_buffer<VAL>(merged_size); result.indices = create_buffer<int64_t>(argsort ? merged_size : 0); result.segments = create_buffer<size_t>(segmented ? 
merged_size : 0); result.size = merged_size; // copy data into result { Buffer<VAL*> val_buffers_ptr = create_buffer<VAL*>(num_sort_ranks, Memory::Z_COPY_MEM); for (size_t r = 0; r < num_sort_ranks; r++) { val_buffers_ptr[r] = merge_buffers[r].values.ptr(0); } auto elements_per_rank = std::max<size_t>(merged_size / num_sort_ranks, 1); auto [grid_shape, block_shape] = generate_launchconfig_for_2d_copy(elements_per_rank, num_sort_ranks); combine_buffers_no_sort<<<grid_shape, block_shape, 0, stream>>>( val_buffers_ptr, target_offsets, result.values, merged_size, num_sort_ranks); if (argsort) { Buffer<int64_t*> idc_buffers_ptr = create_buffer<int64_t*>(num_sort_ranks, Memory::Z_COPY_MEM); for (size_t r = 0; r < num_sort_ranks; r++) { idc_buffers_ptr[r] = merge_buffers[r].indices.ptr(0); } combine_buffers_no_sort<<<grid_shape, block_shape, 0, stream>>>( idc_buffers_ptr, target_offsets, result.indices, merged_size, num_sort_ranks); CHECK_CUDA(cudaStreamSynchronize(stream)); // needed before Z-copy destroy() idc_buffers_ptr.destroy(); } else { CHECK_CUDA(cudaStreamSynchronize(stream)); // needed before Z-copy destroy() } val_buffers_ptr.destroy(); target_offsets.destroy(); // destroy buffers for (int i = 0; i < num_sort_ranks; ++i) { SegmentMergePiece<VAL> piece = merge_buffers[i]; piece.values.destroy(); if (argsort) { piece.indices.destroy(); } } merge_buffers.clear(); } // sort data (locally) auto p_values = result.values.ptr(0); auto p_indices = argsort ? result.indices.ptr(0) : nullptr; local_sort<CODE>( p_values, p_values, p_indices, p_indices, merged_size, merged_size, true, stream); CHECK_CUDA_STREAM(stream); return result; } else { // maybe k-way merge is more efficient here... auto exec_policy = thrust::cuda::par(alloc).on(stream); size_t num_sort_ranks = merge_buffers.size(); std::vector<SegmentMergePiece<VAL>> destroy_queue; for (size_t stride = 1; stride < num_sort_ranks; stride *= 2) { for (size_t pos = 0; pos + stride < num_sort_ranks; pos += 2 * stride) { SegmentMergePiece<VAL> source1 = merge_buffers[pos]; SegmentMergePiece<VAL> source2 = merge_buffers[pos + stride]; auto merged_size = source1.size + source2.size; auto merged_values = create_buffer<VAL>(merged_size); auto merged_indices = create_buffer<int64_t>(argsort ? merged_size : 0); auto merged_segments = create_buffer<size_t>(segmented ? 
merged_size : 0); auto p_merged_values = merged_values.ptr(0); auto p_values1 = source1.values.ptr(0); auto p_values2 = source2.values.ptr(0); if (segmented) { auto p_merged_segments = merged_segments.ptr(0); auto p_segments1 = source1.segments.ptr(0); auto p_segments2 = source2.segments.ptr(0); auto comb_keys_1 = thrust::make_zip_iterator(thrust::make_tuple(p_segments1, p_values1)); auto comb_keys_2 = thrust::make_zip_iterator(thrust::make_tuple(p_segments2, p_values2)); auto comb_keys_merged = thrust::make_zip_iterator(thrust::make_tuple(p_merged_segments, p_merged_values)); if (argsort) { // merge with key/value auto p_indices1 = source1.indices.ptr(0); auto p_indices2 = source2.indices.ptr(0); auto p_merged_indices = merged_indices.ptr(0); thrust::merge_by_key(exec_policy, comb_keys_1, comb_keys_1 + source1.size, comb_keys_2, comb_keys_2 + source2.size, p_indices1, p_indices2, comb_keys_merged, p_merged_indices, thrust::less<thrust::tuple<size_t, VAL>>()); } else { thrust::merge(exec_policy, comb_keys_1, comb_keys_1 + source1.size, comb_keys_2, comb_keys_2 + source2.size, comb_keys_merged, thrust::less<thrust::tuple<size_t, VAL>>()); } } else { if (argsort) { // merge with key/value auto p_indices1 = source1.indices.ptr(0); auto p_indices2 = source2.indices.ptr(0); auto p_merged_indices = merged_indices.ptr(0); thrust::merge_by_key(exec_policy, p_values1, p_values1 + source1.size, p_values2, p_values2 + source2.size, p_indices1, p_indices2, p_merged_values, p_merged_indices); } else { thrust::merge(exec_policy, p_values1, p_values1 + source1.size, p_values2, p_values2 + source2.size, p_merged_values); } } destroy_queue.push_back(source1); destroy_queue.push_back(source2); merge_buffers[pos].values = merged_values; merge_buffers[pos].indices = merged_indices; merge_buffers[pos].segments = merged_segments; merge_buffers[pos].size = merged_size; } // destroy buffers only after each sweep for (int i = 0; i < destroy_queue.size(); ++i) { SegmentMergePiece<VAL> piece = destroy_queue[i]; piece.values.destroy(); if (segmented) { piece.segments.destroy(); } if (argsort) { piece.indices.destroy(); } } destroy_queue.clear(); } SegmentMergePiece<VAL> result = merge_buffers[0]; merge_buffers.clear(); CHECK_CUDA_STREAM(stream); return result; } } ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// template <typename VAL> void rebalance_data(SegmentMergePiece<VAL>& merge_buffer, void* output_ptr, /* global domain information */ size_t my_rank, // global NCCL rank /* domain information in sort dimension */ size_t my_sort_rank, // local rank id in sort dimension size_t num_sort_ranks, // #ranks that share a sort dimension size_t* sort_ranks, // rank ids that share a sort dimension with us size_t segment_size_l, // (local) segment size size_t num_segments_l, /* other */ bool argsort, ThrustAllocator& alloc, cudaStream_t stream, ncclComm_t* comm) { // output is either values or indices VAL* output_values = nullptr; int64_t* output_indices = nullptr; if (argsort) { output_indices 
= static_cast<int64_t*>(output_ptr); } else { output_values = static_cast<VAL*>(output_ptr); } auto exec_policy = thrust::cuda::par(alloc).on(stream); { // compute diff for each segment const size_t num_segments_l_aligned = get_16b_aligned_count(num_segments_l, sizeof(size_t)); auto segment_diff = create_buffer<int64_t>(num_segments_l_aligned, Memory::GPU_FB_MEM); { // start kernel to search from merge_buffer.segments const size_t num_blocks = (num_segments_l + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; extract_segment_sizes<<<num_blocks, THREADS_PER_BLOCK, 0, stream>>>( merge_buffer.segments.ptr(0), merge_buffer.size, segment_diff.ptr(0), num_segments_l, segment_size_l); } merge_buffer.segments.destroy(); if (argsort) { merge_buffer.values.destroy(); } #ifdef DEBUG_CUNUMERIC { size_t reduce = thrust::reduce(exec_policy, segment_diff.ptr(0), segment_diff.ptr(0) + num_segments_l, 0); size_t volume = segment_size_l * num_segments_l; assert(merge_buffer.size - volume == reduce); } #endif // allocate target Buffer<int64_t> segment_diff_buffers = create_buffer<int64_t>(num_segments_l_aligned * num_sort_ranks, Memory::GPU_FB_MEM); // communicate segment diffs CHECK_NCCL(ncclGroupStart()); for (size_t r = 0; r < num_sort_ranks; r++) { CHECK_NCCL(ncclSend( segment_diff.ptr(0), num_segments_l_aligned, ncclInt64, sort_ranks[r], *comm, stream)); CHECK_NCCL(ncclRecv(segment_diff_buffers.ptr(r * num_segments_l_aligned), num_segments_l_aligned, ncclInt64, sort_ranks[r], *comm, stream)); } CHECK_NCCL(ncclGroupEnd()); // copy to transpose structure [segments][ranks] auto segment_diff_2d = create_buffer<int64_t>(num_segments_l_aligned * num_sort_ranks, Memory::GPU_FB_MEM); // Transpose { dim3 grid((num_segments_l_aligned + BLOCK_DIM - 1) / BLOCK_DIM, (num_sort_ranks + BLOCK_DIM - 1) / BLOCK_DIM); dim3 block(BLOCK_DIM, BLOCK_DIM); transpose<<<grid, block, 0, stream>>>(segment_diff_2d.ptr(0), segment_diff_buffers.ptr(0), num_segments_l_aligned, num_sort_ranks); } #ifdef DEBUG_CUNUMERIC { for (size_t segment = 0; segment < num_segments_l; ++segment) { assert(0 == thrust::reduce(exec_policy, segment_diff_2d.ptr(segment * num_sort_ranks), segment_diff_2d.ptr(segment * num_sort_ranks) + num_sort_ranks, 0)); } } #endif segment_diff_buffers.destroy(); // 2d data [segments][ranks] /* -2 2 1 1 -3 2 -1 -2 0 1 2 -1 1 0 (inclusive scan) neg --> receive from right pos --> send right 0 2 0 -1 -2 1 -1 (incl.scan right) neg --> receive from left pos --> send left edge case --> send more than whole line should not happen due to sample choice! */ // 2 (signed) arrays - left/right for every segment auto send_left = create_buffer<int64_t>(num_segments_l, Memory::GPU_FB_MEM); auto send_right = create_buffer<int64_t>(num_segments_l, Memory::GPU_FB_MEM); // compute data to send.... 
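// Added walkthrough of the example above: for per-rank diffs d = [-2, 2, 1, 1, -3, 2, -1]
// the left-to-right inclusive scan is [-2, 0, 1, 2, -1, 1, 0]; rank r sends scan[r]
// elements to its right neighbor when positive and receives -scan[r] from it when
// negative. The right-to-left scan [0, 2, 0, -1, -2, 1, -1] plays the same role for the
// left neighbor. Scanning the flat [segments][ranks] array in a single pass below is
// safe because every segment row sums to zero, so no carry leaks into the next segment.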
auto segment_diff_2d_scan = create_buffer<int64_t>(num_segments_l * num_sort_ranks, Memory::GPU_FB_MEM); thrust::device_ptr<int64_t> segment_diff_2d_ptr(segment_diff_2d.ptr(0)); thrust::device_ptr<int64_t> segment_diff_2d_scan_ptr(segment_diff_2d_scan.ptr(0)); thrust::inclusive_scan(exec_policy, segment_diff_2d_ptr, segment_diff_2d_ptr + num_segments_l * num_sort_ranks, segment_diff_2d_scan_ptr); CHECK_CUDA(cudaMemcpy2DAsync(send_right.ptr(0), sizeof(int64_t), segment_diff_2d_scan.ptr(0) + my_sort_rank, num_sort_ranks * sizeof(int64_t), sizeof(int64_t), num_segments_l, cudaMemcpyDeviceToDevice, stream)); thrust::reverse_iterator<thrust::device_vector<int64_t>::iterator> iter_in( segment_diff_2d_ptr + num_segments_l * num_sort_ranks); thrust::reverse_iterator<thrust::device_vector<int64_t>::iterator> iter_out( segment_diff_2d_scan_ptr + num_segments_l * num_sort_ranks); thrust::inclusive_scan( exec_policy, iter_in, iter_in + num_segments_l * num_sort_ranks, iter_out); CHECK_CUDA(cudaMemcpy2DAsync(send_left.ptr(0), sizeof(int64_t), segment_diff_2d_scan.ptr(0) + my_sort_rank, num_sort_ranks * sizeof(int64_t), sizeof(int64_t), num_segments_l, cudaMemcpyDeviceToDevice, stream)); segment_diff_2d.destroy(); segment_diff_2d_scan.destroy(); // package data to send size_t send_left_size = thrust::transform_reduce(exec_policy, send_left.ptr(0), send_left.ptr(0) + num_segments_l, positive_value(), 0, thrust::plus<int64_t>()); size_t recv_left_size = thrust::transform_reduce(exec_policy, send_left.ptr(0), send_left.ptr(0) + num_segments_l, negative_value(), 0, thrust::plus<int64_t>()); size_t send_right_size = thrust::transform_reduce(exec_policy, send_right.ptr(0), send_right.ptr(0) + num_segments_l, positive_value(), 0, thrust::plus<int64_t>()); size_t recv_right_size = thrust::transform_reduce(exec_policy, send_right.ptr(0), send_right.ptr(0) + num_segments_l, negative_value(), 0, thrust::plus<int64_t>()); SortPiece<VAL> send_left_data, recv_left_data, send_right_data, recv_right_data; send_left_data.size = send_left_size; recv_left_data.size = recv_left_size; send_right_data.size = send_right_size; recv_right_data.size = recv_right_size; if (argsort) { send_left_data.indices = create_buffer<int64_t>(send_left_size, Memory::GPU_FB_MEM); recv_left_data.indices = create_buffer<int64_t>(recv_left_size, Memory::GPU_FB_MEM); send_right_data.indices = create_buffer<int64_t>(send_right_size, Memory::GPU_FB_MEM); recv_right_data.indices = create_buffer<int64_t>(recv_right_size, Memory::GPU_FB_MEM); } else { send_left_data.values = create_buffer<VAL>(send_left_size, Memory::GPU_FB_MEM); recv_left_data.values = create_buffer<VAL>(recv_left_size, Memory::GPU_FB_MEM); send_right_data.values = create_buffer<VAL>(send_right_size, Memory::GPU_FB_MEM); recv_right_data.values = create_buffer<VAL>(recv_right_size, Memory::GPU_FB_MEM); } Buffer<int64_t> segment_diff_pos; { // need scan of segment_diff // need scan of (positive!) 
send_left, send_right segment_diff_pos = create_buffer<int64_t>(num_segments_l, Memory::GPU_FB_MEM); auto send_left_pos = create_buffer<int64_t>(num_segments_l, Memory::GPU_FB_MEM); auto send_right_pos = create_buffer<int64_t>(num_segments_l, Memory::GPU_FB_MEM); { thrust::device_ptr<int64_t> segment_diff_ptr(segment_diff.ptr(0)); thrust::device_ptr<int64_t> segment_diff_pos_ptr(segment_diff_pos.ptr(0)); thrust::device_ptr<int64_t> send_left_ptr(send_left.ptr(0)); thrust::device_ptr<int64_t> send_left_pos_ptr(send_left_pos.ptr(0)); thrust::device_ptr<int64_t> send_right_ptr(send_right.ptr(0)); thrust::device_ptr<int64_t> send_right_pos_ptr(send_right_pos.ptr(0)); thrust::exclusive_scan( exec_policy, segment_diff_ptr, segment_diff_ptr + num_segments_l, segment_diff_pos_ptr); thrust::exclusive_scan(exec_policy, send_left_ptr, send_left_ptr + num_segments_l, send_left_pos_ptr, 0, positive_plus()); thrust::exclusive_scan(exec_policy, send_right_ptr, send_right_ptr + num_segments_l, send_right_pos_ptr, 0, positive_plus()); } auto [grid_shape, block_shape] = generate_launchconfig_for_2d_copy(segment_size_l, num_segments_l); if (argsort) { copy_data_to_rebalance_buffers<<<grid_shape, block_shape, 0, stream>>>( segment_diff_pos, send_left, send_right, send_left_pos, send_right_pos, merge_buffer.indices, merge_buffer.size, send_left_data.indices, send_right_data.indices, num_segments_l, segment_size_l, my_rank, num_sort_ranks); } else { copy_data_to_rebalance_buffers<<<grid_shape, block_shape, 0, stream>>>( segment_diff_pos, send_left, send_right, send_left_pos, send_right_pos, merge_buffer.values, merge_buffer.size, send_left_data.values, send_right_data.values, num_segments_l, segment_size_l, my_rank, num_sort_ranks); } send_left_pos.destroy(); send_right_pos.destroy(); } assert(send_left_data.size == send_left_size); assert(send_right_data.size == send_right_size); // send/receive overlapping data if (send_left_size + recv_left_size + send_right_size + recv_right_size > 0) { if (argsort) { CHECK_NCCL(ncclGroupStart()); if (send_left_size > 0) { CHECK_NCCL(ncclSend(send_left_data.indices.ptr(0), send_left_data.size, ncclInt64, sort_ranks[my_sort_rank - 1], *comm, stream)); } if (recv_left_size > 0) { CHECK_NCCL(ncclRecv(recv_left_data.indices.ptr(0), recv_left_data.size, ncclInt64, sort_ranks[my_sort_rank - 1], *comm, stream)); } if (send_right_size > 0) { CHECK_NCCL(ncclSend(send_right_data.indices.ptr(0), send_right_data.size, ncclInt64, sort_ranks[my_sort_rank + 1], *comm, stream)); } if (recv_right_size > 0) { CHECK_NCCL(ncclRecv(recv_right_data.indices.ptr(0), recv_right_data.size, ncclInt64, sort_ranks[my_sort_rank + 1], *comm, stream)); } CHECK_NCCL(ncclGroupEnd()); } else { CHECK_NCCL(ncclGroupStart()); if (send_left_size > 0) { CHECK_NCCL(ncclSend(send_left_data.values.ptr(0), send_left_data.size * sizeof(VAL), ncclInt8, sort_ranks[my_sort_rank - 1], *comm, stream)); } if (recv_left_size > 0) { CHECK_NCCL(ncclRecv(recv_left_data.values.ptr(0), recv_left_data.size * sizeof(VAL), ncclInt8, sort_ranks[my_sort_rank - 1], *comm, stream)); } if (send_right_size > 0) { CHECK_NCCL(ncclSend(send_right_data.values.ptr(0), send_right_data.size * sizeof(VAL), ncclInt8, sort_ranks[my_sort_rank + 1], *comm, stream)); } if (recv_right_size > 0) { CHECK_NCCL(ncclRecv(recv_right_data.values.ptr(0), recv_right_data.size * sizeof(VAL), ncclInt8, sort_ranks[my_sort_rank + 1], *comm, stream)); } CHECK_NCCL(ncclGroupEnd()); } } if (argsort) { send_left_data.indices.destroy(); 
send_right_data.indices.destroy(); } else { send_left_data.values.destroy(); send_right_data.values.destroy(); } // merge data into target { // need scan of (negative!) send_left, send_right auto recv_left_pos = create_buffer<int64_t>(num_segments_l, Memory::GPU_FB_MEM); auto recv_right_pos = create_buffer<int64_t>(num_segments_l, Memory::GPU_FB_MEM); { thrust::device_ptr<int64_t> recv_left_ptr(send_left.ptr(0)); thrust::device_ptr<int64_t> recv_left_pos_ptr(recv_left_pos.ptr(0)); thrust::device_ptr<int64_t> recv_right_ptr(send_right.ptr(0)); thrust::device_ptr<int64_t> recv_right_pos_ptr(recv_right_pos.ptr(0)); thrust::exclusive_scan(exec_policy, recv_left_ptr, recv_left_ptr + num_segments_l, recv_left_pos_ptr, 0, negative_plus()); thrust::exclusive_scan(exec_policy, recv_right_ptr, recv_right_ptr + num_segments_l, recv_right_pos_ptr, 0, negative_plus()); } auto [grid_shape, block_shape] = generate_launchconfig_for_2d_copy(segment_size_l, num_segments_l); if (argsort) { merge_rebalanced_buffers<<<grid_shape, block_shape, 0, stream>>>(segment_diff_pos, send_left, send_right, recv_left_pos, recv_right_pos, merge_buffer.indices, merge_buffer.size, recv_left_data.indices, recv_right_data.indices, output_indices, num_segments_l, segment_size_l, my_rank, num_sort_ranks); } else { merge_rebalanced_buffers<<<grid_shape, block_shape, 0, stream>>>(segment_diff_pos, send_left, send_right, recv_left_pos, recv_right_pos, merge_buffer.values, merge_buffer.size, recv_left_data.values, recv_right_data.values, output_values, num_segments_l, segment_size_l, my_rank, num_sort_ranks); } segment_diff_pos.destroy(); recv_left_pos.destroy(); recv_right_pos.destroy(); } // remove segment_sizes, all buffers should be destroyed... segment_diff.destroy(); send_left.destroy(); send_right.destroy(); if (argsort) { merge_buffer.indices.destroy(); recv_left_data.indices.destroy(); recv_right_data.indices.destroy(); } else { merge_buffer.values.destroy(); recv_left_data.values.destroy(); recv_right_data.values.destroy(); } CHECK_CUDA_STREAM(stream); } } ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// template <LegateTypeCode CODE> void sample_sort_nccl_nd(SortPiece<legate_type_of<CODE>> local_sorted, Array& output_array_unbound, // only for unbound usage when !rebalance void* output_ptr, /* global domain information */ size_t my_rank, // global NCCL rank size_t num_ranks, size_t segment_size_g, /* domain information in sort dimension */ size_t my_sort_rank, // local rank id in sort dimension size_t num_sort_ranks, // #ranks that share a sort dimension size_t* sort_ranks, // rank ids that share a sort dimension with us size_t segment_size_l, // (local) segment size /* other */ bool rebalance, bool argsort, cudaStream_t stream, ncclComm_t* comm) { using VAL = legate_type_of<CODE>; size_t volume = local_sorted.size; bool is_unbound_1d_storage = output_array_unbound.dim() == -1; ///////////////////////////////////////////////////////////////////////////////////////////////// /////////////// 
Part 0: detection of empty nodes ///////////////////////////////////////////////////////////////////////////////////////////////// // first of all we need to check for processes that don't want // to take part in the computation. This might lead to a reduction of // sort ranks. Note that if segment_size_l>0 && volume==0 means that we have // a full sort group being empty, this should not affect local sort rank size. { auto worker_count_d = create_buffer<int32_t>(1, Memory::GPU_FB_MEM); int worker_count = (segment_size_l > 0 ? 1 : 0); CHECK_CUDA(cudaMemcpyAsync( worker_count_d.ptr(0), &worker_count, sizeof(int32_t), cudaMemcpyHostToDevice, stream)); CHECK_NCCL(ncclAllReduce( worker_count_d.ptr(0), worker_count_d.ptr(0), 1, ncclInt32, ncclSum, *comm, stream)); CHECK_CUDA(cudaMemcpyAsync( &worker_count, worker_count_d.ptr(0), sizeof(int32_t), cudaMemcpyDeviceToHost, stream)); CHECK_CUDA(cudaStreamSynchronize(stream)); if (worker_count < num_ranks) { const size_t number_sort_groups = num_ranks / num_sort_ranks; num_sort_ranks = worker_count / number_sort_groups; } worker_count_d.destroy(); // early out if (volume == 0) { if (is_unbound_1d_storage) { // we need to return an empty buffer here if (argsort) { auto buffer = create_buffer<int64_t>(0, Memory::GPU_FB_MEM); output_array_unbound.return_data(buffer, Point<1>(0)); } else { auto buffer = create_buffer<VAL>(0, Memory::GPU_FB_MEM); output_array_unbound.return_data(buffer, Point<1>(0)); } } return; } } ///////////////////////////////////////////////////////////////////////////////////////////////// /////////////// Part 1: select and share samples accross sort domain ///////////////////////////////////////////////////////////////////////////////////////////////// // collect local samples - for now we take num_sort_ranks samples for every node/line // worst case this leads to imbalance of x2 size_t num_segments_l = volume / segment_size_l; size_t num_samples_per_segment_l = num_sort_ranks; size_t num_samples_l = num_samples_per_segment_l * num_segments_l; size_t num_samples_per_segment_g = num_samples_per_segment_l * num_sort_ranks; size_t num_samples_g = num_samples_per_segment_g * num_segments_l; auto samples = create_buffer<SegmentSample<VAL>>(num_samples_g, Memory::GPU_FB_MEM); size_t offset = num_samples_l * my_sort_rank; { const size_t num_blocks = (num_samples_l + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; extract_samples_segment<<<num_blocks, THREADS_PER_BLOCK, 0, stream>>>( local_sorted.values.ptr(0), num_segments_l, samples.ptr(0), num_samples_per_segment_l, segment_size_l, offset, num_sort_ranks, my_sort_rank); CHECK_CUDA_STREAM(stream); } // AllGather does not work here as not all have the same amount! 
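// (The exchange below therefore issues explicit ncclSend/ncclRecv pairs to every other
// rank in the sort row, using sample blocks padded to a 16-byte aligned element count,
// and scatters the received blocks back into `samples`.)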
// This is all2all restricted to one sort row { // allocate receive buffer const size_t aligned_count = get_16b_aligned_count(num_samples_l, sizeof(SegmentSample<VAL>)); auto send_buffer = create_buffer<SegmentSample<VAL>>(aligned_count, Memory::GPU_FB_MEM); CHECK_CUDA(cudaMemcpyAsync(send_buffer.ptr(0), samples.ptr(offset), sizeof(SegmentSample<VAL>) * num_samples_l, cudaMemcpyDeviceToDevice, stream)); auto recv_buffer = create_buffer<SegmentSample<VAL>>(aligned_count * num_sort_ranks, Memory::GPU_FB_MEM); CHECK_NCCL(ncclGroupStart()); for (size_t r = 0; r < num_sort_ranks; r++) { if (r != my_sort_rank) { CHECK_NCCL(ncclSend(send_buffer.ptr(0), aligned_count * sizeof(SegmentSample<VAL>), ncclInt8, sort_ranks[r], *comm, stream)); CHECK_NCCL(ncclRecv(recv_buffer.ptr(aligned_count * r), aligned_count * sizeof(SegmentSample<VAL>), ncclInt8, sort_ranks[r], *comm, stream)); } } CHECK_NCCL(ncclGroupEnd()); // copy back for (size_t r = 0; r < num_sort_ranks; r++) { if (r != my_sort_rank) { CHECK_CUDA(cudaMemcpyAsync(samples.ptr(num_samples_l * r), recv_buffer.ptr(aligned_count * r), sizeof(SegmentSample<VAL>) * num_samples_l, cudaMemcpyDeviceToDevice, stream)); } } // destroy send_buffer.destroy(); recv_buffer.destroy(); CHECK_CUDA_STREAM(stream); } ///////////////////////////////////////////////////////////////////////////////////////////////// /////////////// Part 2: select splitters from samples and collect positions in local data ///////////////////////////////////////////////////////////////////////////////////////////////// // sort samples on device auto alloc = ThrustAllocator(Memory::GPU_FB_MEM); auto exec_policy = thrust::cuda::par(alloc).on(stream); thrust::stable_sort( exec_policy, samples.ptr(0), samples.ptr(0) + num_samples_g, SegmentSampleComparator<VAL>()); // check whether we have invalid samples (in case one participant did not have enough) SegmentSample<VAL> invalid_sample; invalid_sample.segment = 0; invalid_sample.rank = -1; auto lower_bound = thrust::lower_bound(exec_policy, samples.ptr(0), samples.ptr(0) + num_samples_per_segment_g, invalid_sample, SegmentSampleComparator<VAL>()); size_t num_usable_samples_per_segment = lower_bound - samples.ptr(0); // select splitters / positions based on samples (on device) // the indexing is split_positions[segments][positions] const size_t num_splitters = (num_sort_ranks - 1) * num_segments_l; auto split_positions = create_buffer<size_t>(num_splitters, Memory::GPU_FB_MEM); { const size_t num_blocks = (num_splitters + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; extract_split_positions_segments<<<num_blocks, THREADS_PER_BLOCK, 0, stream>>>( local_sorted.values.ptr(0), segment_size_l, samples.ptr(0), num_segments_l, num_samples_per_segment_g, num_usable_samples_per_segment, split_positions.ptr(0), num_splitters, my_sort_rank); } // segment_blocks[r][segment]->position of data in segment for process r // perform blocksize wide scan on size_send[r][block*blocksize] within warp Buffer<size_t> segment_blocks = create_buffer<size_t>(num_segments_l * num_sort_ranks, Memory::GPU_FB_MEM); // initialize sizes to send const size_t num_segments_l_aligned = get_16b_aligned_count(num_segments_l + 1, sizeof(size_t)); Buffer<size_t> size_send = create_buffer<size_t>(num_segments_l_aligned * num_sort_ranks, Memory::GPU_FB_MEM); { const size_t num_send_parts = num_sort_ranks * num_segments_l; const size_t num_blocks = (num_send_parts + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; compute_send_dimensions<<<num_blocks, THREADS_PER_BLOCK, 0, 
stream>>>(segment_size_l, size_send.ptr(0), num_segments_l, num_segments_l_aligned, split_positions.ptr(0), num_sort_ranks, num_send_parts); compute_scan_per_rank<<<num_sort_ranks, THREADS_PER_BLOCK, 0, stream>>>( segment_blocks.ptr(0), size_send.ptr(0), num_segments_l, num_segments_l_aligned); CHECK_CUDA_STREAM(stream); } // cleanup intermediate data structures samples.destroy(); split_positions.destroy(); ///////////////////////////////////////////////////////////////////////////////////////////////// /////////////// Part 3: communicate data in sort domain ///////////////////////////////////////////////////////////////////////////////////////////////// // all2all exchange send/receive sizes Buffer<size_t> size_recv = create_buffer<size_t>(num_segments_l_aligned * num_sort_ranks, Memory::GPU_FB_MEM); CHECK_NCCL(ncclGroupStart()); for (size_t r = 0; r < num_sort_ranks; r++) { CHECK_NCCL(ncclSend(size_send.ptr(r * num_segments_l_aligned), num_segments_l_aligned, ncclUint64, sort_ranks[r], *comm, stream)); CHECK_NCCL(ncclRecv(size_recv.ptr(r * num_segments_l_aligned), num_segments_l_aligned, ncclUint64, sort_ranks[r], *comm, stream)); } CHECK_NCCL(ncclGroupEnd()); // we need the amount of data to transfer on the host --> get it // FIXME auto kind = CuNumeric::has_numamem ? Memory::Kind::SOCKET_MEM : Memory::Kind::SYSTEM_MEM; Buffer<size_t> size_send_total = create_buffer<size_t>(num_sort_ranks, Memory::Z_COPY_MEM); Buffer<size_t> size_recv_total = create_buffer<size_t>(num_sort_ranks, Memory::Z_COPY_MEM); { CHECK_CUDA(cudaMemcpy2DAsync(size_send_total.ptr(0), 1 * sizeof(size_t), size_send.ptr(num_segments_l), num_segments_l_aligned * sizeof(size_t), sizeof(int64_t), num_sort_ranks, cudaMemcpyDeviceToHost, stream)); CHECK_CUDA(cudaMemcpy2DAsync(size_recv_total.ptr(0), 1 * sizeof(size_t), size_recv.ptr(num_segments_l), num_segments_l_aligned * sizeof(size_t), sizeof(int64_t), num_sort_ranks, cudaMemcpyDeviceToHost, stream)); // need to sync as we share values in between host/device CHECK_CUDA(cudaStreamSynchronize(stream)); } // copy values into aligned send buffer std::vector<Buffer<VAL>> val_send_buffers(num_sort_ranks); std::vector<Buffer<int64_t>> idc_send_buffers(num_sort_ranks); { for (size_t r = 0; r < num_sort_ranks; r++) { val_send_buffers[r] = create_buffer<VAL>(size_send_total[r], Memory::GPU_FB_MEM); if (argsort) { idc_send_buffers[r] = create_buffer<int64_t>(size_send_total[r], Memory::GPU_FB_MEM); } } { Buffer<VAL*> val_send_buffers_ptr = create_buffer<VAL*>(num_sort_ranks, Memory::Z_COPY_MEM); for (size_t r = 0; r < num_sort_ranks; r++) { val_send_buffers_ptr[r] = val_send_buffers[r].ptr(0); } auto [grid_shape, block_shape] = generate_launchconfig_for_2d_copy(segment_size_l, num_segments_l); copy_data_to_merge_buffers<<<grid_shape, block_shape, 0, stream>>>(segment_blocks, size_send, local_sorted.values, val_send_buffers_ptr, num_segments_l, num_segments_l_aligned, segment_size_l, my_rank, num_sort_ranks); if (argsort) { Buffer<int64_t*> idc_send_buffers_ptr = create_buffer<int64_t*>(num_sort_ranks, Memory::Z_COPY_MEM); for (size_t r = 0; r < num_sort_ranks; r++) { idc_send_buffers_ptr[r] = idc_send_buffers[r].ptr(0); } // need to sync as we share values in between host/device copy_data_to_merge_buffers<<<grid_shape, block_shape, 0, stream>>>(segment_blocks, size_send, local_sorted.indices, idc_send_buffers_ptr, num_segments_l, num_segments_l_aligned, segment_size_l, my_rank, num_sort_ranks); CHECK_CUDA(cudaStreamSynchronize(stream)); // needed before Z-copy destroy() 
idc_send_buffers_ptr.destroy(); } else { CHECK_CUDA(cudaStreamSynchronize(stream)); // needed before Z-copy destroy() } val_send_buffers_ptr.destroy(); CHECK_CUDA_STREAM(stream); } local_sorted.values.destroy(); if (argsort) local_sorted.indices.destroy(); segment_blocks.destroy(); } // allocate target buffers std::vector<SegmentMergePiece<VAL>> merge_buffers(num_sort_ranks); { for (size_t r = 0; r < num_sort_ranks; ++r) { auto size = size_recv_total[r]; merge_buffers[r].size = size; // initialize segment information if (num_segments_l > 1) { merge_buffers[r].segments = create_buffer<size_t>(size, Memory::GPU_FB_MEM); // 0 1 2 1 3 // counts per segment to receive // 0 1 3 4 7 // 0 1 2 3 4 5 6 // 1 1 0 1 1 0 0 // 1 2 2 3 4 4 4 // segment id for all received elements thrust::inclusive_scan(exec_policy, size_recv.ptr(r * num_segments_l_aligned), size_recv.ptr(r * num_segments_l_aligned) + num_segments_l + 1, size_recv.ptr(r * num_segments_l_aligned)); CHECK_CUDA( cudaMemsetAsync(merge_buffers[r].segments.ptr(0), 0, size * sizeof(size_t), stream)); const size_t num_blocks = (num_segments_l + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; assert(sizeof(unsigned long long int) == sizeof(size_t)); // kernel needs to cast for atomicAdd... initialize_segment_start_positions<<<num_blocks, THREADS_PER_BLOCK, 0, stream>>>( size_recv.ptr(r * num_segments_l_aligned), num_segments_l - 1, merge_buffers[r].segments.ptr(0), merge_buffers[r].size); thrust::inclusive_scan(exec_policy, merge_buffers[r].segments.ptr(0), merge_buffers[r].segments.ptr(0) + size, merge_buffers[r].segments.ptr(0)); } merge_buffers[r].values = create_buffer<VAL>(size, Memory::GPU_FB_MEM); if (argsort) { merge_buffers[r].indices = create_buffer<int64_t>(size, Memory::GPU_FB_MEM); } else { merge_buffers[r].indices = create_buffer<int64_t>(0, Memory::GPU_FB_MEM); } } CHECK_CUDA_STREAM(stream); } // communicate all2all (in sort dimension) CHECK_NCCL(ncclGroupStart()); for (size_t r = 0; r < num_sort_ranks; r++) { CHECK_NCCL(ncclSend(val_send_buffers[r].ptr(0), size_send_total[r] * sizeof(VAL), ncclInt8, sort_ranks[r], *comm, stream)); CHECK_NCCL(ncclRecv(merge_buffers[r].values.ptr(0), merge_buffers[r].size * sizeof(VAL), ncclInt8, sort_ranks[r], *comm, stream)); } CHECK_NCCL(ncclGroupEnd()); if (argsort) { CHECK_NCCL(ncclGroupStart()); for (size_t r = 0; r < num_sort_ranks; r++) { CHECK_NCCL(ncclSend( idc_send_buffers[r].ptr(0), size_send_total[r], ncclInt64, sort_ranks[r], *comm, stream)); CHECK_NCCL(ncclRecv(merge_buffers[r].indices.ptr(0), merge_buffers[r].size, ncclInt64, sort_ranks[r], *comm, stream)); } CHECK_NCCL(ncclGroupEnd()); } // cleanup remaining buffers size_send.destroy(); size_recv.destroy(); size_send_total.destroy(); size_recv_total.destroy(); for (size_t r = 0; r < num_sort_ranks; r++) { val_send_buffers[r].destroy(); if (argsort) idc_send_buffers[r].destroy(); } CHECK_CUDA_STREAM(stream); ///////////////////////////////////////////////////////////////////////////////////////////////// /////////////// Part 4: merge data ///////////////////////////////////////////////////////////////////////////////////////////////// // now merge sort all into the result buffer SegmentMergePiece<VAL> merged_result = merge_all_buffers<CODE>(merge_buffers, num_segments_l > 1, argsort, alloc, stream); ///////////////////////////////////////////////////////////////////////////////////////////////// /////////////// Part 5: re-balance data to match input/output dimensions 
///////////////////////////////////////////////////////////////////////////////////////////////// if (rebalance) { assert(!is_unbound_1d_storage); rebalance_data(merged_result, output_ptr, my_rank, my_sort_rank, num_sort_ranks, sort_ranks, segment_size_l, num_segments_l, argsort, alloc, stream, comm); } else { assert(is_unbound_1d_storage); merged_result.segments.destroy(); if (argsort) { merged_result.values.destroy(); output_array_unbound.return_data(merged_result.indices, Point<1>(merged_result.size)); } else { output_array_unbound.return_data(merged_result.values, Point<1>(merged_result.size)); } } } ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////////////////////// template <LegateTypeCode CODE, int32_t DIM> struct SortImplBody<VariantKind::GPU, CODE, DIM> { using VAL = legate_type_of<CODE>; void operator()(const Array& input_array, Array& output_array, const Pitches<DIM - 1>& pitches, const Rect<DIM>& rect, const size_t volume, const size_t segment_size_l, const size_t segment_size_g, const bool argsort, const bool stable, const bool is_index_space, const size_t local_rank, const size_t num_ranks, const size_t num_sort_ranks, const std::vector<comm::Communicator>& comms) { auto input = input_array.read_accessor<VAL, DIM>(rect); // we allow empty domains for distributed sorting assert(rect.empty() || input.accessor.is_dense_row_major(rect)); auto stream = get_cached_stream(); bool is_unbound_1d_storage = output_array.dim() == -1; bool need_distributed_sort = segment_size_l != segment_size_g || is_unbound_1d_storage; bool rebalance = !is_unbound_1d_storage; assert(DIM == 1 || !is_unbound_1d_storage); // initialize sort pointers SortPiece<VAL> local_sorted; int64_t* indices_ptr = nullptr; VAL* values_ptr = nullptr; if (argsort) { // make a buffer for input auto input_copy = create_buffer<VAL>(volume, Legion::Memory::Kind::GPU_FB_MEM); local_sorted.values = input_copy; values_ptr = input_copy.ptr(0); // initialize indices if (need_distributed_sort) { auto indices_buffer = create_buffer<int64_t>(volume, Legion::Memory::Kind::GPU_FB_MEM); indices_ptr = indices_buffer.ptr(0); local_sorted.indices = indices_buffer; local_sorted.size = volume; } else { AccessorWO<int64_t, DIM> output = output_array.write_accessor<int64_t, DIM>(rect); assert(rect.empty() || output.accessor.is_dense_row_major(rect)); indices_ptr = output.ptr(rect.lo); } size_t offset = rect.lo[DIM - 1]; if (volume > 0) { if (DIM == 1) { thrust::sequence(thrust::cuda::par.on(stream), indices_ptr, indices_ptr + volume, offset); } else { thrust::transform(thrust::cuda::par.on(stream), thrust::make_counting_iterator<int64_t>(0), 
thrust::make_counting_iterator<int64_t>(volume), thrust::make_constant_iterator<int64_t>(segment_size_l), indices_ptr, modulusWithOffset(offset)); } } } else { // initialize output if (need_distributed_sort) { auto input_copy = create_buffer<VAL>(volume, Legion::Memory::Kind::GPU_FB_MEM); values_ptr = input_copy.ptr(0); local_sorted.values = input_copy; local_sorted.indices = create_buffer<int64_t>(0, Legion::Memory::Kind::GPU_FB_MEM); local_sorted.size = volume; } else { AccessorWO<VAL, DIM> output = output_array.write_accessor<VAL, DIM>(rect); assert(rect.empty() || output.accessor.is_dense_row_major(rect)); values_ptr = output.ptr(rect.lo); } } CHECK_CUDA_STREAM(stream); if (volume > 0) { // sort data (locally) local_sort<CODE>(input.ptr(rect.lo), values_ptr, indices_ptr, indices_ptr, volume, segment_size_l, stable, stream); } CHECK_CUDA_STREAM(stream); if (need_distributed_sort) { if (is_index_space) { assert(is_index_space || is_unbound_1d_storage); std::vector<size_t> sort_ranks(num_sort_ranks); size_t rank_group = local_rank / num_sort_ranks; for (int r = 0; r < num_sort_ranks; ++r) sort_ranks[r] = rank_group * num_sort_ranks + r; void* output_ptr = nullptr; // in case the storage *is NOT* unbound -- we provide a target pointer // in case the storage *is* unbound -- the result will be appended to output_array if (volume > 0 && !is_unbound_1d_storage) { if (argsort) { auto output = output_array.write_accessor<int64_t, DIM>(rect); assert(output.accessor.is_dense_row_major(rect)); output_ptr = static_cast<void*>(output.ptr(rect.lo)); } else { auto output = output_array.write_accessor<VAL, DIM>(rect); assert(output.accessor.is_dense_row_major(rect)); output_ptr = static_cast<void*>(output.ptr(rect.lo)); } } sample_sort_nccl_nd<CODE>(local_sorted, output_array, output_ptr, local_rank, num_ranks, segment_size_g, local_rank % num_sort_ranks, num_sort_ranks, sort_ranks.data(), segment_size_l, rebalance, argsort, stream, comms[0].get<ncclComm_t*>()); } else { // edge case where we have an unbound store but only 1 GPU was assigned with the task if (argsort) { local_sorted.values.destroy(); output_array.return_data(local_sorted.indices, Point<1>(local_sorted.size)); } else { output_array.return_data(local_sorted.values, Point<1>(local_sorted.size)); } } } else if (argsort) { // cleanup for non distributed argsort local_sorted.values.destroy(); } CHECK_CUDA_STREAM(stream); } }; /*static*/ void SortTask::gpu_variant(TaskContext& context) { sort_template<VariantKind::GPU>(context); } } // namespace cunumeric
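/////////////////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of cuNumeric): a minimal host-only model of the pairwise merge
// sweep performed by merge_all_buffers(), with std::vector and std::merge standing in for the
// Legate buffers and thrust::merge used above. The helper name merge_sweep_model is hypothetical.
#include <algorithm>
#include <utility>
#include <vector>

static std::vector<int> merge_sweep_model(std::vector<std::vector<int>> pieces)
{
  // Double the stride each sweep and merge pieces[pos] with pieces[pos + stride]; after
  // ceil(log2(n)) sweeps, pieces[0] holds the fully merged, sorted result.
  for (size_t stride = 1; stride < pieces.size(); stride *= 2) {
    for (size_t pos = 0; pos + stride < pieces.size(); pos += 2 * stride) {
      std::vector<int> merged(pieces[pos].size() + pieces[pos + stride].size());
      std::merge(pieces[pos].begin(),
                 pieces[pos].end(),
                 pieces[pos + stride].begin(),
                 pieces[pos + stride].end(),
                 merged.begin());
      pieces[pos] = std::move(merged);
    }
  }
  return pieces.empty() ? std::vector<int>() : pieces[0];
}
// Example: merge_sweep_model({{1, 4}, {2, 3}, {0, 5}}) yields {0, 1, 2, 3, 4, 5}.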
#if BF_CUDA_ENABLED #include "transpose_gpu_kernel.cuh" #include "cuda.hpp" #else typedef int cudaStream_t; // WAR #endif #include <cstdio> #include <algorithm> #include <limits> #include <sstream> template<int N> struct aligned_type { typedef char type; }; template<> struct aligned_type< 2> { typedef short type; }; template<> struct aligned_type< 4> { typedef int type; }; template<> struct aligned_type< 8> { typedef int2 type; }; template<> struct aligned_type<16> { typedef int4 type; }; //inline int find_odim(int const* output_order, int ndim, int dim) { // return std::find(output_order, output_order+ndim, dim) - output_order; //} #define FIND_ODIM_(dim) \ (int(std::find(output_order, output_order+ndim, (dim)) - output_order)) namespace typed { namespace aligned_in { namespace aligned_in_out { template<int ALIGNMENT_IN, int ALIGNMENT_OUT, typename T> BFstatus transpose(int ndim, long const* sizes, // elements int const* output_order, T const* in, long const* in_strides, // bytes T * out, long const* out_strides, // bytes cudaStream_t stream) { enum { ELEMENT_SIZE = sizeof(T) }; // TODO: This is currently all tuned for a GTX Titan (sm_35) enum { TILE_DIM = 32, //BLOCK_ROWS = 8 //BLOCK_ROWS = (ALIGNMENT_IN >= 8 && ALIGNMENT_IN % 2 == 0) ? 16 : 8 BLOCK_ROWS = (ELEMENT_SIZE >= 8 && ELEMENT_SIZE % 2 == 0) ? 16 : 8, CONDITIONAL_WRITE = (ELEMENT_SIZE != 1 && ELEMENT_SIZE != 2) }; //// Note: These are all relative to the input int ifastdim = ndim-1; int islowdim = output_order[ndim-1]; int oslowdim = FIND_ODIM_(ifastdim); //int ofastdim = islowdim; //int oslowdim = ndim-1;//find_odim(output_order, ndim, ndim-1); //std::find(output_order, output_order+ndim, ndim-1)-output_order; //std::printf("****ifast,islow,ofast,oslow: %i %i %i %i\n", // ifastdim, islowdim, // ofastdim, oslowdim); if( ifastdim == islowdim ) {//ofastdim ) { return BF_STATUS_UNSUPPORTED; } long width = sizes[ifastdim]; long height = sizes[islowdim];//ofastdim]; long in_stride = in_strides[islowdim]; long out_stride = out_strides[oslowdim];//oslowdim)]; // TODO int ndimz = std::max(ndim-2, int(0)); int shapez[MAX_NDIM]; int istridez[MAX_NDIM]; int ostridez[MAX_NDIM]; long sizez = 1; int dd = 0; for( int d=0; d<(int)ndim-1; ++d ) { if( d != islowdim ) { shapez[dd] = sizes[d]; sizez *= sizes[d]; istridez[dd] = in_strides[d]; ostridez[dd] = out_strides[FIND_ODIM_(d)]; // TODO: Check this //std::printf("shapez, sizez, istridez, ostridez = %i %lu %i %i\n", // shapez[dd], sizez, istridez[dd], ostridez[dd]); ++dd; } } int cumushapez[MAX_NDIM]; cumushapez[ndimz-1] = 1; for( int d=ndimz-2; d>=0; --d ) { cumushapez[d] = cumushapez[d+1]*shapez[d+1]; } for( int d=MAX_NDIM-1; d>ndimz-2; --d ) { // WAR to avoid uninitialized values going through int_fastdiv // via SmallArray, which triggers errors when run through valgrind. 
cumushapez[d] = 1; } dim3 grid, block; block.x = TILE_DIM; block.y = BLOCK_ROWS; block.z = 1; grid.x = std::min(div_up(width, (long)TILE_DIM), (long)65535); grid.y = std::min(div_up(height, (long)TILE_DIM), (long)65535); // Note: inner_idepth*outer_idepth == inner_odepth*outer_odepth //grid.z = std::min(inner_idepth*outer_idepth, (long)65535); grid.z = std::min(sizez, (long)65535); //std::printf("ALIGN IN: %i %lu\n", ALIGNMENT_IN, sizeof(typename aligned_type<ALIGNMENT_IN>::type)); //std::printf("ALIGN ouT: %i %lu\n", ALIGNMENT_OUT, sizeof(typename aligned_type<ALIGNMENT_OUT>::type)); bool can_use_int = (sizes[0]*in_strides[0] < (long)std::numeric_limits<int>::max() && sizes[0]*out_strides[0] < (long)std::numeric_limits<int>::max()); #if BF_CUDA_ENABLED if( ELEMENT_SIZE == 6 || ELEMENT_SIZE == 8 || ELEMENT_SIZE == 16 ) { // TODO: Doing this here might be a bad idea cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); } else { cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte); } if( can_use_int ) { kernel::transpose <TILE_DIM,BLOCK_ROWS,CONDITIONAL_WRITE, typename aligned_type<ALIGNMENT_IN>::type, typename aligned_type<ALIGNMENT_OUT>::type, T, int> <<<grid,block,0,stream>>>(width, height, in, in_stride, out, out_stride, sizez, ndimz, cumushapez, istridez, ostridez); /* inner_idepth, inner_idepth_log2, inner_odepth, inner_odepth_log2, outer_idepth, outer_odepth, inner_ipitch, inner_opitch, outer_ipitch, outer_opitch); */ //inner_depth_istride, //inner_depth_ostride, //outer_depth_istride, //outer_depth_ostride); } else { kernel::transpose <TILE_DIM,BLOCK_ROWS,CONDITIONAL_WRITE, typename aligned_type<ALIGNMENT_IN>::type, typename aligned_type<ALIGNMENT_OUT>::type, T, long> <<<grid,block,0,stream>>>(width, height, in, in_stride, out, out_stride, sizez, ndimz, cumushapez, istridez, ostridez); /* inner_idepth, inner_idepth_log2, inner_odepth, inner_odepth_log2, outer_idepth, outer_odepth, inner_ipitch, inner_opitch, outer_ipitch, outer_opitch); */ //inner_depth, //inner_depth_log2, //outer_depth, //inner_depth_istride, //inner_depth_ostride, //outer_depth_istride, //outer_depth_ostride); } cudaError_t error = cudaGetLastError(); if( error != cudaSuccess ) { std::printf("CUDA ERROR: %s\n", cudaGetErrorString(error)); } BF_ASSERT(error == cudaSuccess, BF_STATUS_INTERNAL_ERROR); #endif /* // TODO: Implement CPU version too #pragma omp parallel for collapse(3) for( int blockIdx_z=0; blockIdx_z<gridDim.z; ++blockIdx_z ) { for( int blockIdx_y=0; blockIdx_y<gridDim.y; ++blockIdx_y ) { for( int blockIdx_x=0; blockIdx_x<gridDim.x; ++blockIdx_x ) { for( int threadIdx_z=0; threadIdx_z<blockDim.z; ++threadIdx_z ) { for( int threadIdx_y=0; threadIdx_y<blockDim.y; ++threadIdx_y ) { for( int threadIdx_x=0; threadIdx_x<blockDim.x; ++threadIdx_x ) { transpose<...>(...); } } } } } } */ return BF_STATUS_SUCCESS; } } // namespace aligned_in_out template<int ALIGNMENT_IN, typename T> BFstatus transpose(int ndim, long const* sizes, // elements int const* output_order, T const* in, long const* in_strides, // bytes T * out, long const* out_strides, // bytes cudaStream_t stream) { unsigned long out_alignment = (unsigned long)out; for( int d=0; d<ndim; ++d ) { out_alignment = gcd(out_alignment, (unsigned long)out_strides[d]); } switch( out_alignment ) { case sizeof(T): return aligned_in_out::transpose<ALIGNMENT_IN,sizeof(T)>(ndim,sizes,output_order,in,in_strides,out,out_strides,stream); //case 16: return 
aligned_in_out::transpose<ALIGNMENT_IN,16>(ndim,sizes,output_order,in,in_strides,out,out_strides,stream); //case 8: return aligned_in_out::transpose<ALIGNMENT_IN, 8>(ndim,sizes,output_order,in,in_strides,out,out_strides,stream); //case 12: //case 4: return aligned_in_out::transpose<ALIGNMENT_IN, 4>(ndim,sizes,output_order,in,in_strides,out,out_strides,stream); //case 14: //ncase 10: //case 6: //case 2: return aligned_in_out::transpose<ALIGNMENT_IN, 2>(ndim,sizes,output_order,in,in_strides,out,out_strides,stream); default: return aligned_in_out::transpose<ALIGNMENT_IN, 1>(ndim,sizes,output_order,in,in_strides,out,out_strides,stream); } } } // namespace aligned_in template<typename T> BFstatus transpose(int ndim, long const* sizes, // elements int const* output_order, T const* in, long const* in_strides, // bytes T * out, long const* out_strides, // bytes cudaStream_t stream) { BF_TRACE_STREAM(stream); unsigned long in_alignment = (unsigned long)in; for( int d=0; d<ndim; ++d ) { in_alignment = gcd(in_alignment, (unsigned long)in_strides[d]); } switch( in_alignment ) { case sizeof(T): return aligned_in::transpose<sizeof(T)>(ndim,sizes,output_order,in,in_strides,out,out_strides,stream); //case 16: return aligned_in::transpose<16>(ndim,sizes,output_order,in,in_strides,out,out_strides,stream); //case 8: return aligned_in::transpose< 8>(ndim,sizes,output_order,in,in_strides,out,out_strides,stream); //case 12: //case 4: return aligned_in::transpose< 4>(ndim,sizes,output_order,in,in_strides,out,out_strides,stream); //case 14: //case 10: //case 6: //case 2: return aligned_in::transpose< 2>(ndim,sizes,output_order,in,in_strides,out,out_strides,stream); default: return aligned_in::transpose< 1>(ndim,sizes,output_order,in,in_strides,out,out_strides,stream); } } } // namespace typed // This is for when the fastest-changing dim is not transposed BFstatus transpose_simple(BFarray const* in, BFarray const* out, int const* axes) { BF_ASSERT(BF_MAX_DIMS <= 16, BF_STATUS_INTERNAL_ERROR); // Minor HACK to avoid using stringstream (which was inexplicably // segfaulting whenever I used it :|). static const char* hex_digits = "0123456789ABCDEF"; int ndim = in->ndim; int axes_inverted[BF_MAX_DIMS]; invert_permutation(ndim, axes, axes_inverted); axes = axes_inverted; std::string func_str; func_str += "out = in(i"; func_str += hex_digits[axes[0]]; for( int d=1; d<ndim; ++d ) { func_str += ", i"; func_str += hex_digits[axes[d]]; } func_str += ")"; // Minor HACK to avoid heap allocations char const* axis_names[] = { "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7", "i8", "i9", "iA", "iB", "iC", "iD", "iE", "iF" }; BFarray in_mutable, out_mutable; ::memcpy( &in_mutable, in, sizeof(BFarray)); ::memcpy(&out_mutable, out, sizeof(BFarray)); in_mutable.immutable = true; in_mutable.dtype = same_sized_storage_dtype(in_mutable.dtype); out_mutable.dtype = same_sized_storage_dtype(out_mutable.dtype); in = &in_mutable; out = &out_mutable; int narg = 2; BFarray const* args[] = {in, out}; char const* arg_names[] = {"in", "out"}; char const* func = func_str.c_str(); char const* extra_code = 0; return bfMap(ndim, out->shape, axis_names, narg, args, arg_names, "transpose_simple", func, extra_code, 0, 0); } // This is for when the fastest-changing input dim is small and should // be processed whole by each thread using vector loads. 
BFstatus transpose_vector_read(BFarray const* in, BFarray const* out, int const* axes) { BF_ASSERT(BF_MAX_DIMS <= 16, BF_STATUS_INTERNAL_ERROR); // Minor HACK to avoid using stringstream (which was inexplicably // segfaulting whenever I used it :|). static const char* hex_digits = "0123456789ABCDEF"; int ndim = in->ndim; BF_ASSERT(in->shape[ndim-1] <= 16, BF_STATUS_INTERNAL_ERROR); int K = in->shape[ndim-1]; int axes_inverted[BF_MAX_DIMS]; invert_permutation(ndim, axes, axes_inverted); axes = axes_inverted; int odim = axes[ndim-1]; std::string in_inds_str = "i"; in_inds_str += hex_digits[axes[0]]; std::string out_inds_str = (odim == 0) ? "k" : "i0"; for( int d=1; d<ndim; ++d ) { if( d < ndim-1 ) { in_inds_str += ", i"; in_inds_str += hex_digits[axes[d]]; } out_inds_str += ", "; if( d == odim ) { out_inds_str += "k"; } else { out_inds_str += "i"; out_inds_str += hex_digits[d]; } } std::string func_str; func_str += "enum { K = " + std::to_string(K) + " };\n"; func_str += "in_type ivals = in(" + in_inds_str + ");\n" "#pragma unroll\n" "for( int k=0; k<K; ++k ) {\n" " out(" + out_inds_str + ") = ivals[k];\n" "}\n"; // Minor HACK to avoid heap allocations char const* axis_names[] = { "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7", "i8", "i9", "iA", "iB", "iC", "iD", "iE", "iF" }; BFarray in_mutable, out_mutable; ::memcpy( &in_mutable, in, sizeof(BFarray)); ::memcpy(&out_mutable, out, sizeof(BFarray)); in_mutable.immutable = true; in_mutable.dtype = same_sized_storage_dtype(in_mutable.dtype); out_mutable.dtype = same_sized_storage_dtype(out_mutable.dtype); merge_last_dim_into_dtype(&in_mutable, &in_mutable); in = &in_mutable; out = &out_mutable; int narg = 2; BFarray const* args[] = {in, out}; char const* arg_names[] = {"in", "out"}; char const* func = func_str.c_str(); char const* extra_code = 0; long shape[BF_MAX_DIMS]; ::memcpy(shape, out->shape, ndim*sizeof(long)); shape[odim] = 1; // This dim is processed sequentially by each thread return bfMap(ndim, shape, axis_names, narg, args, arg_names, "transpose_vector_read", func, extra_code, 0, 0); } // This is for when the fastest-changing output dim is small and should // be processed whole by each thread using vector stores. BFstatus transpose_vector_write(BFarray const* in, BFarray const* out, int const* axes) { BF_ASSERT(BF_MAX_DIMS <= 16, BF_STATUS_INTERNAL_ERROR); // Minor HACK to avoid using stringstream (which was inexplicably // segfaulting whenever I used it :|). 
static const char* hex_digits = "0123456789ABCDEF"; int ndim = in->ndim; BF_ASSERT(out->shape[ndim-1] <= 16, BF_STATUS_INTERNAL_ERROR); int K = out->shape[ndim-1]; int axes_inverted[BF_MAX_DIMS]; invert_permutation(ndim, axes, axes_inverted); axes = axes_inverted; int idim = ndim - 1; std::string in_inds_str; if( idim == axes[0] ) { in_inds_str = "k"; } else { in_inds_str = "i"; in_inds_str += hex_digits[axes[0]]; } std::string out_inds_str = "i0"; for( int d=1; d<ndim; ++d ) { in_inds_str += ", "; if( axes[d] == idim ) { in_inds_str += "k"; } else { in_inds_str += "i"; in_inds_str += hex_digits[axes[d]]; } if( d < ndim-1 ) { out_inds_str += ", i"; out_inds_str += hex_digits[d]; } } std::string func_str; func_str += "enum { K = " + std::to_string(K) + " };\n"; func_str += "out_type ovals;\n" "#pragma unroll\n" "for( int k=0; k<K; ++k ) {\n" " ovals[k] = in(" + in_inds_str + ");\n" "}\n" "out(" + out_inds_str + ") = ovals;\n"; // Minor HACK to avoid heap allocations char const* axis_names[] = { "i0", "i1", "i2", "i3", "i4", "i5", "i6", "i7", "i8", "i9", "iA", "iB", "iC", "iD", "iE", "iF" }; long shape[BF_MAX_DIMS]; ::memcpy(shape, out->shape, ndim*sizeof(long)); shape[idim] = 1; // This dim is processed sequentially by each thread BFarray in_mutable, out_mutable; ::memcpy( &in_mutable, in, sizeof(BFarray)); ::memcpy(&out_mutable, out, sizeof(BFarray)); in_mutable.immutable = true; in_mutable.dtype = same_sized_storage_dtype(in_mutable.dtype); out_mutable.dtype = same_sized_storage_dtype(out_mutable.dtype); merge_last_dim_into_dtype(&out_mutable, &out_mutable); in = &in_mutable; out = &out_mutable; int narg = 2; BFarray const* args[] = {in, out}; char const* arg_names[] = {"in", "out"}; char const* func = func_str.c_str(); char const* extra_code = 0; return bfMap(ndim, shape, axis_names, narg, args, arg_names, "transpose_vector_write", func, extra_code, 0, 0); } BFstatus bfTranspose(BFarray const* in, BFarray const* out, int const* axes) { BF_TRACE(); BF_ASSERT(in, BF_STATUS_INVALID_POINTER); BF_ASSERT(out, BF_STATUS_INVALID_POINTER); BF_ASSERT(axes, BF_STATUS_INVALID_POINTER); BF_ASSERT(in->ndim >= 2, BF_STATUS_INVALID_SHAPE); BF_ASSERT(out->ndim == in->ndim, BF_STATUS_INVALID_SHAPE); BF_ASSERT(space_accessible_from(in->space, BF_SPACE_CUDA), BF_STATUS_UNSUPPORTED_SPACE); BF_ASSERT(in->dtype == out->dtype, BF_STATUS_INVALID_DTYPE); int element_size = BF_DTYPE_NBYTE(in->dtype); int ndim = in->ndim; // Handle negative axis numbers int axes_actual[BF_MAX_DIMS]; for( int d=0; d<ndim; ++d ) { int x = axes[d]; axes_actual[d] = x < 0 ? 
ndim + x : x; BF_ASSERT(out->shape[d] == in->shape[axes_actual[d]], BF_STATUS_INVALID_SHAPE); } // Special cases to be handled with different kernels int ifastdim = ndim-1; int ofastdim = axes_actual[ndim-1]; if( ifastdim == ofastdim ) { return transpose_simple(in, out, axes_actual); } else if( in->shape[ofastdim] <= 16 ) { // TODO: Tune this heuristic return transpose_vector_write(in, out, axes_actual); } else if( in->shape[ifastdim] <= 16 ) { // TODO: Tune this heuristic return transpose_vector_read(in, out, axes_actual); } switch( element_size ) { #define DEFINE_TYPE_CASE(N) \ case N: return typed::transpose(ndim, \ in->shape, \ axes_actual, \ (type_of_size<N>*)in->data, \ in->strides, \ (type_of_size<N>*)out->data, \ out->strides, \ g_cuda_stream); DEFINE_TYPE_CASE( 1); DEFINE_TYPE_CASE( 2); DEFINE_TYPE_CASE( 3); DEFINE_TYPE_CASE( 4); DEFINE_TYPE_CASE( 5); DEFINE_TYPE_CASE( 6); DEFINE_TYPE_CASE( 7); DEFINE_TYPE_CASE( 8); DEFINE_TYPE_CASE( 9); DEFINE_TYPE_CASE(10); DEFINE_TYPE_CASE(11); DEFINE_TYPE_CASE(12); DEFINE_TYPE_CASE(13); DEFINE_TYPE_CASE(14); DEFINE_TYPE_CASE(15); DEFINE_TYPE_CASE(16); #undef DEFINE_TYPE_CASE default: BF_FAIL("Supported bfTranspose element size", BF_STATUS_UNSUPPORTED_DTYPE); } }
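// Illustrative sketch (not part of Bifrost): how the dispatch above derives the widest safe
// vector width from a base pointer and its byte strides. The helper name effective_alignment
// is hypothetical; typed::transpose() applies the same gcd reduction before selecting an
// aligned_type<N> for its loads and stores.
static unsigned long effective_alignment(void const* ptr,
                                         long const* strides_bytes,
                                         int         ndim) {
    unsigned long align = (unsigned long)ptr;
    for( int d=0; d<ndim; ++d ) {
        // Euclid's algorithm: the result divides the base address and every stride,
        // so every element address stays aligned to it.
        unsigned long a = align, b = (unsigned long)strides_bytes[d];
        while( b != 0 ) { unsigned long t = a % b; a = b; b = t; }
        align = a;
    }
    return align; // 16 -> int4 accesses, 8 -> int2, 4 -> int, 2 -> short, otherwise char
}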
#include "ew_op_gpu.h" #include <stdio.h> #include <type_traits> // # kernel 1 // new_vr = decay * vr + (1 - decay) * np.mean(grad**2 + eps1, axis=1, keepdims=True) // tf.assign(vr, new_vr) // ltm = np.mean(new_vr, keepdims=True) template <typename T, typename V> __global__ void adafactor_row_variance( float* RV, float* RV_MEAN, const T* __restrict__ Grad, const float* __restrict__ Norm, float grad_scale, float decay, float epsilon, uint K, float rcpC, float rcpK, float saturate, uint zero_infs, uint zero_nans, uint use_norm) { float norm_scale = use_norm ? __ldg(Norm) : 1.0f; // skip all optimization if the global norm is bad. if (norm_scale != 0.0f) { uint tid = threadIdx.x; uint c = blockIdx.x; V var_sum; ew_zero(var_sum); #pragma unroll 1 for (uint k = tid, offset = c*K + tid; k < K; k += blockDim.x, offset += blockDim.x) { V grad = load(Grad + offset); if (zero_infs) grad = ew_zero_inf(grad); if (zero_nans) grad = ew_zero_nan(grad); if (saturate != 0.0f) grad = ew_maximum(ew_minimum(grad, saturate), -saturate); grad = ew_mul(grad, grad_scale * norm_scale); var_sum = ew_add(var_sum, ew_add(ew_sqr(grad), epsilon)); } float row_var = ew_sum(var_sum); // reduce within warp for (int i = 16; i > 0; i >>= 1) row_var += shfl_xor(row_var, i); // if using more than 1 warp, further reduced with shared memory if (blockDim.x > 32) { __shared__ float Share[32]; // first thread of each warp store to shared if ((tid & 31) == 0) Share[tid/32] = row_var; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions row_var = Share[tid]; // reduce within this first warp #pragma unroll 1 for (int i = blockDim.x/64; i > 0; i >>= 1) row_var += shfl_xor(row_var, i); } } if (tid == 0) { row_var *= rcpK; RV += c; float old_rv = __ldg((const float*)RV); row_var = decay * old_rv + (1.0f - decay) * row_var; __stg(RV, row_var); atomicRed(RV_MEAN, row_var * rcpC); } } } // # kernel 2 // new_vc = decay * vc + (1 - decay) * np.mean(grad**2 + eps1, axis=0, keepdims=True) // tf.assign(vc, new_vc) template <typename T, typename V, uint THREADS> __global__ void __launch_bounds__(THREADS) adafactor_col_variance( V* CV, const T* __restrict__ Grad, const float* __restrict__ Norm, float grad_scale, float decay, float epsilon, uint C, uint K, float rcpC, float saturate, uint zero_infs, uint zero_nans, uint use_norm) { float norm_scale = use_norm ? __ldg(Norm) : 1.0f; // skip all optimization if the global norm is bad. 
if (norm_scale != 0.0f) { uint tid = threadIdx.x; uint k = blockIdx.x*32 + (tid & 31); uint c = tid / 32; uint ck = c*K + k; V var_sum; ew_zero(var_sum); if (k < K) { #pragma unroll 1 while (c < C) { V grad = load(Grad + ck); if (zero_infs) grad = ew_zero_inf(grad); if (zero_nans) grad = ew_zero_nan(grad); if (saturate != 0.0f) grad = ew_maximum(ew_minimum(grad, saturate), -saturate); grad = ew_mul(grad, grad_scale * norm_scale); var_sum = ew_add(var_sum, ew_add(ew_sqr(grad), epsilon)); ck += K*THREADS/32; c += THREADS/32; } } if (THREADS > 32) { __shared__ V Share[THREADS]; if (tid >= 64) Share[tid] = var_sum; __syncthreads(); if (tid < 64) { for (uint i = 1; i < THREADS/64; i++) var_sum = ew_add(var_sum, Share[tid + i*64]); Share[tid] = var_sum; } __syncthreads(); if (tid < 32) var_sum = ew_add(var_sum, Share[tid + 32]); } if (tid < 32 && k < K) { CV += k; V col_var = ew_mul(var_sum, rcpC); V old_cv = __ldg((const V*)CV); col_var = ew_add(ew_mul(old_cv, decay), ew_mul(col_var, 1.0f - decay)); __stg(CV, col_var); } } } // # kernel 3 // x = grad * np.rsqrt(new_vr / ltm) * np.rsqrt(new_vc) // rms_x = np.mean(x**2, keepdims=True) template <typename T, typename V> __global__ void adafactor_normalize_2d( V* X, float* RMS_X, const T* __restrict__ Grad, const float* __restrict__ Norm, const float* __restrict__ RV, const V* __restrict__ CV, const float* __restrict__ RV_MEAN, float grad_scale, uint K, float rcpCK, float saturate, uint zero_infs, uint zero_nans, uint use_norm) { float norm_scale = use_norm ? __ldg(Norm) : 1.0f; // skip all optimization if the global norm is bad. if (norm_scale != 0.0f) { uint tid = threadIdx.x; uint c = blockIdx.x; float rv = rsqrtf(RV[c] / *RV_MEAN); V rms_sum; ew_zero(rms_sum); #pragma unroll 1 for (uint k = tid, offset = c*K + tid; k < K; k += blockDim.x, offset += blockDim.x) { V grad = load(Grad + offset); V cv = ew_rsqrt(CV[k]); if (zero_infs) grad = ew_zero_inf(grad); if (zero_nans) grad = ew_zero_nan(grad); if (saturate != 0.0f) grad = ew_maximum(ew_minimum(grad, saturate), -saturate); grad = ew_mul(grad, grad_scale * norm_scale); V x = ew_mul(grad, ew_mul(cv, rv)); rms_sum = ew_add(rms_sum, ew_sqr(x)); store(X + offset, x); } float rms_x = ew_sum(rms_sum); // reduce within warp for (int i = 16; i > 0; i >>= 1) rms_x += shfl_xor(rms_x, i); // if using more than 1 warp, further reduced with shared memory if (blockDim.x > 32) { __shared__ float Share[32]; // first thread of each warp store to shared if ((tid & 31) == 0) Share[tid/32] = rms_x; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions rms_x = Share[tid]; // reduce within this first warp #pragma unroll 1 for (int i = blockDim.x/64; i > 0; i >>= 1) rms_x += shfl_xor(rms_x, i); } } if (tid == 0) atomicRed(RMS_X, rms_x * rcpCK); } } // new_v = decay * v + (1 - decay) * (grad**2 + eps1) // tf.assign(v, new_v) // x = grad * tf.rsqrt(new_v) // rms_x = np.mean(x**2, keepdims=True) template <typename T> __global__ void __launch_bounds__(32) adafactor_normalize_1d( float* CV, float* X, float* RMS_X, const T* __restrict__ Grad, const float* __restrict__ Norm, float grad_scale, float decay, float epsilon, uint K, float rcpK, float saturate, uint zero_infs, uint zero_nans, uint use_norm) { float norm_scale = use_norm ? __ldg(Norm) : 1.0f; // skip all optimization if the global norm is bad. 
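// Added note: this 1D path keeps a full per-element second moment. Each 32-thread block
// walks K in a grid-stride loop, stores new_v = decay*v + (1-decay)*(grad**2 + eps1) and
// x = grad*rsqrt(new_v), then warp-reduces its local sum of x**2 and atomically adds the
// rcpK-scaled share into RMS_X.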
if (norm_scale != 0.0f) { uint tid = threadIdx.x; uint bid = blockIdx.x; float rms_x = 0.0f; #pragma unroll 1 for (uint k = bid*32 + tid; k < K; k += gridDim.x*32) { float grad = load(Grad + k); float cv = CV[k]; if (zero_infs) grad = ew_zero_inf(grad); if (zero_nans) grad = ew_zero_nan(grad); if (saturate != 0.0f) grad = ew_maximum(ew_minimum(grad, saturate), -saturate); grad = ew_mul(grad, grad_scale * norm_scale); float new_cv = decay * cv + (1.0f - decay) * (grad*grad + epsilon); float x = grad * rsqrtf(new_cv); CV[k] = new_cv; X[k] = x; rms_x += x*x; } // reduce within warp for (int i = 16; i > 0; i >>= 1) rms_x += shfl_xor(rms_x, i); if (tid == 0) atomicRed(RMS_X, rms_x * rcpK); } } // # kernel 4 // tf.assign_sub(param, learning_rate * x / np.maximum(1.0, np.sqrt(rms_x) / clipping_threshold) ) template <typename V> __global__ void adafactor_apply( V* P, const V* __restrict__ X, const float* __restrict__ RMS_X, const float* __restrict__ Norm, float learning_rate, float rcp_clip, uint size, uint use_norm) { float norm_scale = use_norm ? __ldg(Norm) : 1.0f; // skip all optimization if the global norm is bad. if (norm_scale != 0.0f) { uint tid = threadIdx.x; uint bid = blockIdx.x; float update_rate = learning_rate / fmaxf(sqrtf(__ldg(RMS_X)) * rcp_clip, 1.0f); #pragma unroll 1 for (uint i = bid*blockDim.x + tid; i < size; i += gridDim.x*blockDim.x) P[i] = ew_sub(P[i], ew_mul(X[i], update_rate)); } } #define MIN(a,b) (((a)<(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) template <typename T, typename V> bool Adafactor(CUstream stream, uint SMs, float* cv, float* rv, float* x, float* means, float* param, const T* grad, const float* norm_scale, float grad_scale, float learning_rate, float decay, float epsilon, float clip_thresh, uint C, uint K, float saturate, bool zero_infs, bool zero_nans) { cuMemsetD32Async((CUdeviceptr)means, 0, 2, stream); // used for row variance mean and RMS_X float rcpK = 1.0f / (float)K; float rcpClip = 1.0f / clip_thresh; float* rv_mean = means; float* rms_x = means + 1; // 1D case if (C == 1) { uint gridK = MIN(MAX(SMs*2, CEIL_DIV(K, 32*4)), SMs*32*2); adafactor_normalize_1d<T><<<gridK,32,0,stream>>>(cv, x, rms_x, grad, norm_scale, grad_scale, decay, epsilon, K, rcpK, saturate, zero_infs, zero_nans, norm_scale != 0); adafactor_apply<float><<<gridK,32,0,stream>>>(param, x, (const float*)rms_x, norm_scale, learning_rate, rcpClip, K, norm_scale != 0); } else { float rcpC = 1.0f / (float)C; uint gridK = CEIL_DIV(K, 32); adafactor_col_variance<T,float,1024><<<gridK,1024,0,stream>>>(cv, grad, norm_scale, grad_scale, decay, epsilon, C, K, rcpC, saturate, zero_infs, zero_nans, norm_scale != 0); if (K & 3) { uint CK = C*K; uint gridCK = CK > SMs*1024 ? SMs*2 : SMs; adafactor_row_variance<T,float><<<C,1024,0,stream>>>(rv, rv_mean, grad, norm_scale, grad_scale, decay, epsilon, K, rcpC, rcpK, saturate, zero_infs, zero_nans, norm_scale != 0); adafactor_normalize_2d<T,float><<<C,1024,0,stream>>>(x, rms_x, grad, norm_scale, (const float*)rv, (const float*)cv, (const float*)rv_mean, grad_scale, K, rcpC*rcpK, saturate, zero_infs, zero_nans, norm_scale != 0); adafactor_apply<float><<<gridCK,1024,0,stream>>>(param, (const float*)x, (const float*)rms_x, norm_scale, learning_rate, rcpClip, CK, norm_scale != 0); } else { K >>= 2; uint CK = C*K; uint gridCK = CK <= SMs*256*1 ? SMs*1 : CK <= SMs*256*2 ? SMs*2 : CK <= SMs*256*4 ? 
SMs*4 : SMs*8 ; adafactor_row_variance<V,float4><<<C,256,0,stream>>>(rv, rv_mean, (const V*)grad, norm_scale, grad_scale, decay, epsilon, K, rcpC, rcpK, saturate, zero_infs, zero_nans, norm_scale != 0); adafactor_normalize_2d<V,float4><<<C,256,0,stream>>>((float4*)x, rms_x, (const V*)grad, norm_scale, (const float*)rv, (const float4*)cv, (const float*)rv_mean, grad_scale, K, rcpC*rcpK, saturate, zero_infs, zero_nans, norm_scale != 0); adafactor_apply<float4><<<gridCK,256,0,stream>>>((float4*)param, (const float4*)x, (const float*)rms_x, norm_scale, learning_rate, rcpClip, CK, norm_scale != 0); } } return true; } template bool Adafactor<float,float4>(CUstream stream, uint SMs, float* cv, float* rv, float* x, float* means, float* param, const float* grad, const float* norm_scale, float grad_scale, float learning_rate, float decay, float epsilon, float clip, uint C, uint K, float saturate, bool zero_infs, bool zero_nans); template bool Adafactor<ehalf,ehalf4>(CUstream stream, uint SMs, float* cv, float* rv, float* x, float* means, float* param, const ehalf* grad, const float* norm_scale, float grad_scale, float learning_rate, float decay, float epsilon, float clip, uint C, uint K, float saturate, bool zero_infs, bool zero_nans); template bool Adafactor<bhalf,bhalf4>(CUstream stream, uint SMs, float* cv, float* rv, float* x, float* means, float* param, const bhalf* grad, const float* norm_scale, float grad_scale, float learning_rate, float decay, float epsilon, float clip, uint C, uint K, float saturate, bool zero_infs, bool zero_nans); template <typename TG, typename RM, typename RV> __global__ void apply_lazy_emb_adam( float* Param, RM* Mean, RV* Var, const TG* __restrict__ Grad, const float* __restrict__ Norm, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint K, float saturate, uint zero_infs, uint zero_nans, uint use_norm) { float norm_scale = use_norm ? __ldg(Norm) : 1.0f; if (norm_scale != 0.0f) { uint tid = threadIdx.x; uint c = blockIdx.x; uint k = blockIdx.y*blockDim.x + tid; uint offset = c*K + k; float g = load(add_ptr_u(Grad, offset), 0, k < K); if (zero_infs) g = ew_zero_inf(g); if (zero_nans) g = ew_zero_nan(g); if (saturate != 0.0f) g = ew_maximum(ew_minimum(g, saturate), -saturate); // max reduce gradient within this block. // If the whole block is zero that means that this embedding vector was not selected. // If the emb vector is bigger than the block then at least the probability is high of non-selection. // Make Adam a no-op in this case. 
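// Explanatory note (added): the code below is a block-wide max(|g|) reduction: shfl_xor folds
// within each warp, one value per warp then goes through shared memory, and the first warp
// folds those and broadcasts the result via Share[0]. Only when gmax > 0 (i.e. this embedding
// row actually received gradient) are Mean/Var/Param read, updated and written back.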
float gmax = fabsf(g); for (int i = 16; i > 0; i >>= 1) gmax = fmaxf(gmax, shfl_xor(gmax, i)); if (blockDim.x > 32) { __shared__ float Share[32]; // first thread of each warp store to shared if ((tid & 31) == 0) Share[tid/32] = gmax; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions gmax = Share[tid]; // reduce within this last warp #pragma unroll 1 for (int i = blockDim.x/64; i > 0; i >>= 1) gmax = fmaxf(gmax, shfl_xor(gmax, i)); // final reduction to shared Share[tid] = gmax; } __syncthreads(); gmax = Share[0]; } if (k < K && gmax > 0.0f) { float m = load(add_ptr_u((const RM*)Mean, offset)); float v = load(add_ptr_u((const RV*)Var, offset)); float p = load(add_ptr_u((const float*)Param, offset)); g *= grad_scale * norm_scale; v = decay_var * v + (1.0f - decay_var) * g*g; float sigma = sqrtf(v); if (clip_sigma != 0.0f) { float clip = clip_sigma * sigma; g = fmaxf(g, -clip); g = fminf(g, clip); } m = decay_mean * m + (1.0f - decay_mean) * g; p -= lr * m / (sigma + epsilon); store(add_ptr_u(Mean, offset), m); store(add_ptr_u(Var, offset), v); store(add_ptr_u(Param, offset), p); } } } template <typename TG, typename RM, typename RV> __global__ void apply_adam( float* Param, RM* Mean, RV* Var, const TG* __restrict__ Grad, const float* __restrict__ Norm, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint size, float saturate, uint zero_infs, uint zero_nans, uint use_norm) { float norm_scale = use_norm ? __ldg(Norm) : 1.0f; // skip all optimization if the global norm is bad. if (norm_scale != 0.0f) { uint tid = threadIdx.x; uint bid = blockIdx.x; for (uint offset = bid*blockDim.x + tid; offset < size; offset += gridDim.x*blockDim.x) { float g = load(add_ptr_u( Grad, offset)); float m = load(add_ptr_u((const RM*)Mean, offset)); float v = load(add_ptr_u((const RV*)Var, offset)); float p = load(add_ptr_u((const float*)Param, offset)); if (zero_infs) g = ew_zero_inf(g); if (zero_nans) g = ew_zero_nan(g); if (saturate != 0.0f) g = ew_maximum(ew_minimum(g, saturate), -saturate); g *= grad_scale * norm_scale; v = decay_var * v + (1.0f - decay_var) * g*g; float sigma = ew_sqrt(v); if (clip_sigma != 0.0f) { float clip = clip_sigma * sigma; g = fmaxf(g, -clip); g = fminf(g, clip); } m = decay_mean * m + (1.0f - decay_mean) * g; p -= lr * m * ew_rcp(sigma + epsilon); store(add_ptr_u(Mean, offset), m); store(add_ptr_u(Var, offset), v); store(add_ptr_u(Param, offset), p); } } } template <typename TG, typename TRM, typename TRV> bool ApplyAdam(CUstream stream, uint SMs, const TG* grad, const float* norm_scale, float* param, TRM* mean, TRV* var, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint size, uint lazy_emb, float saturate, bool zero_infs, bool zero_nans) { if (lazy_emb) { uint K = lazy_emb; uint C = size; uint threads, gridK; if (K <= 1024) { threads = THREAD_POW2(K); gridK = 1; } else { threads = 256; gridK = CEIL_DIV(K, 256); } apply_lazy_emb_adam<TG,TRM,TRV><<<dim3(C,gridK,1),threads,0,stream>>>(param, mean, var, grad, norm_scale, lr, decay_mean, decay_var, epsilon, grad_scale, clip_sigma, K, saturate, zero_infs, zero_nans, norm_scale != 0); } else { uint grid = SMs, threads = 64; if (size > SMs*1024) { threads = 1024; grid *= 2; } else if (size > SMs* 512) { threads = 1024; } else if (size > SMs* 256) { threads = 512; } else if (size > SMs* 128) { threads = 256; } else if (size > SMs* 64) { threads = 128; } apply_adam<TG,TRM,TRV><<<grid,threads,0,stream>>>(param, mean, var, 
grad, norm_scale, lr, decay_mean, decay_var, epsilon, grad_scale, clip_sigma, size, saturate, zero_infs, zero_nans, norm_scale != 0); } return true; } template bool ApplyAdam<float,float,float>(CUstream stream, uint SMs, const float* grad, const float* norm_scale, float* param, float* mean, float* var, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint size, uint lazy_update, float saturate, bool zero_infs, bool zero_nans); template bool ApplyAdam<ehalf,float,float>(CUstream stream, uint SMs, const ehalf* grad, const float* norm_scale, float* param, float* mean, float* var, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint size, uint lazy_update, float saturate, bool zero_infs, bool zero_nans); template bool ApplyAdam<bhalf,float,float>(CUstream stream, uint SMs, const bhalf* grad, const float* norm_scale, float* param, float* mean, float* var, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint size, uint lazy_update, float saturate, bool zero_infs, bool zero_nans); template bool ApplyAdam<float,mhalf,vhalf>(CUstream stream, uint SMs, const float* grad, const float* norm_scale, float* param, mhalf* mean, vhalf* var, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint size, uint lazy_update, float saturate, bool zero_infs, bool zero_nans); template bool ApplyAdam<ehalf,mhalf,vhalf>(CUstream stream, uint SMs, const ehalf* grad, const float* norm_scale, float* param, mhalf* mean, vhalf* var, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint size, uint lazy_update, float saturate, bool zero_infs, bool zero_nans); template bool ApplyAdam<bhalf,mhalf,vhalf>(CUstream stream, uint SMs, const bhalf* grad, const float* norm_scale, float* param, mhalf* mean, vhalf* var, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint size, uint lazy_update, float saturate, bool zero_infs, bool zero_nans); template <typename TG, typename RM, typename RV, uint BSIZE, uint THREADS> __global__ void __launch_bounds__(THREADS) apply_adam_gated( float* Param, RM* Mean, RV* Var, const TG* __restrict__ Grad, const float* __restrict__ Norm, const float* __restrict__ Gate, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, float saturate, uint zero_infs, uint zero_nans, uint use_norm) { const uint U = BSIZE*BSIZE/THREADS; float norm_scale = use_norm ? 
__ldg(Norm) : 1.0f; if (norm_scale != 0.0f) { uint bid = blockIdx.x; uint tid = threadIdx.x; if (Gate[bid] != 0.0f) { uint offset = bid*BSIZE*BSIZE + tid; Grad += offset; Mean += offset; Var += offset; Param += offset; float g[U], m[U], v[U], p[U]; for (uint j = 0; j < U; j++) g[j] = load((const TG*)Grad, j*THREADS); for (uint j = 0; j < U; j++) m[j] = load((const RM*)Mean, j*THREADS); for (uint j = 0; j < U; j++) v[j] = load((const RV*)Var, j*THREADS); for (uint j = 0; j < U; j++) p[j] = load((const float*)Param, j*THREADS); for (uint j = 0; j < U; j++) { if (zero_infs) g[j] = ew_zero_inf(g[j]); if (zero_nans) g[j] = ew_zero_nan(g[j]); if (saturate != 0.0f) g[j] = ew_maximum(ew_minimum(g[j], saturate), -saturate); g[j] *= grad_scale * norm_scale; v[j] = decay_var * v[j] + (1.0f - decay_var ) * g[j] * g[j]; float sig = sqrtf(v[j]); if (clip_sigma != 0.0f) { float clip = clip_sigma * sig; g[j] = fmaxf(g[j], -clip); g[j] = fminf(g[j], clip); } m[j] = decay_mean * m[j] + (1.0f - decay_mean) * g[j]; p[j] -= lr * m[j] / (sqrtf(v[j]) + epsilon); } for (uint j = 0; j < U; j++) store(Mean, m[j], j*THREADS); for (uint j = 0; j < U; j++) store(Var, v[j], j*THREADS); for (uint j = 0; j < U; j++) store(Param, p[j], j*THREADS); } } } template <typename TG, typename TRM, typename TRV> bool ApplyAdamGated(CUstream stream, const float* gate, const TG* grad, const float* norm_scale, float* param, TRM* mean, TRV* var, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint blocks, uint bsize, float saturate, bool zero_infs, bool zero_nans) { if (bsize == 8) apply_adam_gated<TG,TRM,TRV, 8, 32><<<blocks, 32,0,stream>>>(param, mean, var, grad, norm_scale, gate, lr, decay_mean, decay_var, epsilon, grad_scale, clip_sigma, saturate, zero_infs, zero_nans, norm_scale != 0); else if (bsize == 16) apply_adam_gated<TG,TRM,TRV,16, 64><<<blocks, 64,0,stream>>>(param, mean, var, grad, norm_scale, gate, lr, decay_mean, decay_var, epsilon, grad_scale, clip_sigma, saturate, zero_infs, zero_nans, norm_scale != 0); else if (bsize == 32) apply_adam_gated<TG,TRM,TRV,32, 256><<<blocks, 256,0,stream>>>(param, mean, var, grad, norm_scale, gate, lr, decay_mean, decay_var, epsilon, grad_scale, clip_sigma, saturate, zero_infs, zero_nans, norm_scale != 0); else if (bsize == 64) apply_adam_gated<TG,TRM,TRV,64,1024><<<blocks,1024,0,stream>>>(param, mean, var, grad, norm_scale, gate, lr, decay_mean, decay_var, epsilon, grad_scale, clip_sigma, saturate, zero_infs, zero_nans, norm_scale != 0); return true; } template bool ApplyAdamGated<float,float,float>(CUstream stream, const float* gate, const float* grad, const float* norm_scale, float* param, float* mean, float* var, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint blocks, uint bsize, float saturate, bool zero_infs, bool zero_nans); template bool ApplyAdamGated<ehalf,float,float>(CUstream stream, const float* gate, const ehalf* grad, const float* norm_scale, float* param, float* mean, float* var, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint blocks, uint bsize, float saturate, bool zero_infs, bool zero_nans); template bool ApplyAdamGated<bhalf,float,float>(CUstream stream, const float* gate, const bhalf* grad, const float* norm_scale, float* param, float* mean, float* var, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint blocks, uint bsize, float saturate, bool zero_infs, bool 
zero_nans); template bool ApplyAdamGated<float,mhalf,vhalf>(CUstream stream, const float* gate, const float* grad, const float* norm_scale, float* param, mhalf* mean, vhalf* var, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint blocks, uint bsize, float saturate, bool zero_infs, bool zero_nans); template bool ApplyAdamGated<ehalf,mhalf,vhalf>(CUstream stream, const float* gate, const ehalf* grad, const float* norm_scale, float* param, mhalf* mean, vhalf* var, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint blocks, uint bsize, float saturate, bool zero_infs, bool zero_nans); template bool ApplyAdamGated<bhalf,mhalf,vhalf>(CUstream stream, const float* gate, const bhalf* grad, const float* norm_scale, float* param, mhalf* mean, vhalf* var, float lr, float decay_mean, float decay_var, float epsilon, float grad_scale, float clip_sigma, uint blocks, uint bsize, float saturate, bool zero_infs, bool zero_nans); template <typename TG, typename RM, typename RV, uint BSIZE, uint THREADS> __global__ void __launch_bounds__(THREADS) apply_blocksparse_adam( float* Param, RM* Mean, RV* Var, const TG* __restrict__ Grad, const float* __restrict__ Select, const float* __restrict__ Norm, float lr_old, float lr_new, float decay_mean, float decay_var, float epsilon, float grad_scale, float saturate, uint zero_infs, uint zero_nans, uint use_select, uint use_norm) { const uint U = BSIZE*BSIZE/THREADS; float norm_scale = use_norm ? __ldg(Norm) : 1.0f; if (norm_scale != 0.0f) { grad_scale *= norm_scale; uint bid = blockIdx.x; uint tid = threadIdx.x; float select = use_select ? __ldg(Select + bid) : 0.0f; float lr = select == 0.0f ? lr_old : lr_new; uint offset = bid*BSIZE*BSIZE + tid; Grad += offset; Mean += offset; Var += offset; Param += offset; float g[U], m[U], v[U], p[U]; for (uint j = 0; j < U; j++) g[j] = load((const TG*)Grad, j*THREADS); for (uint j = 0; j < U; j++) m[j] = load((const RM*)Mean, j*THREADS); for (uint j = 0; j < U; j++) v[j] = load((const RV*)Var, j*THREADS); for (uint j = 0; j < U; j++) p[j] = load((const float*)Param, j*THREADS); for (uint j = 0; j < U; j++) { if (zero_infs) g[j] = ew_zero_inf(g[j]); if (zero_nans) g[j] = ew_zero_nan(g[j]); if (saturate != 0.0f) g[j] = ew_maximum(ew_minimum(g[j], saturate), -saturate); g[j] *= grad_scale; v[j] = decay_var * v[j] + (1.0f - decay_var ) * g[j] * g[j]; m[j] = decay_mean * m[j] + (1.0f - decay_mean) * g[j]; p[j] -= lr * m[j] * ew_rcp((ew_sqrt(v[j]) + epsilon)); } for (uint j = 0; j < U; j++) store(Mean, m[j], j*THREADS); for (uint j = 0; j < U; j++) store(Var, v[j], j*THREADS); for (uint j = 0; j < U; j++) store(Param, p[j], j*THREADS); } } template <typename TG, typename TRM, typename TRV> bool BlocksparseAdam(CUstream stream, float* param, TRM* mean, TRV* var, const TG* grad, const float* lr_select, const float* norm_scale, float lr_old, float lr_new, float decay_mean, float decay_var, float epsilon, float grad_scale, float saturate, bool zero_infs, bool zero_nans, uint blocks, uint bsize) { if (bsize == 8) apply_blocksparse_adam<TG,TRM,TRV, 8, 32><<<blocks, 32,0,stream>>>(param, mean, var, grad, lr_select, norm_scale, lr_old, lr_new, decay_mean, decay_var, epsilon, grad_scale, saturate, zero_infs, zero_nans, lr_select != 0, norm_scale != 0); else if (bsize == 16) apply_blocksparse_adam<TG,TRM,TRV,16, 64><<<blocks, 64,0,stream>>>(param, mean, var, grad, lr_select, norm_scale, lr_old, lr_new, decay_mean, decay_var, epsilon, grad_scale, 
saturate, zero_infs, zero_nans, lr_select != 0, norm_scale != 0); else if (bsize == 32) apply_blocksparse_adam<TG,TRM,TRV,32, 256><<<blocks, 256,0,stream>>>(param, mean, var, grad, lr_select, norm_scale, lr_old, lr_new, decay_mean, decay_var, epsilon, grad_scale, saturate, zero_infs, zero_nans, lr_select != 0, norm_scale != 0); else if (bsize == 64) apply_blocksparse_adam<TG,TRM,TRV,64,1024><<<blocks,1024,0,stream>>>(param, mean, var, grad, lr_select, norm_scale, lr_old, lr_new, decay_mean, decay_var, epsilon, grad_scale, saturate, zero_infs, zero_nans, lr_select != 0, norm_scale != 0); return true; } template bool BlocksparseAdam<float,float,float>(CUstream stream, float* param, float* mean, float* var, const float* grad, const float* lr_select, const float* norm_scale, float lr_old, float lr_new, float decay_mean, float decay_var, float epsilon, float grad_scale, float saturate, bool zero_infs, bool zero_nans, uint blocks, uint bsize); template bool BlocksparseAdam<ehalf,float,float>(CUstream stream, float* param, float* mean, float* var, const ehalf* grad, const float* lr_select, const float* norm_scale, float lr_old, float lr_new, float decay_mean, float decay_var, float epsilon, float grad_scale, float saturate, bool zero_infs, bool zero_nans, uint blocks, uint bsize); template bool BlocksparseAdam<float,mhalf,vhalf>(CUstream stream, float* param, mhalf* mean, vhalf* var, const float* grad, const float* lr_select, const float* norm_scale, float lr_old, float lr_new, float decay_mean, float decay_var, float epsilon, float grad_scale, float saturate, bool zero_infs, bool zero_nans, uint blocks, uint bsize); template bool BlocksparseAdam<ehalf,mhalf,vhalf>(CUstream stream, float* param, mhalf* mean, vhalf* var, const ehalf* grad, const float* lr_select, const float* norm_scale, float lr_old, float lr_new, float decay_mean, float decay_var, float epsilon, float grad_scale, float saturate, bool zero_infs, bool zero_nans, uint blocks, uint bsize); template <typename T, uint U> __global__ void __launch_bounds__(32) apply_ema(T* Ema, const float* __restrict__ Param, float decay, uint size) { uint tid = threadIdx.x; uint bid = blockIdx.x; uint offset = bid * U*32 + tid; bool pred[U]; for (uint j = 0; j < U; j++) pred[j] = offset + j*32 < size; Ema += offset; Param += offset; float e[U], p[U]; for (uint j = 0; j < U; j++) e[j] = load((const T*)Ema, j*32, pred[j]); for (uint j = 0; j < U; j++) p[j] = load( Param, j*32, pred[j]); for (uint j = 0; j < U; j++) e[j] -= (1.0f - decay) * (e[j] - p[j]); for (uint j = 0; j < U; j++) store(Ema, e[j], j*32, pred[j]); } template <typename T> bool ApplyEma(CUstream stream, T* ema, const float* param, float decay, uint size) { uint grid = CEIL_DIV(size, 128); // 1 warp with 4 unrolls if (grid > 200) { apply_ema<T,4><<<grid,32,0,stream>>>(ema, param, decay, size); } else { grid = CEIL_DIV(size, 32); // 1 warp with 1 unroll apply_ema<T,1><<<grid,32,0,stream>>>(ema, param, decay, size); } return true; } template bool ApplyEma<float>(CUstream stream, float* ema, const float* param, float decay, uint size); template bool ApplyEma<ehalf>(CUstream stream, ehalf* ema, const float* param, float decay, uint size); template <typename T, uint BSIZE, uint THREADS> __global__ void __launch_bounds__(THREADS) apply_ema_gated( T* Ema, const float* __restrict__ Param, const float* __restrict__ Gate, float decay) { const uint U = BSIZE*BSIZE/THREADS; uint bid = blockIdx.x; uint tid = threadIdx.x; if (Gate[bid] != 0.0f) { uint offset = bid*BSIZE*BSIZE + tid; Ema += offset; 
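// Explanatory note (added): the update applied below, e -= (1 - decay) * (e - p), is
// algebraically the usual exponential moving average e = decay * e + (1 - decay) * p,
// written as a single fused subtract of the difference.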
Param += offset; float e[U], p[U]; for (uint j = 0; j < U; j++) e[j] = load((const T*)Ema, j*THREADS); for (uint j = 0; j < U; j++) p[j] = load( Param, j*THREADS); for (uint j = 0; j < U; j++) e[j] -= (1.0f - decay) * (e[j] - p[j]); for (uint j = 0; j < U; j++) store(Ema, e[j], j*THREADS); } } template <typename T> bool ApplyEmaGated(CUstream stream, T* ema, const float* param, const float* gate, float decay, uint blocks, uint bsize) { if (bsize == 8) apply_ema_gated<T, 8, 32><<<blocks, 32,0,stream>>>(ema, param, gate, decay); else if (bsize == 16) apply_ema_gated<T,16, 64><<<blocks, 64,0,stream>>>(ema, param, gate, decay); else if (bsize == 32) apply_ema_gated<T,32, 256><<<blocks, 256,0,stream>>>(ema, param, gate, decay); else if (bsize == 64) apply_ema_gated<T,64,1024><<<blocks,1024,0,stream>>>(ema, param, gate, decay); return true; } template bool ApplyEmaGated<float>(CUstream stream, float* ema, const float* param, const float* gate, float decay, uint blocks, uint bsize); template bool ApplyEmaGated<ehalf>(CUstream stream, ehalf* ema, const float* param, const float* gate, float decay, uint blocks, uint bsize); template <typename T, uint BSIZE, uint THREADS, uint GATED> __global__ void __launch_bounds__(THREADS) blocksparse_l2_decay(T* Param, const float* __restrict__ Gate, float rate, float epsilon) { const uint U = BSIZE*BSIZE/THREADS; uint bid = blockIdx.x; uint tid = threadIdx.x; if (GATED == 0 || Gate[bid] != 0.0f) { uint offset = bid*BSIZE*BSIZE + tid; Param += offset; float p[U]; for (uint j = 0; j < U; j++) p[j] = load((const T*)Param, j*THREADS); // Reduce sum squared within this thread float sum_sqared = 0.0f; for (uint j = 0; j < U; j++) sum_sqared += p[j] * p[j]; // reduce within warp for (int i = 16; i > 0; i >>= 1) sum_sqared += shfl_xor(sum_sqared, i); // if using more than 1 warp, further reduced with shared memory if (THREADS > 32) { __shared__ float Share[32]; // first thread of each warp store to shared if ((tid & 31) == 0) Share[tid / 32] = sum_sqared; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions sum_sqared = Share[tid]; // reduce within this first warp for (int i = THREADS/64; i > 0; i >>= 1) sum_sqared += shfl_xor(sum_sqared, i); // outputs final reduction to shared Share[tid] = sum_sqared; } __syncthreads(); // broadcast result to all threads sum_sqared = Share[0]; } // apply weight decay and store updated paramm float decay = fminf(rsqrtf(sum_sqared + epsilon) * rate, 1.0f); for (uint j = 0; j < U; j++) store(Param, p[j] - p[j] * decay, j*THREADS); } } template <typename T> bool BlocksparseL2Decay(CUstream stream, T* param, const float* gate, float rate, float epsilon, uint blocks, uint bsize) { if (gate != NULL) { if (bsize == 8) blocksparse_l2_decay<T, 8, 32,1><<<blocks, 32,0,stream>>>(param, gate, rate, epsilon); else if (bsize == 16) blocksparse_l2_decay<T,16, 64,1><<<blocks, 64,0,stream>>>(param, gate, rate, epsilon); else if (bsize == 32) blocksparse_l2_decay<T,32, 256,1><<<blocks, 256,0,stream>>>(param, gate, rate, epsilon); else if (bsize == 64) blocksparse_l2_decay<T,64,1024,1><<<blocks,1024,0,stream>>>(param, gate, rate, epsilon); } else { if (bsize == 8) blocksparse_l2_decay<T, 8, 32,0><<<blocks, 32,0,stream>>>(param, gate, rate, epsilon); else if (bsize == 16) blocksparse_l2_decay<T,16, 64,0><<<blocks, 64,0,stream>>>(param, gate, rate, epsilon); else if (bsize == 32) blocksparse_l2_decay<T,32, 256,0><<<blocks, 256,0,stream>>>(param, gate, rate, epsilon); else if (bsize == 64) 
blocksparse_l2_decay<T,64,1024,0><<<blocks,1024,0,stream>>>(param, gate, rate, epsilon); } return true; } template bool BlocksparseL2Decay<float>(CUstream stream, float* param, const float* gate, float rate, float epsilon, uint blocks, uint bsize); #define MAX_NORM 0 #define L2_NORM 1 template <typename T, uint BSIZE, uint THREADS, uint NORM> __global__ void __launch_bounds__(THREADS) blocksparse_norm(float* Norm, const T* __restrict__ Param) { const uint U = BSIZE*BSIZE/THREADS; uint bid = blockIdx.x; uint tid = threadIdx.x; uint offset = bid*BSIZE*BSIZE + tid; Param += offset; float p[U]; for (uint j = 0; j < U; j++) p[j] = load(Param, j*THREADS); // Reduce max within this thread float norm = 0.0f; for (uint j = 0; j < U; j++) if (NORM == MAX_NORM) norm = fmaxf(fabsf(p[j]), norm); else norm += ew_sqr(p[j]); // reduce within warp for (int i = 16; i > 0; i >>= 1) if (NORM == MAX_NORM) norm = fmaxf(norm, shfl_xor(norm, i)); else norm += shfl_xor(norm, i); // if using more than 1 warp, further reduced with shared memory if (THREADS > 32) { __shared__ float Share[32]; // first thread of each warp store to shared if ((tid & 31) == 0) Share[tid / 32] = norm; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions norm = Share[tid]; // reduce within this first warp for (int i = THREADS/64; i > 0; i >>= 1) if (NORM == MAX_NORM) norm = fmaxf(norm, shfl_xor(norm, i)); else norm += shfl_xor(norm, i); } } // first thread has the final reduced max_abs if (tid == 0) { if (NORM == L2_NORM) norm = ew_sqrt(norm); Norm[bid] = norm; } } template <typename T> bool BlocksparseNorm(CUstream stream, float* norm, const T* param, uint blocks, uint bsize, uint norm_type) { if (norm_type == MAX_NORM) { if (bsize == 8) blocksparse_norm<T, 8, 32,MAX_NORM><<<blocks, 32,0,stream>>>(norm, param); else if (bsize == 16) blocksparse_norm<T,16, 64,MAX_NORM><<<blocks, 64,0,stream>>>(norm, param); else if (bsize == 32) blocksparse_norm<T,32, 256,MAX_NORM><<<blocks, 256,0,stream>>>(norm, param); else if (bsize == 64) blocksparse_norm<T,64,1024,MAX_NORM><<<blocks,1024,0,stream>>>(norm, param); } else { if (bsize == 8) blocksparse_norm<T, 8, 32, L2_NORM><<<blocks, 32,0,stream>>>(norm, param); else if (bsize == 16) blocksparse_norm<T,16, 64, L2_NORM><<<blocks, 64,0,stream>>>(norm, param); else if (bsize == 32) blocksparse_norm<T,32, 256, L2_NORM><<<blocks, 256,0,stream>>>(norm, param); else if (bsize == 64) blocksparse_norm<T,64,1024, L2_NORM><<<blocks,1024,0,stream>>>(norm, param); } return true; } template bool BlocksparseNorm<float>(CUstream stream, float* norm, const float* param, uint blocks, uint bsize, uint norm_type); template bool BlocksparseNorm<ehalf>(CUstream stream, float* norm, const ehalf* param, uint blocks, uint bsize, uint norm_type); template bool BlocksparseNorm<bhalf>(CUstream stream, float* norm, const bhalf* param, uint blocks, uint bsize, uint norm_type); __global__ void __launch_bounds__(256) blocksparse_prune(float* Gate, const uint* __restrict__ Idx, uint blocks, uint keep) { uint tid = threadIdx.x; uint bid = blockIdx.x; #pragma unroll 1 for (uint i = bid*256 + tid; i < blocks; i += gridDim.x*256) { Gate[Idx[i]] = i < keep ? 1.0f : 0.0f; } } bool BlocksparsePrune(CUstream stream, uint SMs, float* gate, const uint* idx, uint blocks, uint keep) { uint grid = blocks > SMs*512 ? SMs*4 : blocks > SMs*256 ? 
SMs*2 : SMs; blocksparse_prune<<<grid,256,0,stream>>>(gate, idx, blocks, keep); return true; } template <typename T, uint BSIZE, uint THREADS, uint NORM> __global__ void __launch_bounds__(THREADS) blocksparse_threshold_prune(const T* __restrict__ Param, float* Gate, float threshold) { const uint U = BSIZE*BSIZE/THREADS; uint bid = blockIdx.x; uint tid = threadIdx.x; uint offset = bid*BSIZE*BSIZE + tid; Param += offset; float p[U]; for (uint j = 0; j < U; j++) p[j] = load(Param, j*THREADS); // Reduce max within this thread float norm = 0.0f; for (uint j = 0; j < U; j++) if (NORM == MAX_NORM) norm = fmaxf(fabsf(p[j]), norm); else norm += ew_sqr(p[j]); // reduce within warp for (int i = 16; i > 0; i >>= 1) if (NORM == MAX_NORM) norm = fmaxf(norm, shfl_xor(norm, i)); else norm += shfl_xor(norm, i); // if using more than 1 warp, further reduced with shared memory if (THREADS > 32) { __shared__ float Share[32]; // first thread of each warp store to shared if ((tid & 31) == 0) Share[tid/32] = norm; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions norm = Share[tid]; // reduce within this first warp for (int i = THREADS/64; i > 0; i >>= 1) if (NORM == MAX_NORM) norm = fmaxf(norm, shfl_xor(norm, i)); else norm += shfl_xor(norm, i); } } // first thread has the final reduced max_abs // compare against threshhold and update gate if needed. // if (bid < 2 && tid == 0) // printf("%d %d %.5f %.5f\n", bid, gridDim.x, max_abs, threshold); if (tid == 0) { if (NORM == L2_NORM) norm = ew_sqrt(norm); Gate[bid] = norm < threshold ? 0.0f : 1.0f; } } template <typename T> bool BlocksparseThresholdPrune(CUstream stream, const T* param, float* gate, float threshold, uint blocks, uint bsize, uint norm_type) { if (norm_type == MAX_NORM) { if (bsize == 8) blocksparse_threshold_prune<T, 8, 32,MAX_NORM><<<blocks, 32,0,stream>>>(param, gate, threshold); else if (bsize == 16) blocksparse_threshold_prune<T,16, 64,MAX_NORM><<<blocks, 64,0,stream>>>(param, gate, threshold); else if (bsize == 32) blocksparse_threshold_prune<T,32, 256,MAX_NORM><<<blocks, 256,0,stream>>>(param, gate, threshold); else if (bsize == 64) blocksparse_threshold_prune<T,64,1024,MAX_NORM><<<blocks,1024,0,stream>>>(param, gate, threshold); } else { if (bsize == 8) blocksparse_threshold_prune<T, 8, 32, L2_NORM><<<blocks, 32,0,stream>>>(param, gate, threshold); else if (bsize == 16) blocksparse_threshold_prune<T,16, 64, L2_NORM><<<blocks, 64,0,stream>>>(param, gate, threshold); else if (bsize == 32) blocksparse_threshold_prune<T,32, 256, L2_NORM><<<blocks, 256,0,stream>>>(param, gate, threshold); else if (bsize == 64) blocksparse_threshold_prune<T,64,1024, L2_NORM><<<blocks,1024,0,stream>>>(param, gate, threshold); } return true; } template bool BlocksparseThresholdPrune<float>(CUstream stream, const float* param, float* gate, float threshold, uint blocks, uint bsize, uint norm_type); template <typename T, typename V> __global__ void __launch_bounds__(1024) reduce_sum_squared(float* SumSquared, const T* X, uint size, float grad_scale, float saturate, uint zero_infs, uint zero_nans) { __shared__ float Share[32]; uint tid = threadIdx.x; uint bid = blockIdx.x; // check if this block has any work to do if (bid * 1024 < size) { float sum_squared = 0.0f; #pragma unroll 1 for (uint offset = bid * 1024 + tid; offset < size; offset += gridDim.x*1024) { V x = load(X + offset); if (zero_infs) x = ew_zero_inf(x); if (zero_nans) x = ew_zero_nan(x); if (saturate != 0.0f) x = ew_maximum(ew_minimum(x, saturate), -saturate); x = ew_mul(x, 
grad_scale); sum_squared += ew_sum(ew_sqr(x)); } // reduce within warp #pragma unroll for (int i = 16; i > 0; i >>= 1) sum_squared += shfl_xor(sum_squared, i); // first thread of each warp store to shared if ((tid & 31) == 0) Share[tid/32] = sum_squared; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions sum_squared = Share[tid]; // reduce within this last warp for (int i = 16; i > 0; i >>= 1) sum_squared += shfl_xor(sum_squared, i); if (tid == 0) atomicRed(SumSquared, sum_squared); } } } template <typename T, typename V> bool ReduceSumSquared(CUstream stream, uint SMs, float* sum_squared, const T* x, uint size, float grad_scale, float saturate, bool zero_infs, bool zero_nans, uint tensor_idx, uint tensor_cnt) { if (tensor_idx == 0) cuMemsetD32Async((CUdeviceptr)sum_squared, 0, tensor_cnt, stream); sum_squared += tensor_idx; if ((size & 3) == 0 && size > SMs*1024) { size >>= 2; uint grid = size > SMs*1024 ? SMs*2 : SMs; reduce_sum_squared<V,float4><<<grid,1024,0,stream>>>(sum_squared, (const V*)x, size, grad_scale, saturate, zero_infs, zero_nans); } else { uint grid = size > SMs*1024 ? SMs*2 : SMs; reduce_sum_squared<T,float><<<grid,1024,0,stream>>>(sum_squared, x, size, grad_scale, saturate, zero_infs, zero_nans); } return true; } template bool ReduceSumSquared<float,float4>(CUstream stream, uint SMs, float* sum_squared, const float* x, uint size, float grad_scale, float saturate, bool zero_infs, bool zero_nans, uint tensor_idx, uint tensor_cnt); template bool ReduceSumSquared<bhalf,bhalf4>(CUstream stream, uint SMs, float* sum_squared, const bhalf* x, uint size, float grad_scale, float saturate, bool zero_infs, bool zero_nans, uint tensor_idx, uint tensor_cnt); template bool ReduceSumSquared<ehalf,ehalf4>(CUstream stream, uint SMs, float* sum_squared, const ehalf* x, uint size, float grad_scale, float saturate, bool zero_infs, bool zero_nans, uint tensor_idx, uint tensor_cnt); __global__ void compute_clip_norm(float* Norm, float* Scale, const float* SumSquared, float clip_norm, uint tensor_cnt) { __shared__ float Share[32]; uint tid = threadIdx.x; float sum_squared = 0.0f; #pragma unroll 1 for (uint offset = tid; offset < tensor_cnt; offset += 1024) sum_squared += __ldg(SumSquared + offset); // reduce within warp #pragma unroll for (int i = 16; i > 0; i >>= 1) sum_squared += shfl_xor(sum_squared, i); // first thread of each warp store to shared if ((tid & 31) == 0) Share[tid/32] = sum_squared; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions sum_squared = Share[tid]; // reduce within this last warp for (int i = 16; i > 0; i >>= 1) sum_squared += shfl_xor(sum_squared, i); if (tid == 0) { float global_norm = sqrtf(sum_squared); uint is_finite; asm("{ \n\t" ".reg .pred is_finite; \n\t" "testp.finite.f32 is_finite, %1; \n\t" "selp.u32 %0, 1, 0, is_finite; \n\t" "}" : "=r"(is_finite) : "f"(global_norm)); if (is_finite == 1) *Scale = clip_norm / fmaxf(global_norm, clip_norm); else *Scale = 0.0f; // use zero for sentinal value to skip updates *Norm = global_norm; } } } bool ComputeClipNorm(CUstream stream, float* l2norm, float* scale, float* sum_squared, float clip_norm, uint tensor_cnt) { compute_clip_norm<<<1,1024,0,stream>>>(l2norm, scale, (const float*)sum_squared, clip_norm, tensor_cnt); return true; } #endif
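// ---------------------------------------------------------------------------
// Added sketch (not from the original source): one way the routines above are
// meant to compose into a global-norm-clipped Adam step. The wrapper name, the
// pointer arrays and the hyperparameter values are hypothetical placeholders;
// only ReduceSumSquared, ComputeClipNorm and ApplyAdam come from the
// declarations above, and the sketch assumes they are visible here.
// ---------------------------------------------------------------------------
static void adam_step_with_global_clip_sketch(
    CUstream stream, uint SMs,
    float** grads, float** params, float** means, float** vars,
    const uint* sizes, uint tensor_cnt,
    float* sum_squared,  // device buffer of tensor_cnt floats
    float* l2norm,       // device scalar: global gradient L2 norm
    float* norm_scale,   // device scalar: clip scale (0.0f => skip updates)
    float lr, float clip_norm)
{
    // 1) per-tensor sum of squared gradients; tensor_idx == 0 also zeroes the buffer
    for (uint i = 0; i < tensor_cnt; i++)
        ReduceSumSquared<float,float4>(stream, SMs, sum_squared, grads[i], sizes[i],
            /*grad_scale*/1.0f, /*saturate*/0.0f, /*zero_infs*/false, /*zero_nans*/false,
            i, tensor_cnt);

    // 2) fold the per-tensor partials into the global norm and the shared clip scale:
    //    scale = clip_norm / max(norm, clip_norm), or 0 if the norm is not finite
    ComputeClipNorm(stream, l2norm, norm_scale, sum_squared, clip_norm, tensor_cnt);

    // 3) Adam update per tensor, every gradient rescaled by the shared norm_scale
    for (uint i = 0; i < tensor_cnt; i++)
        ApplyAdam<float,float,float>(stream, SMs, grads[i], norm_scale, params[i], means[i], vars[i],
            lr, /*decay_mean*/0.9f, /*decay_var*/0.999f, /*epsilon*/1e-8f,
            /*grad_scale*/1.0f, /*clip_sigma*/0.0f, sizes[i], /*lazy_emb*/0,
            /*saturate*/0.0f, /*zero_infs*/false, /*zero_nans*/false);
}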
extern "C" { #include "../cwc.h" #include "../cwc_internal.h" } #include "../../inc/ccv_convnet_internal.h" template <int input_per_thread, int filter_per_thread, int filter_per_block> __global__ static void _cwc_kern_convolutional_forward_propagate(const int strides, const int border, const int batch, float* input, const int rows, const int cols, const int channels_per_partition, const int partition, float* out, const int out_rows, const int out_cols, float* filter, const int filter_rows, const int filter_cols, const int count, float* const biases) { assert(gridDim.x * partition * filter_per_block == out_cols * count); assert(gridDim.y == out_rows); assert(gridDim.z == partition); extern __shared__ float shared[]; float* shared_block = &shared[0]; float* shared_weights = &shared[batch]; float* shared_bias = &shared[batch + filter_per_block]; float prod[filter_per_thread][input_per_thread]; assert(batch == input_per_thread * blockDim.x); assert(filter_per_block == filter_per_thread * blockDim.y); const int thidx = threadIdx.x + threadIdx.y * blockDim.x; const int thcnt = blockDim.x * blockDim.y; int c, i, j, x, y; #pragma unroll for (i = 0; i < filter_per_thread; i++) #pragma unroll for (j = 0; j < input_per_thread; j++) prod[i][j] = 0; const int origin_x = blockIdx.x % out_cols; const int origin_y = blockIdx.y; const int filter_group_idx = blockIdx.z * count / (filter_per_block * partition) + blockIdx.x / out_cols; // for the partitioned filter group input += (blockIdx.z * channels_per_partition * rows * cols + origin_y * strides * cols + origin_x * strides) * batch; assert(thcnt >= batch); assert(thcnt >= filter_per_block); if (thidx < filter_per_block) shared_bias[thidx] = biases[filter_group_idx * filter_per_block + thidx]; const int start_x = max(origin_x * strides - border, 0) - (origin_x * strides - border); const int end_x = min(origin_x * strides - border + filter_cols, cols) - (origin_x * strides - border); const int start_y = max(origin_y * strides - border, 0) - (origin_y * strides - border); const int end_y = min(origin_y * strides - border + filter_rows, rows) - (origin_y * strides - border); filter += filter_group_idx * filter_per_block; for (c = 0; c < channels_per_partition; c++) { for (y = start_y; y < end_y; y++) for (x = start_x; x < end_x; x++) { if (thidx < batch) shared_block[thidx] = input[((y - border) * cols + x - border) * batch + thidx]; if (thidx < filter_per_block) shared_weights[thidx] = filter[(y * filter_cols + x) * count + thidx]; __syncthreads(); #pragma unroll for (i = 0; i < filter_per_thread; i++) #pragma unroll for (j = 0; j < input_per_thread; j++) prod[i][j] += shared_block[j + threadIdx.x * input_per_thread] * shared_weights[i + threadIdx.y * filter_per_thread]; __syncthreads(); } input += rows * cols * batch; filter += filter_rows * filter_cols * count; } const int outcnt = out_rows * out_cols * batch; out += (filter_group_idx * filter_per_block + threadIdx.y * filter_per_thread) * outcnt + (origin_y * out_cols + origin_x) * batch + threadIdx.x * input_per_thread; #pragma unroll for (i = 0; i < filter_per_thread; i++) { const float bias = shared_bias[i + threadIdx.y * filter_per_thread]; #pragma unroll for (j = 0; j < input_per_thread; j++) out[j] = max(0.0, prod[i][j] + bias); out += outcnt; } } static int _cwc_convnet_convolutional_forward_propagate_vary(ccv_convnet_layer_t* layer, int rows, int cols, int batch, float* a, float* b, const cudaStream_t& stream, int x, int y, int z) // these are the dynamic configurations { int out_rows, out_cols, 
out_partition; ccv_convnet_make_output(layer, rows, cols, &out_rows, &out_cols, &out_partition); // first do configuration validation if (!(batch % x == 0 && z % y == 0 && layer->net.convolutional.count % (z * out_partition) == 0 && batch / x * z / y <= 1024 && /* thread number constraint */ batch / x * z / y >= batch && batch / x * z / y >= z && /* kernel internal loading constraint */ sizeof(float) * (batch + z * 2) <= 48 * 1024 /* shared memory size constraint */)) return -1; assert(b); #define vary_block(_x, _y, _z) do { \ dim3 threads_per_block(batch / _x, _z / _y); \ assert(threads_per_block.x * threads_per_block.y <= 1024); \ dim3 num_blocks(out_cols * layer->net.convolutional.count / (_z * out_partition), out_rows, out_partition); \ int shared_memory_size = sizeof(float) * (batch + _z * 2); \ cudaFuncSetCacheConfig(_cwc_kern_convolutional_forward_propagate<_x, _y, _z>, cudaFuncCachePreferShared); \ _cwc_kern_convolutional_forward_propagate \ <_x, _y, _z> \ <<<num_blocks, threads_per_block, shared_memory_size, stream>>> \ (layer->net.convolutional.strides, layer->net.convolutional.border, batch, \ a, rows, cols, layer->input.matrix.channels / out_partition, out_partition, \ b, out_rows, out_cols, \ layer->w, layer->net.convolutional.rows, layer->net.convolutional.cols, layer->net.convolutional.count, \ layer->bias); \ } while (0) cwc_vary_4_a(x, 1, 2, 4, 8, cwc_vary_5_b, y, 1, 2, 4, 6, 8, cwc_vary_6_c, z, 16, 24, 32, 36, 64, 72, vary_block); #undef vary_block cudaError_t error = cudaGetLastError(); if (cudaErrorInvalidConfiguration == error) return -1; assert(error == cudaSuccess); return 0; } void cwc_convnet_convolutional_forward_propagate(ccv_convnet_layer_t* layer, int rows, int cols, int batch, float* a, float* b, const cudaStream_t& stream) { static int vary_x[] = { 1, 2, 4, 8 }; static int vary_y[] = { 1, 2, 4, 6, 8 }; static int vary_z[] = { 16, 24, 32, 36, 64, 72 }; CWC_IMPLEMENT_VARY_STUB(EXTRA(layer)->vary.convolutional.forward, vary_x, vary_y, vary_z, _cwc_convnet_convolutional_forward_propagate_vary, layer, rows, cols, batch, a, b, stream); } template <int channel_per_thread, int filter_per_thread, int channel_per_block, int batch_per_block> __global__ static void _cwc_kern_convolutional_backward_propagate_coefficient_default(const int strides, const int border, const int batch, const int batch_group_count, float* input, const int rows, const int cols, const int channels_per_partition, const int partition, float* out_grad, const int out_rows, const int out_cols, float* coeff, const int filter_rows, const int filter_cols, const int count_per_partition) { assert(gridDim.x == filter_cols); assert(gridDim.y == filter_rows); assert(gridDim.z * channel_per_block * batch_per_block == channels_per_partition * partition * batch); assert(batch == batch_per_block * batch_group_count); extern __shared__ float shared[]; float* shared_input = &shared[0]; float* shared_out_grad = &shared[channel_per_block]; const int thidx = threadIdx.x + threadIdx.y * blockDim.x; const int thcnt = blockDim.x * blockDim.y; assert(blockDim.x * filter_per_thread == count_per_partition); assert(blockDim.y * channel_per_thread == channel_per_block); assert(thcnt >= channel_per_block); assert(thcnt >= count_per_partition); const int origin_x = blockIdx.x; const int origin_y = blockIdx.y; const int channel_group_count = channels_per_partition / channel_per_block; const int partition_idx = blockIdx.z / (channel_group_count * batch_group_count); const int batch_group_idx = (blockIdx.z % (channel_group_count * 
batch_group_count)) / channel_group_count; const int channel_group_idx = blockIdx.z % channel_group_count; const int start_x = max(origin_x - border, 0) - (origin_x - border); const int end_x = min(out_cols, (cols + border - origin_x + strides - 1) / strides); const int start_y = max(origin_y - border, 0) - (origin_y - border); const int end_y = min(out_rows, (rows + border - origin_y + strides - 1) / strides); input += (partition_idx * batch + batch_group_idx * batch_per_block) * rows * cols * channels_per_partition + (origin_y * cols + origin_x) * channels_per_partition + channel_group_idx * channel_per_block; out_grad += (partition_idx * batch + batch_group_idx * batch_per_block) * out_rows * out_cols * count_per_partition; int i, j, c, x, y; float prod[channel_per_thread][filter_per_thread]; #pragma unroll for (i = 0; i < channel_per_thread; i++) #pragma unroll for (j = 0; j < filter_per_thread; j++) prod[i][j] = 0; for (c = 0; c < batch_per_block; c++) { for (y = start_y; y < end_y; y++) for (x = start_x; x < end_x; x++) { if (thidx < count_per_partition) shared_out_grad[thidx] = out_grad[(y * out_cols + x) * count_per_partition + thidx]; if (thidx < channel_per_block) shared_input[thidx] = input[((y * strides - border) * cols + x * strides - border) * channels_per_partition + thidx]; __syncthreads(); #pragma unroll for (i = 0; i < channel_per_thread; i++) #pragma unroll for (j = 0; j < filter_per_thread; j++) prod[i][j] += shared_input[i + threadIdx.y * channel_per_thread] * shared_out_grad[j + threadIdx.x * filter_per_thread]; __syncthreads(); } input += rows * cols * channels_per_partition; out_grad += out_rows * out_cols * count_per_partition; } const int cocnt = filter_cols * filter_rows * count_per_partition * partition; coeff += cocnt * (channels_per_partition * batch_group_idx + channel_group_idx * channel_per_block) + (origin_y * filter_cols + origin_x) * count_per_partition * partition + partition_idx * count_per_partition; #pragma unroll for (i = 0; i < channel_per_thread; i++) #pragma unroll for (j = 0; j < filter_per_thread; j++) coeff[(i + threadIdx.y * channel_per_thread) * cocnt + j + threadIdx.x * filter_per_thread] = prod[i][j]; } template <int channel_per_thread, int filter_per_thread, int static_filter_rows, int batch_per_block> __global__ static void _cwc_kern_convolutional_backward_propagate_coefficient_rows(const int strides, const int border, const int batch, float* input, const int rows, const int cols, const int channels, float* out_grad, const int out_rows, const int out_cols, float* coeff, const int filter_rows, const int filter_cols, const int count) { assert(gridDim.x == filter_cols); assert(gridDim.y == out_rows); assert(static_filter_rows == filter_rows); extern __shared__ float shared[]; float* shared_input = &shared[0]; float* shared_out_grad = &shared[filter_rows * channels * batch_per_block]; const int thidx = threadIdx.x + threadIdx.y * blockDim.x; const int thcnt = blockDim.x * blockDim.y; assert(blockDim.x * filter_per_thread == count); assert(blockDim.y * channel_per_thread == channels); assert(thcnt >= channels * batch_per_block); assert(thcnt >= count); const int origin_x = blockIdx.x; const int batch_group_idx = blockIdx.z; const int start_x = max(origin_x - border, 0) - (origin_x - border); const int end_x = min(out_cols, (cols + border - origin_x + strides - 1) / strides); input += (rows * cols * channels * batch_group_idx + origin_x * channels) * batch_per_block; out_grad += out_rows * out_cols * count * batch_group_idx * batch_per_block; 
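// Explanatory note (added): each thread accumulates prod[filter_row][channel][filter] over the
// batch_per_block samples handled by this block and over the valid output columns of row
// blockIdx.y. The result is written to a coefficient slab indexed by (out_row, batch_group);
// the host wrapper later folds those out_rows * batch_group_count slabs into the weight
// gradient with a single cublasSgemv against the `unit` vector (presumably all ones).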
int i, j, k, c, x; const int y = blockIdx.y; float prod[static_filter_rows][channel_per_thread][filter_per_thread]; #pragma unroll for (i = 0; i < static_filter_rows; i++) #pragma unroll for (j = 0; j < channel_per_thread; j++) #pragma unroll for (k = 0; k < filter_per_thread; k++) prod[i][j][k] = 0; const int iy = y * strides - border; input += y * strides * cols * channels * batch_per_block; out_grad += y * out_cols * count * batch_per_block; for (x = start_x; x < end_x; x++) { if (thidx < channels * batch_per_block) #pragma unroll for (i = 0; i < static_filter_rows; i++) shared_input[i * channels * batch_per_block + thidx] = (i + iy >= 0 && i + iy < rows) ? input[((i - border) * cols + x * strides - border) * channels * batch_per_block + thidx] : 0; if (thidx < count) #pragma unroll for (c = 0; c < batch_per_block; c++) shared_out_grad[c * count + thidx] = out_grad[x * count * batch_per_block + c * count + thidx]; __syncthreads(); #pragma unroll for (i = 0; i < static_filter_rows; i++) #pragma unroll for (j = 0; j < channel_per_thread; j++) #pragma unroll for (k = 0; k < filter_per_thread; k++) { float sum = 0; #pragma unroll for (c = 0; c < batch_per_block; c++) sum += shared_input[i * channels * batch_per_block + c * channels + j + threadIdx.y * channel_per_thread] * shared_out_grad[c * count + k + threadIdx.x * filter_per_thread]; prod[i][j][k] += sum; } __syncthreads(); } const int cocnt = filter_cols * filter_rows * count; coeff += cocnt * channels * (blockIdx.y + blockIdx.z * out_rows) + origin_x * count; #pragma unroll for (i = 0; i < channel_per_thread; i++) #pragma unroll for (j = 0; j < static_filter_rows; j++) #pragma unroll for (k = 0; k < filter_per_thread; k++) coeff[(i + threadIdx.y * channel_per_thread) * cocnt + j * filter_cols * count + k + threadIdx.x * filter_per_thread] = prod[j][i][k]; } template <int input_per_thread, int channel_per_thread, int channel_per_block, int strides> __global__ static void _cwc_kern_convolutional_backward_propagate_error(const int border, const int batch, float* input_grad, const int rows, const int cols, const int channels, float* out_grad, const int out_rows, const int out_cols, float* filter, const int filter_rows, const int filter_cols, const int count_per_partition, const int partition) { assert(gridDim.z == partition); extern __shared__ float shared[]; float* shared_grad = &shared[0]; float* shared_weights = &shared[batch]; float prod[input_per_thread][channel_per_thread]; assert(batch == input_per_thread * blockDim.x); assert(channel_per_block == channel_per_thread * blockDim.y); const int thidx = threadIdx.x + threadIdx.y * blockDim.x; const int thcnt = blockDim.x * blockDim.y; assert(thcnt >= batch); assert(thcnt >= channel_per_block); const int origin_x = blockIdx.x % cols; const int origin_y = blockIdx.y; const int channel_group_idx = blockIdx.z * channels / (channel_per_block * partition) + blockIdx.x / cols; int i, j, k, c, x, y; #pragma unroll for (i = 0; i < input_per_thread; i++) #pragma unroll for (j = 0; j < channel_per_thread; j++) prod[i][j] = 0; const int ycnt = (filter_rows - 1 - (origin_x + border) % strides) / strides + 1; const int xcnt = (filter_cols - 1 - (origin_y + border) % strides) / strides + 1; const int filter_y = (ycnt - 1) * strides + (origin_x + border) % strides; assert(filter_y < filter_rows); const int filter_x = (xcnt - 1) * strides + (origin_y + border) % strides; assert(filter_x < filter_cols); const int out_y = (origin_x + border) / strides - ycnt + 1; const int out_x = (origin_y + border) / 
strides - xcnt + 1; const int out_start_y = max(out_y, 0); const int out_start_x = max(out_x, 0); const int filter_start_y = filter_y - (out_start_y - out_y) * strides; const int filter_start_x = filter_x - (out_start_x - out_x) * strides; out_grad += (blockIdx.z * count_per_partition * out_rows * out_cols + out_start_y * out_cols + out_start_x) * batch; const int out_end_y = out_y + ycnt - 1; const int out_end_x = out_x + xcnt - 1; const int filter_end_y = (origin_x + border) % strides + (out_end_y - min(out_end_y, out_rows - 1)) * strides; const int filter_end_x = (origin_y + border) % strides + (out_end_x - min(out_end_x, out_cols - 1)) * strides; const int outcnt = out_rows * out_cols * batch; filter += channel_group_idx * channel_per_block; for (k = 0; k < count_per_partition; k++) { float* out_grad_per_filter = out_grad + k * outcnt; for (y = filter_start_y; y >= filter_end_y; y -= strides) { for (x = filter_start_x, c = 0; x >= filter_end_x; x -= strides, c++) { if (thidx < batch) shared_grad[thidx] = out_grad_per_filter[c * batch + thidx]; if (thidx < channel_per_block) shared_weights[thidx] = filter[(y * filter_cols + x) * channels + thidx]; __syncthreads(); #pragma unroll for (i = 0; i < input_per_thread; i++) #pragma unroll for (j = 0; j < channel_per_thread; j++) prod[i][j] += shared_grad[i + threadIdx.x * input_per_thread] * shared_weights[j + threadIdx.y * channel_per_thread]; __syncthreads(); } out_grad_per_filter += out_cols * batch; } filter += filter_rows * filter_cols * channels; } const int incnt = rows * cols * batch; input_grad += channel_group_idx * channel_per_block * incnt + (origin_x * cols + origin_y) * batch; #pragma unroll for (i = 0; i < channel_per_thread; i++) #pragma unroll for (j = 0; j < input_per_thread; j++) input_grad[(i + threadIdx.y * channel_per_thread) * incnt + j + threadIdx.x * input_per_thread] = prod[j][i]; } // this method rewinds a matrix template <int reorder_per_block> __global__ static void _cwc_kern_reorder_matrix_major(float* a, float* b, const int count, const int channels_per_partition, const int partition, const int batch) { assert(blockDim.x == reorder_per_block); const int batch_group_idx = blockIdx.y % (batch / reorder_per_block); const int channel_group_idx = blockIdx.y / (batch / reorder_per_block); a += (blockIdx.z * count * channels_per_partition + blockIdx.x + channel_group_idx * reorder_per_block * count) * batch + batch_group_idx * reorder_per_block; b += (blockIdx.z * count * batch + batch_group_idx * reorder_per_block * count + blockIdx.x) * channels_per_partition + channel_group_idx * reorder_per_block; __shared__ float prod[reorder_per_block][reorder_per_block]; int i; #pragma unroll for (i = 0; i < reorder_per_block; i++) prod[i][threadIdx.x] = a[i * count * batch + threadIdx.x]; __syncthreads(); #pragma unroll for (i = 0; i < reorder_per_block; i++) b[i * count * channels_per_partition + threadIdx.x] = prod[threadIdx.x][i]; } // this method rewinds a matrix __global__ static void _cwc_kern_reorder_matrix_major_parted(float* a, float* b, const int count, const int channels, const int batch, const int channels_per_partition, const int batch_per_partition, const int partition) { b[(threadIdx.x * count + blockIdx.x) * channels + blockIdx.y + threadIdx.y * channels_per_partition] = a[(blockIdx.y * count + blockIdx.x) * batch + threadIdx.x + threadIdx.y * batch_per_partition]; } // this method rewinds a matrix template <int batch_per_block> __global__ static void _cwc_kern_reorder_matrix_major_per_block_rows(float* a, float* 
b, const int count, const int channels, const int batch) { const int thidx = blockIdx.y * batch_per_block + threadIdx.y; b[(blockIdx.y * count + blockIdx.x) * channels * batch_per_block + threadIdx.y * channels + threadIdx.x] = a[(threadIdx.x * count + blockIdx.x) * batch + thidx]; } // this method rewinds a matrix template <int channel_per_block, int batch_per_block, int batch_group_per_block> __global__ static void _cwc_kern_reorder_matrix_major_per_block(float* a, float* b, const int count, const int channels, const int batch) { const int batch_group_idx = blockIdx.y % (batch / (batch_per_block * batch_group_per_block)); const int channel_group_idx = blockIdx.y / (batch / (batch_per_block * batch_group_per_block)); a += (channel_group_idx * channel_per_block * count + blockIdx.x) * batch + batch_group_idx * batch_per_block * batch_group_per_block; b += (batch_group_idx * batch_group_per_block * count + blockIdx.x) * channels * batch_per_block + channel_group_idx * channel_per_block; __shared__ float prod[channel_per_block][batch_per_block * batch_group_per_block]; int i, j; #pragma unroll for (i = 0; i < channel_per_block; i++) prod[i][threadIdx.x] = a[i * count * batch + threadIdx.x]; __syncthreads(); if (threadIdx.x < channel_per_block) #pragma unroll for (i = 0; i < batch_group_per_block; i++) #pragma unroll for (j = 0; j < batch_per_block; j++) b[(i * count * batch_per_block + j) * channels + threadIdx.x] = prod[threadIdx.x][i * batch_per_block + j]; } static void _cwc_convnet_reorder_matrix_major_per_block(float* a, float* b, const int count, const int channels, const int batch, const cudaStream_t& stream) { // this is by experience, ideally, this can be profile-guided too const int batch_group_count = batch / BATCH_PER_BLOCK; if (channels < 8) { assert(batch % BATCH_PER_BLOCK == 0); assert(channels * BATCH_PER_BLOCK <= 1024); _cwc_kern_reorder_matrix_major_per_block_rows <BATCH_PER_BLOCK> <<<dim3(count, batch_group_count), dim3(channels, BATCH_PER_BLOCK), 0, stream>>> (a, b, count, channels, batch); } else { assert(channels % THREAD_PER_BLOCK == 0); assert(THREAD_PER_BLOCK % BATCH_PER_BLOCK == 0); assert(batch % THREAD_PER_BLOCK == 0); _cwc_kern_reorder_matrix_major_per_block <THREAD_PER_BLOCK, BATCH_PER_BLOCK, THREAD_PER_BLOCK / BATCH_PER_BLOCK> <<<dim3(count, (channels / THREAD_PER_BLOCK) * (batch / THREAD_PER_BLOCK)), THREAD_PER_BLOCK, sizeof(float) * THREAD_PER_BLOCK * THREAD_PER_BLOCK, stream>>> (a, b, count, channels, batch); } } static int _cwc_convnet_convolutional_backward_propagate_coefficient_rows_vary(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const cudaStream_t& stream, const cublasHandle_t& handle, int x, int y, int z) { if (!(layer->net.convolutional.count % y == 0 && layer->input.matrix.channels % x == 0 && layer->net.convolutional.count / y * layer->input.matrix.channels / x <= 1024 && /* thread per block constraint */ layer->net.convolutional.count / y * layer->input.matrix.channels / x >= layer->input.matrix.channels * BATCH_PER_BLOCK && layer->net.convolutional.count / y * layer->input.matrix.channels / x >= layer->net.convolutional.count && /* shared loading constraint */ sizeof(float) * BATCH_PER_BLOCK * (layer->net.convolutional.rows * layer->input.matrix.channels + layer->net.convolutional.count) <= 48 * 1024 /* shared memory size constraint */)) return -1; int out_rows, out_cols, out_partition; ccv_convnet_make_output(layer, layer->input.matrix.rows, 
layer->input.matrix.cols, &out_rows, &out_cols, &out_partition); assert(out_partition == 1); // this cannot handle partition float* chm = scratch; float* cha = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch; float* cbw = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch + out_rows * out_cols * layer->net.convolutional.count * batch; const int batch_group_count = batch / BATCH_PER_BLOCK; _cwc_convnet_reorder_matrix_major_per_block (m, chm, layer->input.matrix.rows * layer->input.matrix.cols, layer->input.matrix.channels, batch, stream); _cwc_convnet_reorder_matrix_major_per_block (a, cha, out_rows * out_cols, layer->net.convolutional.count, batch, stream); #define vary_block(_x, _y, _z) do { \ dim3 threads_per_block_for_coeff(layer->net.convolutional.count / _y, layer->input.matrix.channels / _x); \ assert(threads_per_block_for_coeff.x * threads_per_block_for_coeff.y <= 1024); \ dim3 num_blocks_for_coeff(layer->net.convolutional.cols, out_rows, batch_group_count); \ int shared_memory_size = sizeof(float) * BATCH_PER_BLOCK * (layer->net.convolutional.rows * layer->input.matrix.channels + layer->net.convolutional.count); \ cudaFuncSetCacheConfig(_cwc_kern_convolutional_backward_propagate_coefficient_rows<_x, _y, _z, BATCH_PER_BLOCK>, cudaFuncCachePreferShared); \ _cwc_kern_convolutional_backward_propagate_coefficient_rows \ <_x, _y, _z, BATCH_PER_BLOCK> \ <<<num_blocks_for_coeff, threads_per_block_for_coeff, shared_memory_size, stream>>> \ (layer->net.convolutional.strides, layer->net.convolutional.border, batch, \ chm, layer->input.matrix.rows, layer->input.matrix.cols, layer->input.matrix.channels, \ cha, out_rows, out_cols, \ cbw, layer->net.convolutional.rows, layer->net.convolutional.cols, layer->net.convolutional.count); \ } while (0) // special casing for image cwc_vary_4_a(x, 1, 2, 3, 4, cwc_vary_4_b, y, 1, 2, 3, 4, cwc_vary_5_c, layer->net.convolutional.rows, 3, 5, 7, 9, 11, vary_block); #undef vary_block cudaError_t error = cudaGetLastError(); if (cudaErrorInvalidConfiguration == error) return -1; assert(error == cudaSuccess); return 0; } static void _cwc_convnet_convolutional_backward_propagate_coefficient_rows(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const cudaStream_t& stream, const cublasHandle_t& handle) { static int vary_x[] = { 1, 2, 3, 4 }; static int vary_y[] = { 1, 2, 3, 4 }; static int vary_z[] = { 1 }; // benchmarking requires it has no side effect CWC_IMPLEMENT_VARY_STUB(EXTRA(layer)->vary.convolutional.backward.coefficient, vary_x, vary_y, vary_z, _cwc_convnet_convolutional_backward_propagate_coefficient_rows_vary, layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle); int out_rows, out_cols, out_partition; ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition); float* cbw = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch + out_rows * out_cols * layer->net.convolutional.count * batch; int count = layer->net.convolutional.rows * layer->net.convolutional.cols * layer->net.convolutional.count * layer->input.matrix.channels; const int batch_group_count = batch / BATCH_PER_BLOCK; // this has side-effect since it is accumulation cublasSgemv(handle, CUBLAS_OP_N, count, out_rows * batch_group_count, &one, cbw, count, unit, 1, &one, 
configuration->w, 1); } static int _cwc_convnet_convolutional_backward_propagate_coefficient_default_vary(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const cudaStream_t& stream, const cublasHandle_t& handle, int x, int y, int z) { int out_rows, out_cols, out_partition; ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition); if (!(layer->net.convolutional.count % (y * out_partition) == 0 && z % x == 0 && layer->net.convolutional.channels % (z * out_partition) == 0 && layer->net.convolutional.count / (y * out_partition) * z / x <= 1024 && /* thread per block constraint */ layer->net.convolutional.count / (y * out_partition) * z / x >= z && layer->net.convolutional.count / (y * out_partition) * z / x >= layer->net.convolutional.count / out_partition && /* shared loading constraint */ sizeof(float) * (z + layer->net.convolutional.count / out_partition) <= 32 * 1024 /* shared memory size constraint */)) return -1; float* chm = scratch; float* cha = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch; float* cbw = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch + out_rows * out_cols * layer->net.convolutional.count * batch; const int batch_group_count = batch / BATCH_PER_BLOCK; assert((layer->input.matrix.channels / out_partition) % THREAD_PER_BLOCK == 0); assert((layer->net.convolutional.count / out_partition) % THREAD_PER_BLOCK == 0); assert(batch % THREAD_PER_BLOCK == 0); _cwc_kern_reorder_matrix_major <THREAD_PER_BLOCK> <<<dim3(layer->input.matrix.rows * layer->input.matrix.cols, (layer->input.matrix.channels / out_partition / THREAD_PER_BLOCK) * (batch / THREAD_PER_BLOCK), out_partition), THREAD_PER_BLOCK, sizeof(float) * THREAD_PER_BLOCK * THREAD_PER_BLOCK, stream>>> (m, chm, layer->input.matrix.rows * layer->input.matrix.cols, layer->input.matrix.channels / out_partition, out_partition, batch); _cwc_kern_reorder_matrix_major <THREAD_PER_BLOCK> <<<dim3(out_rows * out_cols, (layer->net.convolutional.count / out_partition / THREAD_PER_BLOCK) * (batch / THREAD_PER_BLOCK), out_partition), THREAD_PER_BLOCK, sizeof(float) * THREAD_PER_BLOCK * THREAD_PER_BLOCK, stream>>> (a, cha, out_rows * out_cols, layer->net.convolutional.count / out_partition, out_partition, batch); #define vary_block(_x, _y, _z) do { \ dim3 threads_per_block_for_coeff(layer->net.convolutional.count / (_y * out_partition), _z / _x); \ assert(threads_per_block_for_coeff.x * threads_per_block_for_coeff.y <= 1024); \ dim3 num_blocks_for_coeff(layer->net.convolutional.cols, layer->net.convolutional.rows, layer->net.convolutional.channels / _z * batch_group_count); \ int shared_memory_size = sizeof(float) * (_z + layer->net.convolutional.count / out_partition); \ _cwc_kern_convolutional_backward_propagate_coefficient_default \ <_x, _y, _z, BATCH_PER_BLOCK> \ <<<num_blocks_for_coeff, threads_per_block_for_coeff, shared_memory_size, stream>>> \ (layer->net.convolutional.strides, layer->net.convolutional.border, batch, batch_group_count, \ chm, layer->input.matrix.rows, layer->input.matrix.cols, layer->input.matrix.channels / out_partition, out_partition, \ cha, out_rows, out_cols, \ cbw, layer->net.convolutional.rows, layer->net.convolutional.cols, layer->net.convolutional.count / out_partition); \ } while (0) cwc_vary_6_a(x, 1, 2, 3, 4, 6, 8, cwc_vary_6_b, y, 1, 2, 3, 
4, 6, 8, cwc_vary_4_c, z, 16, 24, 32, 36, vary_block); #undef vary_block cudaError_t error = cudaGetLastError(); if (cudaErrorInvalidConfiguration == error) return -1; assert(error == cudaSuccess); return 0; } static void _cwc_convnet_convolutional_backward_propagate_coefficient_default(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const cudaStream_t& stream, const cublasHandle_t& handle) { static int vary_x[] = { 1, 2, 3, 4, 6, 8 }; static int vary_y[] = { 1, 2, 3, 4, 6, 8 }; static int vary_z[] = { 16, 24, 32, 36 }; // benchmarking requires it has no side effect CWC_IMPLEMENT_VARY_STUB(EXTRA(layer)->vary.convolutional.backward.coefficient, vary_x, vary_y, vary_z, _cwc_convnet_convolutional_backward_propagate_coefficient_default_vary, layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle); int out_rows, out_cols, out_partition; ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition); float* cbw = scratch + layer->input.matrix.rows * layer->input.matrix.cols * layer->input.matrix.channels * batch + out_rows * out_cols * layer->net.convolutional.count * batch; int count = layer->net.convolutional.rows * layer->net.convolutional.cols * layer->net.convolutional.count * layer->input.matrix.channels / out_partition; const int batch_group_count = batch / BATCH_PER_BLOCK; // this has side-effect since it is accumulation cublasSgemv(handle, CUBLAS_OP_N, count, batch_group_count, &one, cbw, count, unit, 1, &one, configuration->w, 1); } static int _cwc_convnet_convolutional_backward_propagate_error_vary(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const cudaStream_t& stream, const cublasHandle_t& handle, int x, int y, int z) { int out_rows, out_cols, out_partition; ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition); if (!(batch % x == 0 && z % y == 0 && layer->input.matrix.channels % (z * out_partition) == 0 && batch / x * z / y <= 1024 && /* thread per block constraint */ batch / x * z / y >= batch && batch / x * z / y >= z && /* shared memory loading constraint */ sizeof(float) * (batch + z) <= 48 * 1024 /* shared memory size constraint */)) return -1; float* chw = scratch; _cwc_kern_reorder_matrix_major_parted <<<dim3(layer->net.convolutional.rows * layer->net.convolutional.cols, layer->input.matrix.channels / out_partition), dim3(layer->net.convolutional.count / out_partition, out_partition), 0, stream>>> (layer->w, chw, layer->net.convolutional.rows * layer->net.convolutional.cols, layer->input.matrix.channels, layer->net.convolutional.count, layer->input.matrix.channels / out_partition, layer->net.convolutional.count / out_partition, out_partition); #define vary_block(_x, _y, _z, _s) do { \ dim3 threads_per_block(batch / _x, _z / _y); \ assert(threads_per_block.x * threads_per_block.y <= 1024); \ dim3 num_blocks(layer->input.matrix.cols * layer->input.matrix.channels / (_z * out_partition), layer->input.matrix.rows, out_partition); \ int shared_memory_size = sizeof(float) * (batch + _z); \ cudaFuncSetCacheConfig(_cwc_kern_convolutional_backward_propagate_error<_x, _y, _z, _s>, cudaFuncCachePreferShared); \ _cwc_kern_convolutional_backward_propagate_error \ <_x, _y, _z, _s> \ <<<num_blocks, threads_per_block, shared_memory_size, stream>>> \ 
(layer->net.convolutional.border, batch, \ b, layer->input.matrix.rows, layer->input.matrix.cols, layer->input.matrix.channels, \ a, out_rows, out_cols, \ chw, layer->net.convolutional.rows, layer->net.convolutional.cols, layer->net.convolutional.count / out_partition, out_partition); \ } while (0) cwc_vary_4_a(x, 1, 2, 4, 8, cwc_vary_5_b, y, 1, 2, 4, 6, 8, cwc_vary_6_c, z, 16, 24, 32, 36, 64, 72, cwc_vary_4_d, layer->net.convolutional.strides, 1, 2, 3, 4, vary_block); #undef vary_block cudaError_t error = cudaGetLastError(); if (cudaErrorInvalidConfiguration == error) return -1; assert(error == cudaSuccess); return 0; } static void _cwc_convnet_convolutional_backward_propagate_error(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const cudaStream_t& stream, const cublasHandle_t& handle) { static int vary_x[] = { 1, 2, 4, 8 }; static int vary_y[] = { 1, 2, 4, 6, 8 }; static int vary_z[] = { 16, 24, 32, 36, 64, 72 }; CWC_IMPLEMENT_VARY_STUB(EXTRA(layer)->vary.convolutional.backward.gradient, vary_x, vary_y, vary_z, _cwc_convnet_convolutional_backward_propagate_error_vary, layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle); } void cwc_convnet_convolutional_backward_propagate(ccv_convnet_layer_t* layer, int batch, float* a, float* n, float* m, float* b, ccv_convnet_layer_t* configuration, float* scratch, float* unit, const cudaStream_t& stream, const cublasHandle_t& handle) { assert(layer->net.convolutional.count % 4 == 0); assert(batch % BATCH_PER_BLOCK == 0); int out_rows, out_cols, out_partition; ccv_convnet_make_output(layer, layer->input.matrix.rows, layer->input.matrix.cols, &out_rows, &out_cols, &out_partition); // it turns out that first apply relu would save us a lot of computation because no need to low both out and out_grad any more cwc_kern_relu_backward_propagate <<<dim3(out_cols, out_rows, layer->net.convolutional.count), batch, 0, stream>>> (batch, n, a, out_rows, out_cols, layer->net.convolutional.count); assert(cudaGetLastError() == cudaSuccess); if (cwc_convnet_layer_use_rows(layer)) _cwc_convnet_convolutional_backward_propagate_coefficient_rows(layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle); else _cwc_convnet_convolutional_backward_propagate_coefficient_default(layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle); // compute the bias directly using gemv routine cublasSgemv(handle, CUBLAS_OP_T, out_rows * out_cols * batch, layer->net.convolutional.count, &one, a, out_rows * out_cols * batch, unit, 1, &one, configuration->bias, 1); assert(cudaGetLastError() == cudaSuccess); if (b) _cwc_convnet_convolutional_backward_propagate_error(layer, batch, a, n, m, b, configuration, scratch, unit, stream, handle); }
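// A minimal host-side reference sketch (illustrative, not part of ccv; the
// function name below is made up) of what _cwc_kern_reorder_matrix_major does:
// per partition z, data stored channel-major as a[z][channel][position][batch]
// is "rewound" into batch-major b[z][batch][position][channel], so that each
// image's per-position channel vector becomes contiguous for the backward
// kernels above.
static void _reorder_matrix_major_reference(const float* a, float* b, const int count, const int channels_per_partition, const int partition, const int batch)
{
	int z, c, x, k;
	for (z = 0; z < partition; z++)
		for (c = 0; c < channels_per_partition; c++)
			for (x = 0; x < count; x++)
				for (k = 0; k < batch; k++)
					b[((z * batch + k) * count + x) * channels_per_partition + c] =
						a[((z * channels_per_partition + c) * count + x) * batch + k];
}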
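// A CPU sketch (illustrative, not part of ccv) of the reduction that the
// cublasSgemv calls above perform on the per-group partial results in cbw.
// Assuming `unit` is a vector of ones, y = 1 * cbw * unit + 1 * y simply sums
// the columns of cbw (each column holding `count` partial coefficient
// gradients, one column per batch group, or per out_row times batch group in
// the "rows" variant) into the accumulated weight gradient configuration->w:
static void _accumulate_coefficient_gradient_reference(const float* cbw, float* w, const int count, const int column_count)
{
	int g, i;
	for (g = 0; g < column_count; g++)
		for (i = 0; i < count; i++)
			w[i] += cbw[g * count + i];
}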
#include "CUFLU.h" #if ( MODEL == HYDRO && \ ( RSOLVER == EXACT || CHECK_INTERMEDIATE == EXACT ) && \ ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) ) // external functions #ifdef __CUDACC__ #include "CUFLU_Shared_FluUtility.cu" #else // #ifdef __CUDACC__ void Hydro_Con2Pri( const real In[], real Out[], const real MinPres, const bool FracPassive, const int NFrac, const int FracIdx[], const bool JeansMinPres, const real JeansMinPres_Coeff, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2E_t EoS_DensPres2Eint, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real *const EoS_Table[EOS_NTABLE_MAX], real* const EintOut ); void Hydro_Rotate3D( real InOut[], const int XYZ, const bool Forward, const int Mag_Offset ); #endif // #ifdef __CUDACC__ ... else ... // internal functions (GPU_DEVICE is defined in CUFLU.h) GPU_DEVICE static real Solve_f( const real rho,const real p,const real p_star,const real Gamma ); #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) GPU_DEVICE static void Set_Flux( real flux[], const real val[], const real Gamma ); #endif //------------------------------------------------------------------------------------------------------- // Function : Hydro_RiemmanSolver_Exact // Description : Exact Riemann solver // // Note : 1. Input data should be primitive variables // 2. This function is shared by MHM, MHM_RP, and CTU schemes // 3. Currently it does NOT check the minimum density and pressure criteria // // Parameter : XYZ : Target spatial direction : (0/1/2) --> (x/y/z) // Flux_Out : Output array to store the average flux along t axis // L/R_In : Input left/right states (conserved variables) // MinDens/Pres : Density and pressure floors // EoS_DensEint2Pres : EoS routine to compute the gas pressure // EoS_DensPres2CSqr : EoS routine to compute the sound speed squared // EoS_AuxArray_* : Auxiliary arrays for the EoS routines // EoS_Table : EoS tables //------------------------------------------------------------------------------------------------------ GPU_DEVICE void Hydro_RiemannSolver_Exact( const int XYZ, real Flux_Out[], const real L_In[], const real R_In[], const real MinDens, const real MinPres, const EoS_DE2P_t EoS_DensEint2Pres, const EoS_DP2C_t EoS_DensPres2CSqr, const double EoS_AuxArray_Flt[], const int EoS_AuxArray_Int[], const real* const EoS_Table[EOS_NTABLE_MAX] ) { // check # ifdef GAMER_DEBUG # if ( EOS != EOS_GAMMA ) printf( "ERROR : EOS != EOS_GAMMA is NOT supported at file <%s>, line <%d>, function <%s> !!\n", __FILE__, __LINE__, __FUNCTION__ ); # endif # ifdef MHD printf( "ERROR : MHD is NOT supported at file <%s>, line <%d>, function <%s> !!\n", __FILE__, __LINE__, __FUNCTION__ ); # endif # endif // #ifdef GAMER_DEBUG const real Gamma = EoS_AuxArray_Flt[0]; // only support constant-gamma EoS (i.e., EOS_GAMMA) const real Gamma_m1 = EoS_AuxArray_Flt[1]; const real Gamma_p1 = Gamma + (real)1.0; const real c = Gamma_m1 / Gamma_p1; const bool FracPassive_No = false; // no need to convert passive scalars to mass fraction const bool JeansMinPres_No = false; real eival[5], L_star[5], R_star[5]; real L[NCOMP_TOTAL], R[NCOMP_TOTAL], Temp; // convert conserved variables to primitive variables Hydro_Con2Pri( L_In, L, MinPres, FracPassive_No, NULL_INT, NULL, JeansMinPres_No, NULL_REAL, EoS_DensEint2Pres, NULL, EoS_AuxArray_Flt, EoS_AuxArray_Int, EoS_Table, NULL ); Hydro_Con2Pri( R_In, R, MinPres, FracPassive_No, NULL_INT, NULL, JeansMinPres_No, NULL_REAL, EoS_DensEint2Pres, NULL, EoS_AuxArray_Flt, 
EoS_AuxArray_Int, EoS_Table, NULL ); // reorder the input variables for different spatial directions Hydro_Rotate3D( L, XYZ, true, MAG_OFFSET ); Hydro_Rotate3D( R, XYZ, true, MAG_OFFSET ); // solution of the tangential velocity L_star[2] = L[2]; L_star[3] = L[3]; R_star[2] = R[2]; R_star[3] = R[3]; // solution of pressure { const real du = R[1] - L[1]; real f; real f_L; real f_R; real bound[2]; real compare[2]; bound[0] = FMIN( L[4], R[4] ); bound[1] = FMAX( L[4], R[4] ); for (int i=0; i<2; i++) { f_L = Solve_f( L[0], L[4], bound[i], Gamma ); f_R = Solve_f( R[0], R[4], bound[i], Gamma ); compare[i] = f_L + f_R + du; } if( compare[0]*compare[1] > (real)0.0 ) { if( compare[0] > (real)0.0 ) { bound[1] = bound[0]; bound[0] = (real)0.0; } else if( compare[1] < (real)0.0 ) { bool Continue; bound[0] = bound[1]; bound[1] = (real)2.0*bound[0]; do { for (int i=0; i<2; i++) { f_L = Solve_f( L[0], L[4], bound[i], Gamma ); f_R = Solve_f( R[0], R[4], bound[i], Gamma ); compare[i] = f_L + f_R + du; } Continue = ( compare[0]*compare[1] > (real)0.0 ); if ( Continue ) { bound[0] = bound[1]; bound[1] = bound[0]*(real)2.0; } } while ( Continue ); } } // search p_star do { L_star[4] = (real)0.5 * ( bound[0] + bound[1] ); if ( ( L_star[4] == bound[0] ) || ( L_star[4] == bound[1] ) ) break; else { f_L = Solve_f( L[0], L[4], L_star[4], Gamma ); f_R = Solve_f( R[0], R[4], L_star[4], Gamma ); f = f_L + f_R + du; if ( f > (real)0.0 ) bound[1] = L_star[4]; else bound[0] = L_star[4]; } } while ( FABS(f) >= MAX_ERROR ); } R_star[4] = L_star[4]; // solution normal velocity { real f_L = Solve_f( L[0], L[4], L_star[4], Gamma ); real f_R = Solve_f( R[0], R[4], R_star[4], Gamma ); L_star[1] = (real)0.5*( L[1] + R[1] ) + (real)0.5*( f_R - f_L ); } R_star[1] = L_star[1]; // left complete solution if ( L_star[4] > L[4] ) // left shock { real r = L_star[4] / L[4]; L_star[0] = L[0]*( ( r + c ) / ( c*r + (real)1.0 ) ); // solution of density } else // left rarefaction { L_star[0] = L[0]*POW( L_star[4]/L[4], (real)1.0/Gamma ); // solution of density # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(L[4]) ) printf( "ERROR : invalid pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", L[4], __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(L[0]) ) printf( "ERROR : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n", L[0], __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(L_star[4]) ) printf( "ERROR : invalid pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", L_star[4], __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(L_star[0]) ) printf( "ERROR : invalid density(%14.7e) at file <%s>, line <%d>, function <%s>\n", L_star[0], __FILE__, __LINE__, __FUNCTION__ ); # endif real a_L = SQRT( Gamma * L[4] / L[0] ); // sound speed of left region real a_star = SQRT( Gamma * L_star[4] / L_star[0] ); // sound speed of left star region if ( L[1] < a_L && L_star[1] > a_star ) // sonic rarefaction { L_star[0] = L[0]*POW( (real)2.0/Gamma_p1 + c*L[1]/a_L, (real)2.0/Gamma_m1 ); L_star[1] = (real)2.0*( a_L + (real)0.5*Gamma_m1*L[1] ) / Gamma_p1; L_star[4] = L[4]*POW( (real)2.0/Gamma_p1 + c*L[1]/a_L, (real)2.0*Gamma/Gamma_m1 ); } } // right complete solution if ( R_star[4] > R[4] ) // right shock { real r = R_star[4] / R[4]; R_star[0] = R[0]*( ( r + c ) / ( c * r + (real)1.0 ) ); // solution of density } else // right rarefaction { R_star[0] = R[0]*POW( R_star[4]/R[4], (real)1.0/Gamma ); // solution of density # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(R[4]) ) printf( 
"ERROR : invalid pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", R[4], __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(R[0]) ) printf( "ERROR : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n", R[0], __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(R_star[4]) ) printf( "ERROR : invalid pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", R_star[4], __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(R_star[0]) ) printf( "ERROR : invalid density(%14.7e) at file <%s>, line <%d>, function <%s>\n", R_star[0], __FILE__, __LINE__, __FUNCTION__ ); # endif real a_R = SQRT( Gamma * R[4] / R[0] ); // sound speed of right region real a_star = SQRT( Gamma * R_star[4] / R_star[0] ); // sound speed of right star region if ( R[1] > -a_R && R_star[1] < -a_star ) // sonic rarefaction { R_star[0] = R[0]*POW ( (real)2.0/Gamma_p1 - c*R[1]/a_R, (real)2.0/Gamma_m1 ); R_star[1] = (real)2.0*( -a_R + (real)0.5*Gamma_m1*R[1] ) / Gamma_p1; R_star[4] = R[4]*POW ( (real)2.0/Gamma_p1 - c*R[1]/a_R, (real)2.0*Gamma/Gamma_m1 ); } } // solve speed of waves eival[1] = L_star[1]; eival[2] = L_star[1]; eival[3] = L_star[1]; # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative( R[4]) ) printf( "ERROR : invalid pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", R[4], __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(R[0]) ) printf( "ERROR : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n", R[0], __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(L[4]) ) printf( "ERROR : invalid pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", L[4], __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(L[0]) ) printf( "ERROR : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n", L[0], __FILE__, __LINE__, __FUNCTION__ ); # endif if ( L[4] < L_star[4] ) // left shock { Temp = (real)0.5/Gamma*( Gamma_p1*L_star[4]/L[4] + Gamma_m1 ); # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(Temp) ) printf( "ERROR : invalid value (%14.7e) at file <%s>, line <%d>, function <%s>\n", Temp, __FILE__, __LINE__, __FUNCTION__ ); # endif eival[0] = L[1] - SQRT( Gamma*L[4]/L[0] )*SQRT( Temp ); } else // left rarefaction eival[0] = L[1] - SQRT ( Gamma*L[4]/L[0] ); if ( R[4] < R_star[4] ) // right shock { Temp = (real)0.5/Gamma*( Gamma_p1*R_star[4]/R[4] + Gamma_m1 ); # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(Temp) ) printf( "ERROR : invalid value (%14.7e) at file <%s>, line <%d>, function <%s>\n", Temp, __FILE__, __LINE__, __FUNCTION__ ); # endif eival[4] = R[1] + SQRT( Gamma*R[4]/R[0] )*SQRT( Temp ); } else // right rarefaction eival[4] = R[1] + SQRT ( Gamma*R[4]/R[0] ); // evaluate the average fluxes along the t axis if ( FABS( eival[1] ) < MAX_ERROR ) // contact wave is zero { Flux_Out[0] = (real)0.0; Flux_Out[1] = L_star[4]; Flux_Out[2] = (real)0.0; Flux_Out[3] = (real)0.0; Flux_Out[4] = (real)0.0; } else { if ( eival[1] > (real)0.0 ) { if ( eival[0] > (real)0.0 ) Set_Flux( Flux_Out, L, Gamma ); else Set_Flux( Flux_Out, L_star, Gamma ); } else { if ( eival[4] < (real)0.0 ) Set_Flux( Flux_Out, R, Gamma ); else Set_Flux( Flux_Out, R_star, Gamma ); } } // evaluate the fluxes for passive scalars // --> note that L_In and R_In are mass density instead of mass fraction for passive scalars # if ( NCOMP_PASSIVE > 0 ) if ( Flux_Out[FLUX_DENS] >= (real)0.0 ) { const real vx = Flux_Out[FLUX_DENS] / L_In[DENS]; for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) Flux_Out[v] = L_In[v]*vx; } else { const real 
vx = Flux_Out[FLUX_DENS] / R_In[DENS]; for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) Flux_Out[v] = R_In[v]*vx; } # endif // restore the correct order Hydro_Rotate3D( Flux_Out, XYZ, false, MAG_OFFSET ); } // FUNCTION : Hydro_RiemannSolve_Exact //------------------------------------------------------------------------------------------------------- // Function : Solve_f // Description : Solve the parameter f in Godunov's method // // paremater : rho : Density // p : Pressure // p_star : Pressure in star region // Gamma : Ratio of specific heats //------------------------------------------------------------------------------------------------------- GPU_DEVICE real Solve_f( const real rho, const real p, const real p_star, const real Gamma ) { const real Gamma_m1 = Gamma - (real)1.0; const real Gamma_p1 = Gamma + (real)1.0; real f, Temp; if ( p_star > p ) { real A = (real)2.0/( rho*Gamma_p1 ); real B = p*Gamma_m1/Gamma_p1; Temp = A/(p_star+B); # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(Temp) ) printf( "ERROR : invalid value (%14.7e) at file <%s>, line <%d>, function <%s>\n", Temp, __FILE__, __LINE__, __FUNCTION__ ); # endif f = (p_star-p)*SQRT( Temp ); } else { # ifdef CHECK_NEGATIVE_IN_FLUID if ( Hydro_CheckNegative(p) ) printf( "ERROR : invalid pressure (%14.7e) at file <%s>, line <%d>, function <%s>\n", p, __FILE__, __LINE__, __FUNCTION__ ); if ( Hydro_CheckNegative(rho) ) printf( "ERROR : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n", rho, __FILE__, __LINE__, __FUNCTION__ ); # endif real a = SQRT( Gamma*p/rho ); real c = p_star/p; f = (real)2.0*a*( POW( c, (real)0.5*Gamma_m1/Gamma ) - (real)1.0 ) / Gamma_m1; } return f; } // FUNCTION : Solve_f #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) //------------------------------------------------------------------------------------------------------- // Function : Set_Flux // Description : Set the flux function evaluated at the given state // // Parameter : flux : Output flux // val : Input primitive variables // Gamma : Ratio of specific heats //------------------------------------------------------------------------------------------------------- GPU_DEVICE void Set_Flux( real flux[], const real val[], const real Gamma ) { const real Gamma_m1 = Gamma -(real)1.0; // set flux flux[0] = val[0]*val[1]; flux[1] = val[0]*val[1]*val[1] + val[4]; flux[2] = val[0]*val[1]*val[2]; flux[3] = val[0]*val[1]*val[3]; flux[4] = val[1]*( (real)0.5*val[0]*( val[1]*val[1] + val[2]*val[2] + val[3]*val[3] ) + val[4]/Gamma_m1 + val[4] ); } // FUNCTION : Set_Flux #endif // #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) #endif // #if ( MODEL == HYDRO && ( RSOLVER == EXACT || CHECK_INTE == EXACT ) && ( SCHEME == MHM/MHM_RP/CTU ) ) #endif // #ifndef __CUFLU_RIEMANNSOLVER_EXACT__
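// A simplified double-precision host sketch of the p_star search implemented
// above (illustrative only; f_side() and solve_p_star() are made-up names and
// this is not part of GAMER).  f(p*) = f_L(p*) + f_R(p*) + (u_R - u_L) is
// monotonically increasing in p*, so the solver brackets the root -- widening
// the bracket when both endpoints lie on the same side -- and then bisects
// until |f| drops below the tolerance, mirroring the device code's use of Solve_f().
#include <math.h>

static double f_side( const double rho, const double p, const double p_star, const double gamma )
{
   if ( p_star > p )   // shock branch, cf. Solve_f()
   {
      const double A = 2.0 / ( rho*( gamma + 1.0 ) );
      const double B = p*( gamma - 1.0 ) / ( gamma + 1.0 );
      return ( p_star - p )*sqrt( A/( p_star + B ) );
   }
   else                // rarefaction branch, cf. Solve_f()
   {
      const double a = sqrt( gamma*p/rho );
      return 2.0*a*( pow( p_star/p, 0.5*( gamma - 1.0 )/gamma ) - 1.0 ) / ( gamma - 1.0 );
   }
}

static double solve_p_star( const double rhoL, const double uL, const double pL,
                            const double rhoR, const double uR, const double pR,
                            const double gamma, const double tol )
{
   const double du = uR - uL;
   double lo = fmin( pL, pR ), hi = fmax( pL, pR );
   double f_lo = f_side( rhoL, pL, lo, gamma ) + f_side( rhoR, pR, lo, gamma ) + du;
   double f_hi = f_side( rhoL, pL, hi, gamma ) + f_side( rhoR, pR, hi, gamma ) + du;

   if ( f_lo > 0.0 ) { hi = lo;  lo = 0.0; }   // root lies below both input pressures
   else
      while ( f_hi < 0.0 )                     // root lies above both input pressures
      {
         lo   = hi;
         hi  *= 2.0;
         f_hi = f_side( rhoL, pL, hi, gamma ) + f_side( rhoR, pR, hi, gamma ) + du;
      }

   double p = 0.5*( lo + hi );
   for (int iter=0; iter<200; iter++)
   {
      const double f = f_side( rhoL, pL, p, gamma ) + f_side( rhoR, pR, p, gamma ) + du;
      if ( fabs(f) < tol )   break;
      if ( f > 0.0 )   hi = p;
      else             lo = p;
      p = 0.5*( lo + hi );
   }

   return p;
}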
// Headers required by the runtime helpers and kernels that follow: POSIX file
// I/O and mmap for loading the CIFAR-10 and parameter binaries, gettimeofday
// for timing, <random> for the normal-distribution initializer, and the
// CUDA / cuBLAS / cuDNN runtime APIs.
#include <assert.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <random>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cudnn.h>
using namespace std; #ifndef MAP_FILE #define MAP_FILE MAP_SHARED #endif long fsize(int fd) { struct stat stat; int res = fstat(fd, &stat); return stat.st_size; } int printll(char *s) { while (*s != '\n' && *s != ',' && *s != '\t') { putchar(*s++); } return 0; } long hash(char *str0, int len) { unsigned char *str = (unsigned char *)str0; unsigned long hash = 5381; int c; while ((c = *str++) && len--) hash = ((hash << 5) + hash) + c; /* hash * 33 + c */ return hash; } long HEAP_SIZE_CPU = 1073741826; // 1048576; // 536870912; // 268435456; // 2097152; 1610612739; // 4294967304; // void *mallocBase = calloc(HEAP_SIZE_CPU, 1); void *mallocAddr = mallocBase; void *waterMark = mallocBase; void *myMalloc(size_t bytes) { void *res = mallocAddr; mallocAddr = (void *)((char *)mallocAddr + bytes); if ((long)mallocAddr >= (long)mallocBase + HEAP_SIZE_CPU) fprintf(stderr, "CPU memory breached limit of HEAP_SIZE_CPU\n"); return res; } long HEAP_SIZE = 8589934608; // 4294967304; // this is for GPU int timeval_subtract(struct timeval *result, struct timeval *t2, struct timeval *t1) { long int diff = (t2->tv_usec + 1000000 * t2->tv_sec) - (t1->tv_usec + 1000000 * t1->tv_sec); result->tv_sec = diff / 1000000; result->tv_usec = diff % 1000000; return (diff < 0); } #define CUDA_CALL(f) { \ cudaError_t err = (f); \ if (err != cudaSuccess) { \ fprintf(stderr, "CUDA error occurred: %s (%s:%d)\n", \ cudaGetErrorString(err), __FILE__, __LINE__); \ exit(err); \ } \ } #define CUBLAS_CALL(f) { \ cublasStatus_t stat = (f); \ if (stat != CUBLAS_STATUS_SUCCESS) { \ fprintf(stderr, "cuBLAS error occurred: %d (%s:%d)\n", \ stat, __FILE__, __LINE__); \ exit(stat); \ } \ } void *gpuMallocBase; void *gpuMallocAddr; // Alignment boundary size, in bytes. constexpr int N = 4; // 16 void *myGpuMalloc(size_t bytes) { bytes = ((bytes + (1 << N) - 1) >> N) << N; void *res = gpuMallocAddr; gpuMallocAddr = (void *)((char *)gpuMallocAddr + bytes); if ((long)gpuMallocAddr >= (long)gpuMallocBase + HEAP_SIZE) fprintf(stderr, "GPU breached memory limit of HEAP_SIZE\n"); return res; } template <typename T> __global__ void arrayUpdate(T *data, int index, T value) { data[index] = value; } __global__ void arrayFill(float* data, float value, int size) { int stride = gridDim.x * blockDim.x; int tid = threadIdx.x + blockIdx.x * blockDim.x; for (int i = tid; i < size; i += stride) data[i] = value; } __global__ void hardTanh(float* in, float* out, float min_val, float max_val, int size) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < size; i += stride) { out[i] = in[i] < min_val ? min_val : (in[i] > max_val ? 
max_val : in[i]); } } __global__ void hardTanh_grad(float* in_x, float* in_d, float* out_d, float min_val, float max_val, int size, bool inplace) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < size; i += stride) { if (inplace) { if (in_x[i] < min_val || in_x[i] > max_val) in_d[i] = 0; } else { if (in_x[i] >= min_val && in_x[i] <= max_val) in_d[i] += out_d[i]; } } } __global__ void nllLoss(float *x, int x_stride, float *y, int* target) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; y[tid] = -1 * x[offset]; } __global__ void nllLoss_grad(int x_stride, float *yGrad, int* target, float* xGrad) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int offset = tid * x_stride + target[tid]; xGrad[offset] += -1 * yGrad[tid]; } // only for 4D tensor in and 3D tensor out __global__ void sum_grad(float* in, int inSize0, int inSize1, int inSize2, int inSize3, int nElement, float* out, int outStride0, int outStride1, int outStride2, int dim) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < nElement; i += stride) { int inOff2 = i / inSize3; int inDim3 = i - inOff2 * inSize3; int inOff1 = inOff2 / inSize2; int inDim2 = inOff2 - inOff1 * inSize2; int inDim0 = inOff1 / inSize1; int inDim1 = inOff1 - inDim0 * inSize1; int outOff = 0; if (dim == 0) outOff = inDim1 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2; if (dim == 1) outOff = inDim0 * outStride0 + inDim2 * outStride1 + inDim3 * outStride2; if (dim == 2) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim3 * outStride2; if (dim == 3) outOff = inDim0 * outStride0 + inDim1 * outStride1 + inDim2 * outStride2; in[i] += out[outOff]; } } //following - https://github.com/torch/cutorch/blob/master/lib/THC/THCTensorMath.cuh#L49 static inline __device__ int compute(int outputSize0, int outputSize1, int outputSize2, int outputSize3, int outputStride0, int outputStride1, int outputStride2, int outputStride3, const int dimSize, const int concatDim, int linearIndex) { int offset = 0; int curDimSize = 3 == concatDim ? dimSize : outputSize3; int nextDimIndex = linearIndex / curDimSize; int curDimIndex = linearIndex - curDimSize * nextDimIndex; int curDimOffset = curDimIndex * outputStride3; offset += curDimOffset; linearIndex = nextDimIndex; curDimSize = 2 == concatDim ? dimSize : outputSize2; nextDimIndex = linearIndex / curDimSize; curDimIndex = linearIndex - curDimSize * nextDimIndex; curDimOffset = curDimIndex * outputStride2; offset += curDimOffset; linearIndex = nextDimIndex; curDimSize = 1 == concatDim ? dimSize : outputSize1; nextDimIndex = linearIndex / curDimSize; curDimIndex = linearIndex - curDimSize * nextDimIndex; curDimOffset = curDimIndex * outputStride1; offset += curDimOffset; linearIndex = nextDimIndex; return offset + linearIndex * outputStride0; // for (int i = 3; i >= 1; i--) { // int curDimSize = i == concatDim ? 
dimSize : outputSize[i]; // int nextDimIndex = linearIndex / curDimSize; // int curDimIndex = linearIndex - curDimSize * nextDimIndex; // int curDimOffset = curDimIndex * outputStride[i]; // offset += curDimOffset; // linearIndex = nextDimIndex; // } // return offset + linearIndex * outputStride[0]; } // TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1 __global__ void concat2D_1D_greg(float* in1, int dimSize1, int nElement1, float* in2, int dimSize2, int nElement2, float* out, int concatDim, int outSize0, int outSize1, int outSize2, int outSize3, int outStride0, int outStride1, int outStride2, int outStride3) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int nElement = blockIdx.y == 0 ? nElement1 : nElement2; if (tid >= nElement) return; float* data = blockIdx.y == 0 ? in1 : in2; int offset = blockIdx.y == 0 ? 0 : dimSize1; int dimSize = blockIdx.y == 0 ? dimSize1 : dimSize2; int dataOffset = offset * outStride1; int stride = gridDim.x * blockDim.x; while (tid < nElement) { int elementOffset = compute(outSize0, outSize1, outSize2, outSize3, outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid); out[dataOffset + elementOffset] = data[tid]; tid += stride; } } // TODO: Only for Dim of rank 4, and only for 2 inputs, and only for concat at dim = 1 __global__ void concat2D_1D_greg_grad(float* in1, int dimSize1, int nElement1, float* in2, int dimSize2, int nElement2, float* out, int concatDim, int outSize0, int outSize1, int outSize2, int outSize3, int outStride0, int outStride1, int outStride2, int outStride3) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int nElement = blockIdx.y == 0 ? nElement1 : nElement2; if (tid >= nElement) return; float* data = blockIdx.y == 0 ? in1 : in2; int offset = blockIdx.y == 0 ? 0 : dimSize1; int dimSize = blockIdx.y == 0 ? 
dimSize1 : dimSize2; int dataOffset = offset * outStride1; int stride = gridDim.x * blockDim.x; while (tid < nElement) { int elementOffset = compute(outSize0, outSize1, outSize2, outSize3, outStride0, outStride1, outStride2, outStride3, dimSize, concatDim, tid); data[tid] += out[dataOffset + elementOffset]; tid += stride; } } __global__ void repeat0(float* in, float* out, int outStride0, int outStride1, int outScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < outScalarCount; tid += stride) { int linearIndex = tid; int outIndex0 = linearIndex / outStride0; linearIndex = linearIndex - outIndex0 * outStride0; int outIndex1 = linearIndex / outStride1; int outIndex2 = linearIndex - outIndex1 * outStride1; int inIndex = outIndex2 + (outIndex0 + outIndex1) * outStride1; out[tid] = in[inIndex]; } } __global__ void shift0(float* in, float* out, int inDim0, int inStride0, int inStride1, int inScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < inScalarCount; tid += stride) { int linearIndex = tid; int inIndex0 = linearIndex / inStride0; linearIndex = linearIndex - inIndex0 * inStride0; int inIndex1 = linearIndex / inStride1; if (inIndex0 + inIndex1 >= inDim0) return; out[tid + inIndex1 * inStride0] = in[tid]; } } __global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { if (d[tid] > clip) d[tid] = clip; if (d[tid] < -clip) d[tid] = -clip; m[tid] += d[tid] * d[tid]; x[tid] -= lr * d[tid] / sqrt(m[tid] + 0.00000001); d[tid] = 0; } } __global__ void momentum_update_1D_1D(float* x, float* d, float* m, float learning_rate, float momentum, float gradClip, bool nesterov, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) { float temp = d[tid]; if (temp > gradClip) temp = gradClip; if (temp < -gradClip) temp = -gradClip; m[tid] *= momentum; m[tid] += temp; if (nesterov) { temp += momentum * m[tid]; } else { temp = m[tid]; } x[tid] -= learning_rate * temp; d[tid] = 0; } } __global__ void addScalar(float* in, float* out, float add, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] + add; } __global__ void minusScalar(float* in, float* out, float minus, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] - minus; } __global__ void multScalar(float* in, float* out, float mult, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] * mult; } __global__ void divScalar(float* in, float* out, float div, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] / div; } __global__ void elementwise_1D_1D_mul(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] * in2[tid]; } __global__ void elementwise_1D_1D_mul_mutate(float* in1, float* in2, float* out, int 
size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] += in1[tid] * in2[tid]; } __global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] + in2[tid]; } __global__ void elementwise_1D_1D_minus(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] - in2[tid]; } __global__ void elementwise_1D_1D_div(float* in1, float* in2, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in1[tid] / in2[tid]; } __global__ void elementwise_1D_1D_exp(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = exp(in[tid]); } __global__ void elementwise_1D_1D_log(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = log(in[tid]); } __global__ void elementwise_1D_1D_sqrt(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = sqrt(in[tid]); } __global__ void elementwise_1D_1D_square(float* in, float* out, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) out[tid] = in[tid] * in[tid]; } __global__ void elementwise_1D_1D_exp_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * out_x[tid]; } __global__ void elementwise_1D_1D_log_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / in_x[tid]; } __global__ void elementwise_1D_1D_sqrt_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] / out_x[tid] / 2; } __global__ void elementwise_1D_1D_square_grad(float* in_x, float* in_d, float* out_x, float * out_d, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) in_d[tid] += out_d[tid] * 2 * in_x[tid]; } __global__ void clipAt(float* in, float bound, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < size; tid += stride) if (tid < size) { if (in[tid] > bound) in[tid] = bound; if (in[tid] < -bound) in[tid] = -bound; } } __global__ void mask4D(float* in, int* mask, int xstrides0, int xstrides1, int xstrides2, int xstrides3, int scalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; 
tid < scalarCount; tid += stride) { int linearIndex = tid; int xindex0 = linearIndex / xstrides0; linearIndex = linearIndex - xstrides0 * xindex0; int xindex1 = linearIndex / xstrides1; linearIndex = linearIndex - xstrides1 * xindex1; int xindex2 = linearIndex / xstrides2; int xindex3 = linearIndex - xstrides2 * xindex2; if (xindex3 >= mask[xindex0]) in[tid] = 0; } } __global__ void mul_sub(float* in1, float* in2, float* out, int in1ScalarCount, int in2ScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < in1ScalarCount; tid += stride) { out[tid] = in1[tid] * in2[tid % in2ScalarCount]; } } __global__ void mul_sub_grad(float* in1_x, float* in1_d, float* in2_x, float* in2_d, float* out, int in1ScalarCount, int in2ScalarCount) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (; tid < in1ScalarCount; tid += stride) { int index = tid % in2ScalarCount; in1_d[tid] += out[tid] * in2_x[index]; in2_d[tid] = in1_x[tid] * out[tid]; // this is the temp array, need to be reduced! } } // From: https://github.com/pytorch/pytorch/blob/master/aten/src/THC/THCIntegerDivider.cuh // Result of div/mod operation stored together. template <typename Value> struct DivMod { Value div, mod; __host__ __device__ DivMod(Value div, Value mod) : div(div), mod(mod) { } }; // Base case: we only have an implementation for uint32_t for now. For // everything else, we use plain division. template <typename Value> struct IntDivider { IntDivider() { } // Dummy constructor for arrays. IntDivider(Value d) : divisor(d) { } __host__ __device__ inline Value div(Value n) const { return n / divisor; } __host__ __device__ inline Value mod(Value n) const { return n % divisor; } __host__ __device__ inline DivMod<Value> divmod(Value n) const { return DivMod<Value>(n / divisor, n % divisor); } Value divisor; }; // Implement fast integer division. template <> struct IntDivider<unsigned int> { static_assert(sizeof(unsigned int) == 4, "Assumes 32-bit unsigned int."); IntDivider() { } // Dummy constructor for arrays. IntDivider(unsigned int d) : divisor(d) { assert(divisor >= 1 && divisor <= INT32_MAX); // TODO: gcc/clang has __builtin_clz() but it's not portable. for (shift = 0; shift < 32; shift++) if ((1U << shift) >= divisor) break; uint64_t one = 1; uint64_t magic = ((one << 32) * ((one << shift) - divisor)) / divisor + 1; m1 = magic; assert(m1 > 0 && m1 == magic); // m1 must fit in 32 bits. } __host__ __device__ inline unsigned int div(unsigned int n) const { #ifdef __CUDA_ARCH__ // 't' is the higher 32-bits of unsigned 32-bit multiplication of 'n' and // 'm1'. unsigned int t = __umulhi(n, m1); return (t + n) >> shift; #else // Using uint64_t so that the addition does not overflow. uint64_t t = ((uint64_t) n * m1) >> 32; return (t + n) >> shift; #endif } __host__ __device__ inline unsigned int mod(unsigned int n) const { return n - div(n) * divisor; } __host__ __device__ inline DivMod<unsigned int> divmod(unsigned int n) const { unsigned int q = div(n); return DivMod<unsigned int>(q, n - q * divisor); } unsigned int divisor; // d above. unsigned int m1; // Magic number: m' above. unsigned int shift; // Shift amounts. }; // From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/cuda/detail/OffsetCalculator.cuh /// OffsetCalculator calculates the offset in bytes of a linear index for NARGS /// operands that share the same shape, but may have different strides. 
template <int NARGS> struct OffsetCalculator { static constexpr int MAX_DIMS = 25; // The offset for each argument (in bytes). Wrapper around fixed-size array. struct offsets_t { __host__ __device__ uint32_t& operator[](int idx) { return values[idx]; } uint32_t values[NARGS]; }; // OffsetCalculator(int dims, const int64_t* sizes, const int64_t* const* strides) : dims(dims) { OffsetCalculator(int dims, const int32_t* sizes, const int32_t* const* strides) : dims(dims) { for (int i = 0; i < MAX_DIMS; ++i) { if (i < dims) { sizes_[i] = IntDivider<uint32_t>(sizes[i]); } else { sizes_[i] = IntDivider<uint32_t>(1); } for (int arg = 0; arg < NARGS; arg++) { strides_[i][arg] = i < dims ? strides[arg][i] : 0; } } } __host__ __device__ offsets_t get(uint32_t linear_idx) const { offsets_t offsets; #pragma unroll for (int arg = 0; arg < NARGS; arg++) { offsets[arg] = 0; } #pragma unroll for (int dim = 0; dim < MAX_DIMS; ++dim) { if (dim == dims) { break; } auto divmod = sizes_[dim].divmod(linear_idx); linear_idx = divmod.div; #pragma unroll for (int arg = 0; arg < NARGS; arg++) { offsets[arg] += divmod.mod * strides_[dim][arg]; } } return offsets; } void print() { for (auto i = 1; i < 128; i++) { auto offsets = get(i); printf("offsets[%d]: ", i); for (auto arg = 0; arg < NARGS; arg++) { printf("%d ", offsets[arg]); } printf("\n"); } } int dims; IntDivider<uint32_t> sizes_[MAX_DIMS]; uint32_t strides_[MAX_DIMS][NARGS]; }; // From: https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/cuda/Loops.cuh template<int nt, int vt, typename func_t> __launch_bounds__(nt, 4) __global__ void elementwise_kernel(int N, func_t f) { int tid = threadIdx.x; int nv = nt * vt; int idx = nv * blockIdx.x + tid; #pragma unroll for (int i = 0; i < vt; i++) { if (idx < N) { f(idx); idx += nt; } } } template<int nt, int vt, typename func_t> static void launch_kernel(int64_t N, const func_t& f) { if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); elementwise_kernel<nt, vt, func_t><<<grid, block, 0>>>(N, f); } template<typename func_t> void gpu_unary_kernel(float *res, float *x, int32_t resRank, const int32_t resScalarCount, const int32_t* resShape, const int32_t* const* strides, const func_t& f) { OffsetCalculator<2> calc(resRank, resShape, strides); launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) { auto offsets = calc.get(idx); float* out = &res[offsets[0]]; float* in = &x[offsets[1]]; *out = f(*in); }); } template<typename func_t> void gpu_binary_kernel(float *res, float *x, float *y, int32_t resRank, const int32_t resScalarCount, const int32_t* resShape, const int32_t* const* strides, const func_t& f) { OffsetCalculator<3> calc(resRank, resShape, strides); launch_kernel<128, 4>(resScalarCount, [=]__device__(int idx) { auto offsets = calc.get(idx); float* out = &res[offsets[0]]; float* in1 = &x[offsets[1]]; float* in2 = &y[offsets[2]]; *out = f(*in1, *in2); }); } #define CUDNN_CALL(f) { \ cudnnStatus_t stat = (f); \ if (stat != CUDNN_STATUS_SUCCESS) { \ fprintf(stderr, "cuDNN error occurred: %d (%s:%d)\n", \ stat, __FILE__, __LINE__); \ exit(stat); \ } \ } void Snippet(char *); std::random_device rd{}; std::mt19937 gen{rd()}; std::normal_distribution<> d{0, 0.01}; int main(int argc, char *argv[]) { if (argc != 2) { printf("usage: query <filename>\n"); return 0; } Snippet(argv[1]); return 0; } /***************************************** Emitting C Generated Code *******************************************/ #include <stdio.h> #include <stdlib.h> #include 
<string.h> #include <stdbool.h> void Snippet(char* x0) { // Backend setup. cublasHandle_t cublasHandle; CUBLAS_CALL(cublasCreate(&cublasHandle)); CUDA_CALL(cudaMalloc(&gpuMallocBase, HEAP_SIZE)); CUDA_CALL(cudaMemset(gpuMallocBase, 0, HEAP_SIZE)); gpuMallocAddr = gpuMallocBase; cudnnHandle_t cudnnHandle; CUDNN_CALL(cudnnCreate(&cudnnHandle)); srand(42); struct timeval begin_0, end_0, diff_0; gettimeofday(&begin_0, NULL); int32_t x7 = open("../../cifar10_data/cifar-10-batches-bin/data_batch_1.bin",0); int64_t x8 = fsize(x7); int64_t x10 = x8 / 3073LL; int32_t x11 = (int32_t)x10; int32_t x12 = x11 * 3072; float* x13 = (float*)myMalloc(x12 * sizeof(float));; int* x14 = (int32_t*)myMalloc(x11 * sizeof(int32_t));; char* x9 = (char*)mmap(0, x8, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x7, 0); for(int x16=0; x16 < x11; x16++) { int32_t x17 = x16 * 3073; char x18 = x9[x17]; int32_t x19 = (int32_t)(unsigned char)x18; x14[x16] = x19; int32_t x25 = x17 + 1; int32_t x23 = x16 * 3072; for(int x22=0; x22 < 3072; x22++) { int32_t x26 = x25 + x22; char x27 = x9[x26]; int32_t x24 = x23 + x22; float x28 = (float)(unsigned char)x27; float x29 = x28 / 255.0f; x13[x24] = x29; } } gettimeofday(&end_0, NULL); timeval_subtract(&diff_0, &end_0, &begin_0);; int64_t x37 = ((diff_0.tv_sec * 1000000L) + (diff_0.tv_usec)); float x38 = (float)x37; float x39 = x38 / 1000000.0f; printf("Data reading in %lf sec\n",x39); // Tensor 'toGPU' invocation. float* x98 = (float*)myGpuMalloc(32768 * sizeof(float)); int32_t x41 = open("/home/fei/bitbucket/Lantern/src/out/PLDI19evaluation/squeezenet/squeezenetCifar10.onnx.bin",0); int64_t x42 = fsize(x41); float* x43 = (float*)mmap(0, x42, PROT_READ | PROT_WRITE, MAP_FILE | MAP_PRIVATE, x41, 0); float* x45 = x43+526720; CUDA_CALL(cudaMemcpy(x98, x45, 32768 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x101 = (float*)myGpuMalloc(48 * sizeof(float)); float* x46 = x43+245136; CUDA_CALL(cudaMemcpy(x101, x46, 48 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x104 = (float*)myGpuMalloc(64 * sizeof(float)); float* x47 = x43+17696; CUDA_CALL(cudaMemcpy(x104, x47, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x107 = (float*)myGpuMalloc(81920 * sizeof(float)); float* x48 = x43+723904; CUDA_CALL(cudaMemcpy(x107, x48, 81920 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x110 = (float*)myGpuMalloc(64 * sizeof(float)); float* x49 = x43+14544; CUDA_CALL(cudaMemcpy(x110, x49, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x113 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x50 = x43+35392; CUDA_CALL(cudaMemcpy(x113, x50, 36864 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x116 = (float*)myGpuMalloc(4096 * sizeof(float)); float* x51 = x43+80608; CUDA_CALL(cudaMemcpy(x116, x51, 4096 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x119 = (float*)myGpuMalloc(16 * sizeof(float)); float* x52 = x43+4224; CUDA_CALL(cudaMemcpy(x119, x52, 16 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x122 = (float*)myGpuMalloc(64 * sizeof(float)); float* x53 = x43+362304; CUDA_CALL(cudaMemcpy(x122, x53, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x125 = (float*)myGpuMalloc(4096 * sizeof(float)); float* x54 = x43+27040; CUDA_CALL(cudaMemcpy(x125, x54, 4096 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x128 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x55 = x43+16672; CUDA_CALL(cudaMemcpy(x128, x55, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x131 = (float*)myGpuMalloc(2048 * sizeof(float)); float* x56 = x43+14608; CUDA_CALL(cudaMemcpy(x131, x56, 2048 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x134 = (float*)myGpuMalloc(256 * sizeof(float)); float* x57 = x43+526464; CUDA_CALL(cudaMemcpy(x134, x57, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x137 = (float*)myGpuMalloc(18432 * sizeof(float)); float* x58 = x43+226704; CUDA_CALL(cudaMemcpy(x137, x58, 18432 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x140 = (float*)myGpuMalloc(32 * sizeof(float)); float* x59 = x43+80576; CUDA_CALL(cudaMemcpy(x140, x59, 32 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x143 = (float*)myGpuMalloc(128 * sizeof(float)); float* x60 = x43+121696; CUDA_CALL(cudaMemcpy(x143, x60, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x146 = (float*)myGpuMalloc(256 * sizeof(float)); float* x61 = x43+723648; CUDA_CALL(cudaMemcpy(x146, x61, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x149 = (float*)myGpuMalloc(82944 * sizeof(float)); float* x62 = x43+254592; CUDA_CALL(cudaMemcpy(x149, x62, 82944 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x152 = (float*)myGpuMalloc(9216 * sizeof(float)); float* x63 = x43+17760; CUDA_CALL(cudaMemcpy(x152, x63, 9216 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x155 = (float*)myGpuMalloc(64 * sizeof(float)); float* x64 = x43+559488; CUDA_CALL(cudaMemcpy(x155, x64, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x158 = (float*)myGpuMalloc(128 * sizeof(float)); float* x65 = x43+84704; CUDA_CALL(cudaMemcpy(x158, x65, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x161 = (float*)myGpuMalloc(9216 * sizeof(float)); float* x66 = x43+245184; CUDA_CALL(cudaMemcpy(x161, x66, 9216 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x164 = (float*)myGpuMalloc(32 * sizeof(float)); float* x67 = x43+31136; CUDA_CALL(cudaMemcpy(x164, x67, 32 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x167 = (float*)myGpuMalloc(1024 * sizeof(float)); float* x68 = x43+4240; CUDA_CALL(cudaMemcpy(x167, x68, 1024 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x170 = (float*)myGpuMalloc(16 * sizeof(float)); float* x69 = x43+16656; CUDA_CALL(cudaMemcpy(x170, x69, 16 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x173 = (float*)myGpuMalloc(256 * sizeof(float)); float* x70 = x43+575936; CUDA_CALL(cudaMemcpy(x173, x70, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x176 = (float*)myGpuMalloc(8192 * sizeof(float)); float* x71 = x43+72384; CUDA_CALL(cudaMemcpy(x176, x71, 8192 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x179 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x72 = x43+379008; CUDA_CALL(cudaMemcpy(x179, x72, 147456 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x182 = (float*)myGpuMalloc(192 * sizeof(float)); float* x73 = x43+226512; CUDA_CALL(cudaMemcpy(x182, x73, 192 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x185 = (float*)myGpuMalloc(147456 * sizeof(float)); float* x74 = x43+576192; CUDA_CALL(cudaMemcpy(x185, x74, 147456 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x188 = (float*)myGpuMalloc(64 * sizeof(float)); float* x75 = x43+5264; CUDA_CALL(cudaMemcpy(x188, x75, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x191 = (float*)myGpuMalloc(192 * sizeof(float)); float* x76 = x43+254400; CUDA_CALL(cudaMemcpy(x191, x76, 192 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x194 = (float*)myGpuMalloc(2592 * sizeof(float)); float* x77 = x43+0; CUDA_CALL(cudaMemcpy(x194, x77, 2592 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x197 = (float*)myGpuMalloc(24576 * sizeof(float)); float* x78 = x43+337728; CUDA_CALL(cudaMemcpy(x197, x78, 24576 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x200 = (float*)myGpuMalloc(4096 * sizeof(float)); float* x79 = x43+31168; CUDA_CALL(cudaMemcpy(x200, x79, 4096 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x203 = (float*)myGpuMalloc(36864 * sizeof(float)); float* x80 = x43+84832; CUDA_CALL(cudaMemcpy(x203, x80, 36864 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x206 = (float*)myGpuMalloc(64 * sizeof(float)); float* x81 = x43+26976; CUDA_CALL(cudaMemcpy(x206, x81, 64 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x209 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x82 = x43+559552; CUDA_CALL(cudaMemcpy(x209, x82, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x212 = (float*)myGpuMalloc(82944 * sizeof(float)); float* x83 = x43+143568; CUDA_CALL(cudaMemcpy(x212, x83, 82944 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x215 = (float*)myGpuMalloc(256 * sizeof(float)); float* x84 = x43+378752; CUDA_CALL(cudaMemcpy(x215, x84, 256 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x218 = (float*)myGpuMalloc(128 * sizeof(float)); float* x85 = x43+72256; CUDA_CALL(cudaMemcpy(x218, x85, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x221 = (float*)myGpuMalloc(12288 * sizeof(float)); float* x86 = x43+121824; CUDA_CALL(cudaMemcpy(x221, x86, 12288 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x224 = (float*)myGpuMalloc(96 * sizeof(float)); float* x87 = x43+2592; CUDA_CALL(cudaMemcpy(x224, x87, 96 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x227 = (float*)myGpuMalloc(192 * sizeof(float)); float* x88 = x43+337536; CUDA_CALL(cudaMemcpy(x227, x88, 192 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x230 = (float*)myGpuMalloc(128 * sizeof(float)); float* x89 = x43+35264; CUDA_CALL(cudaMemcpy(x230, x89, 128 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. 
float* x233 = (float*)myGpuMalloc(192 * sizeof(float)); float* x90 = x43+143376; CUDA_CALL(cudaMemcpy(x233, x90, 192 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x236 = (float*)myGpuMalloc(9216 * sizeof(float)); float* x91 = x43+5328; CUDA_CALL(cudaMemcpy(x236, x91, 9216 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x239 = (float*)myGpuMalloc(9216 * sizeof(float)); float* x92 = x43+134160; CUDA_CALL(cudaMemcpy(x239, x92, 9216 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x242 = (float*)myGpuMalloc(16384 * sizeof(float)); float* x93 = x43+362368; CUDA_CALL(cudaMemcpy(x242, x93, 16384 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x245 = (float*)myGpuMalloc(1536 * sizeof(float)); float* x94 = x43+2688; CUDA_CALL(cudaMemcpy(x245, x94, 1536 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x248 = (float*)myGpuMalloc(10 * sizeof(float)); float* x95 = x43+805824; CUDA_CALL(cudaMemcpy(x248, x95, 10 * sizeof(float), cudaMemcpyHostToDevice)); // Tensor 'toGPU' invocation. float* x251 = (float*)myGpuMalloc(48 * sizeof(float)); float* x96 = x43+134112; CUDA_CALL(cudaMemcpy(x251, x96, 48 * sizeof(float), cudaMemcpyHostToDevice)); int64_t x253 = (long)mallocAddr; int64_t x254 = (long)gpuMallocAddr; // inferencing loop starts here int32_t x262 = x11 / 64; int32_t x272 = 31 / 1; int32_t x273 = x272 + 1; int32_t x277 = 6144 * x273; int32_t x278 = x277 * x273; int32_t x274 = x273 * x273; int32_t x275 = 96 * x274; int32_t x276 = 64 * x275; int32_t x300 = x273 - 2; int32_t x301 = x300 / 2; int32_t x302 = x301 + 1; int32_t x306 = 6144 * x302; int32_t x307 = x306 * x302; bool x310 = x302 >= 1; bool x311; if (x310) { x311 = x310; } else { x311 = false; } int32_t x316 = x301 / 1; int32_t x317 = x316 + 1; int32_t x321 = 1024 * x317; int32_t x322 = x321 * x317; int32_t x318 = x317 * x317; int32_t x319 = 16 * x318; int32_t x320 = 64 * x319; bool x340 = x317 >= 1; bool x341; if (x340) { x341 = x340; } else { x341 = false; } int32_t x346 = x316 / 1; int32_t x347 = x346 + 1; int32_t x351 = 4096 * x347; int32_t x352 = x351 * x347; int32_t x348 = x347 * x347; int32_t x349 = 64 * x348; int32_t x350 = 64 * x349; int32_t x370 = x317 + 2; bool x371 = x370 >= 3; bool x372; if (x371) { x372 = x371; } else { x372 = false; } int32_t x377 = x370 - 3; int32_t x378 = x377 / 1; int32_t x379 = x378 + 1; int32_t x383 = 4096 * x379; int32_t x384 = x383 * x379; int32_t x380 = x379 * x379; int32_t x381 = 64 * x380; int32_t x382 = 64 * x381; bool x402 = true || false; bool x404; if (x402) { bool x403 = true || true; x404 = x403; } else { x404 = false; } bool x407; if (x404) { bool x405 = x379 == x347; bool x406 = x405 || false; x407 = x406; } else { x407 = false; } bool x408; if (x407) { bool x405 = x379 == x347; bool x406 = x405 || false; x408 = x406; } else { x408 = false; } int32_t x417 = 8192 * x347; int32_t x418 = x417 * x347; int32_t x415 = 128 * x348; bool x421 = x347 >= 1; bool x422; if (x421) { x422 = x421; } else { x422 = false; } int32_t x427 = x346 / 1; int32_t x428 = x427 + 1; int32_t x432 = 1024 * x428; int32_t x433 = x432 * x428; int32_t x429 = x428 * x428; int32_t x430 = 16 * x429; int32_t x431 = 64 * x430; bool x451 = x428 >= 1; bool x452; if (x451) { x452 = x451; } else { x452 = false; } int32_t x457 = x427 / 1; int32_t x458 = x457 + 1; int32_t x462 = 4096 * x458; int32_t x463 = x462 * x458; int32_t x459 = x458 * x458; int32_t x460 = 64 * x459; int32_t x461 
= 64 * x460; int32_t x481 = x428 + 2; bool x482 = x481 >= 3; bool x483; if (x482) { x483 = x482; } else { x483 = false; } int32_t x488 = x481 - 3; int32_t x489 = x488 / 1; int32_t x490 = x489 + 1; int32_t x494 = 4096 * x490; int32_t x495 = x494 * x490; int32_t x491 = x490 * x490; int32_t x492 = 64 * x491; int32_t x493 = 64 * x492; bool x515; if (x404) { bool x513 = x490 == x458; bool x514 = x513 || false; x515 = x514; } else { x515 = false; } bool x516; if (x515) { bool x513 = x490 == x458; bool x514 = x513 || false; x516 = x514; } else { x516 = false; } int32_t x525 = 8192 * x458; int32_t x526 = x525 * x458; int32_t x523 = 128 * x459; bool x529 = x458 >= 1; bool x530; if (x529) { x530 = x529; } else { x530 = false; } int32_t x535 = x457 / 1; int32_t x536 = x535 + 1; int32_t x540 = 2048 * x536; int32_t x541 = x540 * x536; int32_t x537 = x536 * x536; int32_t x538 = 32 * x537; int32_t x539 = 64 * x538; bool x559 = x536 >= 1; bool x560; if (x559) { x560 = x559; } else { x560 = false; } int32_t x565 = x535 / 1; int32_t x566 = x565 + 1; int32_t x570 = 8192 * x566; int32_t x571 = x570 * x566; int32_t x567 = x566 * x566; int32_t x568 = 128 * x567; int32_t x569 = 64 * x568; int32_t x589 = x536 + 2; bool x590 = x589 >= 3; bool x591; if (x590) { x591 = x590; } else { x591 = false; } int32_t x596 = x589 - 3; int32_t x597 = x596 / 1; int32_t x598 = x597 + 1; int32_t x602 = 8192 * x598; int32_t x603 = x602 * x598; int32_t x599 = x598 * x598; int32_t x600 = 128 * x599; int32_t x601 = 64 * x600; bool x623; if (x404) { bool x621 = x598 == x566; bool x622 = x621 || false; x623 = x622; } else { x623 = false; } bool x624; if (x623) { bool x621 = x598 == x566; bool x622 = x621 || false; x624 = x622; } else { x624 = false; } int32_t x633 = 16384 * x566; int32_t x634 = x633 * x566; int32_t x631 = 256 * x567; int32_t x641 = x566 - 2; int32_t x642 = x641 / 2; int32_t x643 = x642 + 1; int32_t x647 = 16384 * x643; int32_t x648 = x647 * x643; bool x651 = x643 >= 1; bool x652; if (x651) { x652 = x651; } else { x652 = false; } int32_t x657 = x642 / 1; int32_t x658 = x657 + 1; int32_t x662 = 2048 * x658; int32_t x663 = x662 * x658; int32_t x659 = x658 * x658; int32_t x660 = 32 * x659; int32_t x661 = 64 * x660; bool x681 = x658 >= 1; bool x682; if (x681) { x682 = x681; } else { x682 = false; } int32_t x687 = x657 / 1; int32_t x688 = x687 + 1; int32_t x692 = 8192 * x688; int32_t x693 = x692 * x688; int32_t x689 = x688 * x688; int32_t x690 = 128 * x689; int32_t x691 = 64 * x690; int32_t x711 = x658 + 2; bool x712 = x711 >= 3; bool x713; if (x712) { x713 = x712; } else { x713 = false; } int32_t x718 = x711 - 3; int32_t x719 = x718 / 1; int32_t x720 = x719 + 1; int32_t x724 = 8192 * x720; int32_t x725 = x724 * x720; int32_t x721 = x720 * x720; int32_t x722 = 128 * x721; int32_t x723 = 64 * x722; bool x745; if (x404) { bool x743 = x720 == x688; bool x744 = x743 || false; x745 = x744; } else { x745 = false; } bool x746; if (x745) { bool x743 = x720 == x688; bool x744 = x743 || false; x746 = x744; } else { x746 = false; } int32_t x755 = 16384 * x688; int32_t x756 = x755 * x688; int32_t x753 = 256 * x689; bool x759 = x688 >= 1; bool x760; if (x759) { x760 = x759; } else { x760 = false; } int32_t x765 = x687 / 1; int32_t x766 = x765 + 1; int32_t x770 = 3072 * x766; int32_t x771 = x770 * x766; int32_t x767 = x766 * x766; int32_t x768 = 48 * x767; int32_t x769 = 64 * x768; bool x789 = x766 >= 1; bool x790; if (x789) { x790 = x789; } else { x790 = false; } int32_t x795 = x765 / 1; int32_t x796 = x795 + 1; int32_t x800 = 12288 * 
x796; int32_t x801 = x800 * x796; int32_t x797 = x796 * x796; int32_t x798 = 192 * x797; int32_t x799 = 64 * x798; int32_t x819 = x766 + 2; bool x820 = x819 >= 3; bool x821; if (x820) { x821 = x820; } else { x821 = false; } int32_t x826 = x819 - 3; int32_t x827 = x826 / 1; int32_t x828 = x827 + 1; int32_t x832 = 12288 * x828; int32_t x833 = x832 * x828; int32_t x829 = x828 * x828; int32_t x830 = 192 * x829; int32_t x831 = 64 * x830; bool x853; if (x404) { bool x851 = x828 == x796; bool x852 = x851 || false; x853 = x852; } else { x853 = false; } bool x854; if (x853) { bool x851 = x828 == x796; bool x852 = x851 || false; x854 = x852; } else { x854 = false; } int32_t x863 = 24576 * x796; int32_t x864 = x863 * x796; int32_t x861 = 384 * x797; bool x867 = x796 >= 1; bool x868; if (x867) { x868 = x867; } else { x868 = false; } int32_t x873 = x795 / 1; int32_t x874 = x873 + 1; int32_t x878 = 3072 * x874; int32_t x879 = x878 * x874; int32_t x875 = x874 * x874; int32_t x876 = 48 * x875; int32_t x877 = 64 * x876; bool x897 = x874 >= 1; bool x898; if (x897) { x898 = x897; } else { x898 = false; } int32_t x903 = x873 / 1; int32_t x904 = x903 + 1; int32_t x908 = 12288 * x904; int32_t x909 = x908 * x904; int32_t x905 = x904 * x904; int32_t x906 = 192 * x905; int32_t x907 = 64 * x906; int32_t x927 = x874 + 2; bool x928 = x927 >= 3; bool x929; if (x928) { x929 = x928; } else { x929 = false; } int32_t x934 = x927 - 3; int32_t x935 = x934 / 1; int32_t x936 = x935 + 1; int32_t x940 = 12288 * x936; int32_t x941 = x940 * x936; int32_t x937 = x936 * x936; int32_t x938 = 192 * x937; int32_t x939 = 64 * x938; bool x961; if (x404) { bool x959 = x936 == x904; bool x960 = x959 || false; x961 = x960; } else { x961 = false; } bool x962; if (x961) { bool x959 = x936 == x904; bool x960 = x959 || false; x962 = x960; } else { x962 = false; } int32_t x971 = 24576 * x904; int32_t x972 = x971 * x904; int32_t x969 = 384 * x905; bool x975 = x904 >= 1; bool x976; if (x975) { x976 = x975; } else { x976 = false; } int32_t x981 = x903 / 1; int32_t x982 = x981 + 1; int32_t x986 = 4096 * x982; int32_t x987 = x986 * x982; int32_t x983 = x982 * x982; int32_t x984 = 64 * x983; int32_t x985 = 64 * x984; bool x1005 = x982 >= 1; bool x1006; if (x1005) { x1006 = x1005; } else { x1006 = false; } int32_t x1011 = x981 / 1; int32_t x1012 = x1011 + 1; int32_t x1016 = 16384 * x1012; int32_t x1017 = x1016 * x1012; int32_t x1013 = x1012 * x1012; int32_t x1014 = 256 * x1013; int32_t x1015 = 64 * x1014; int32_t x1035 = x982 + 2; bool x1036 = x1035 >= 3; bool x1037; if (x1036) { x1037 = x1036; } else { x1037 = false; } int32_t x1042 = x1035 - 3; int32_t x1043 = x1042 / 1; int32_t x1044 = x1043 + 1; int32_t x1048 = 16384 * x1044; int32_t x1049 = x1048 * x1044; int32_t x1045 = x1044 * x1044; int32_t x1046 = 256 * x1045; int32_t x1047 = 64 * x1046; bool x1069; if (x404) { bool x1067 = x1044 == x1012; bool x1068 = x1067 || false; x1069 = x1068; } else { x1069 = false; } bool x1070; if (x1069) { bool x1067 = x1044 == x1012; bool x1068 = x1067 || false; x1070 = x1068; } else { x1070 = false; } int32_t x1079 = 32768 * x1012; int32_t x1080 = x1079 * x1012; int32_t x1077 = 512 * x1013; int32_t x1087 = x1012 - 2; int32_t x1088 = x1087 / 2; int32_t x1089 = x1088 + 1; int32_t x1093 = 32768 * x1089; int32_t x1094 = x1093 * x1089; bool x1097 = x1089 >= 1; bool x1098; if (x1097) { x1098 = x1097; } else { x1098 = false; } int32_t x1103 = x1088 / 1; int32_t x1104 = x1103 + 1; int32_t x1108 = 4096 * x1104; int32_t x1109 = x1108 * x1104; int32_t x1105 = x1104 * x1104; 
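// The integer arithmetic in this block precomputes every activation shape used by
// the inference loop. Each convolution or pooling output size follows
//   out = (in + 2*pad - kernel) / stride + 1,
// e.g. the 3x3/pad-1/stride-1 stem convolution on 32x32 CIFAR images gives
// x272 = (32 + 2*1 - 3) / 1 = 31 and x273 = 32, and the first 2x2/stride-2 max-pool
// gives x301 = (32 - 2) / 2 = 15 and x302 = 16. The loop that follows implements a
// SqueezeNet-style network: the stem convolution, then a chain of "fire" modules
// (1x1 squeeze convolution, parallel 1x1 and 3x3 expand convolutions, forward
// channel concatenation via concat2D_1D_greg), with 2x2 max-pooling between groups
// and a final convolutional classifier with 10 output channels.
// The closed form is restated below as a disabled sketch; `convOutDim` is an
// illustrative name, not used by the generated code.
#if 0
auto convOutDim = [](int in, int kernel, int pad, int stride) {
  return (in + 2 * pad - kernel) / stride + 1;
};
assert(convOutDim(32, 3, 1, 1) == 32);  // matches x273 (stem convolution)
assert(convOutDim(32, 2, 0, 2) == 16);  // matches x302 (first 2x2 max-pool)
#endif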
int32_t x1106 = 64 * x1105; int32_t x1107 = 64 * x1106; bool x1127 = x1104 >= 1; bool x1128; if (x1127) { x1128 = x1127; } else { x1128 = false; } int32_t x1133 = x1103 / 1; int32_t x1134 = x1133 + 1; int32_t x1138 = 16384 * x1134; int32_t x1139 = x1138 * x1134; int32_t x1135 = x1134 * x1134; int32_t x1136 = 256 * x1135; int32_t x1137 = 64 * x1136; int32_t x1157 = x1104 + 2; bool x1158 = x1157 >= 3; bool x1159; if (x1158) { x1159 = x1158; } else { x1159 = false; } int32_t x1164 = x1157 - 3; int32_t x1165 = x1164 / 1; int32_t x1166 = x1165 + 1; int32_t x1170 = 16384 * x1166; int32_t x1171 = x1170 * x1166; int32_t x1167 = x1166 * x1166; int32_t x1168 = 256 * x1167; int32_t x1169 = 64 * x1168; bool x1191; if (x404) { bool x1189 = x1166 == x1134; bool x1190 = x1189 || false; x1191 = x1190; } else { x1191 = false; } bool x1192; if (x1191) { bool x1189 = x1166 == x1134; bool x1190 = x1189 || false; x1192 = x1190; } else { x1192 = false; } int32_t x1201 = 32768 * x1134; int32_t x1202 = x1201 * x1134; int32_t x1199 = 512 * x1135; bool x1205 = x1134 >= 4; bool x1206; if (x1205) { x1206 = x1205; } else { x1206 = false; } int32_t x1211 = x1134 - 4; int32_t x1212 = x1211 / 1; int32_t x1213 = x1212 + 1; int32_t x1217 = 640 * x1213; int32_t x1218 = x1217 * x1213; int32_t x1214 = x1213 * x1213; int32_t x1215 = 10 * x1214; int32_t x1216 = 64 * x1215; int64_t x1264 = (int64_t)x11; for(int x257=0; x257 < 4; x257++) { struct timeval begin_1, end_1, diff_1; int32_t x259 = x257 + 1; printf("Start inferencing epoch %d\n",x259); gettimeofday(&begin_1, NULL); for(int x264=0; x264 < x262; x264++) { int32_t x265 = x264 * 64; int32_t x266 = x265 * 3072; float* x267 = x13+x266; int* x268 = x14+x265; // Tensor 'toGPU' invocation. float* x270 = (float*)myGpuMalloc(196608 * sizeof(float)); CUDA_CALL(cudaMemcpy(x270, x267, 196608 * sizeof(float), cudaMemcpyHostToDevice)); float* x279 = (float*)myGpuMalloc(x278 * sizeof(float)); float* x280 = (float*)myMalloc(1 * sizeof(float));; x280[0] = 0.0f; float* x282 = (float*)myMalloc(1 * sizeof(float));; x282[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 3, 32, 32)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 96, 3, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x273, x273)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x282, in_desc, x270, filt_desc, x194, conv_desc, algo, ws_data, ws_size, x280, out_desc, x279)); }; float* x285 = (float*)myMalloc(1 * sizeof(float));; x285[0] = 1.0f; float* x287 = (float*)myMalloc(1 * sizeof(float));; x287[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 96, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x273, x273)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x285, bias_desc, x224, x287, out_desc, x279)); }; float* x290 = (float*)myMalloc(1 * sizeof(float));; x290[0] = 0.0f; float* x292 = (float*)myMalloc(1 * sizeof(float));; x292[0] = 1.0f; float* x294 = (float*)myGpuMalloc(x276 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x273, x273)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x292, x_desc, x279, x290, x_desc, x294)); }; float* x296 = (float*)myMalloc(1 * sizeof(float));; x296[0] = 0.0f; float* x298 = (float*)myMalloc(1 * sizeof(float));; x298[0] = 1.0f; float* x308 = (float*)myGpuMalloc(x307 * sizeof(float)); { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x273, x273) ); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x302, x302)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2 )); CUDNN_CALL(cudnnPoolingForward( cudnnHandle, poolingDesc, x298, in_desc, x294, x296, out_desc, x308)); }; if (x311) { } else { assert(false && "ERROR not specified"); } float* x323 = (float*)myGpuMalloc(x322 * sizeof(float)); float* x324 = (float*)myMalloc(1 * sizeof(float));; x324[0] = 0.0f; float* x326 = (float*)myMalloc(1 * sizeof(float));; x326[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 96, x302, x302)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 16, 96, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x317, x317)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
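// Every convolution block in this file selects its algorithm the same way:
// cudnnGetConvolutionForwardAlgorithm with CUDNN_CONVOLUTION_FWD_PREFER_FASTEST and
// no workspace limit, then a workspace-size query for the chosen algorithm. That
// query entry point was removed in cuDNN 8; if this file were built against cuDNN 8
// or newer, the selection would need to change to something like the disabled
// sketch below (an assumption about the target cuDNN version, not part of the
// generated code).
#if 0
int returnedAlgoCount = 0;
cudnnConvolutionFwdAlgoPerf_t perfResults[CUDNN_CONVOLUTION_FWD_ALGO_COUNT];
CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm_v7(
    cudnnHandle, in_desc, filt_desc, conv_desc, out_desc,
    CUDNN_CONVOLUTION_FWD_ALGO_COUNT, &returnedAlgoCount, perfResults));
cudnnConvolutionFwdAlgo_t algo = perfResults[0].algo;  // results come back sorted by expected time
#endif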
cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x326, in_desc, x308, filt_desc, x245, conv_desc, algo, ws_data, ws_size, x324, out_desc, x323)); }; float* x329 = (float*)myMalloc(1 * sizeof(float));; x329[0] = 1.0f; float* x331 = (float*)myMalloc(1 * sizeof(float));; x331[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 16, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x317, x317)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x329, bias_desc, x119, x331, out_desc, x323)); }; float* x334 = (float*)myMalloc(1 * sizeof(float));; x334[0] = 0.0f; float* x336 = (float*)myMalloc(1 * sizeof(float));; x336[0] = 1.0f; float* x338 = (float*)myGpuMalloc(x320 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x317, x317)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x336, x_desc, x323, x334, x_desc, x338)); }; if (x341) { } else { assert(false && "ERROR not specified"); } float* x353 = (float*)myGpuMalloc(x352 * sizeof(float)); float* x354 = (float*)myMalloc(1 * sizeof(float));; x354[0] = 0.0f; float* x356 = (float*)myMalloc(1 * sizeof(float));; x356[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x317, x317)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x347, x347)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
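// The forward call below is the 1x1 "expand" branch of the first fire module: the
// 16 squeeze channels in x338 are mapped to 64 channels at 16x16 (weights x167,
// with bias x188 broadcast-added right after).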
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x356, in_desc, x338, filt_desc, x167, conv_desc, algo, ws_data, ws_size, x354, out_desc, x353)); }; float* x359 = (float*)myMalloc(1 * sizeof(float));; x359[0] = 1.0f; float* x361 = (float*)myMalloc(1 * sizeof(float));; x361[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x347, x347)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x359, bias_desc, x188, x361, out_desc, x353)); }; float* x364 = (float*)myMalloc(1 * sizeof(float));; x364[0] = 0.0f; float* x366 = (float*)myMalloc(1 * sizeof(float));; x366[0] = 1.0f; float* x368 = (float*)myGpuMalloc(x350 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x347, x347)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x366, x_desc, x353, x364, x_desc, x368)); }; if (x372) { } else { assert(false && "ERROR not specified"); } float* x385 = (float*)myGpuMalloc(x384 * sizeof(float)); float* x386 = (float*)myMalloc(1 * sizeof(float));; x386[0] = 0.0f; float* x388 = (float*)myMalloc(1 * sizeof(float));; x388[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x317, x317)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x379, x379)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x388, in_desc, x338, filt_desc, x236, conv_desc, algo, ws_data, ws_size, x386, out_desc, x385)); }; float* x391 = (float*)myMalloc(1 * sizeof(float));; x391[0] = 1.0f; float* x393 = (float*)myMalloc(1 * sizeof(float));; x393[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x379, x379)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x391, bias_desc, x110, x393, out_desc, x385)); }; float* x396 = (float*)myMalloc(1 * sizeof(float));; x396[0] = 0.0f; float* x398 = (float*)myMalloc(1 * sizeof(float));; x398[0] = 1.0f; float* x400 = (float*)myGpuMalloc(x382 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x379, x379)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x398, x_desc, x385, x396, x_desc, x400)); }; if (x408) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x419 = (float*)myGpuMalloc(x418 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x368, 64, x350, x400, 64, x382, x419, 1, 64, 128, x347, x347, x415, x348, x347, 1); }; if (x422) { } else { assert(false && "ERROR not specified"); } float* x434 = (float*)myGpuMalloc(x433 * sizeof(float)); float* x435 = (float*)myMalloc(1 * sizeof(float));; x435[0] = 0.0f; float* x437 = (float*)myMalloc(1 * sizeof(float));; x437[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x347, x347)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 16, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x428, x428)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
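// The forward call below is the 1x1 "squeeze" convolution of the second fire
// module: the 128-channel concatenation x419 is reduced to 16 channels
// (weights x131, bias x170).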
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x437, in_desc, x419, filt_desc, x131, conv_desc, algo, ws_data, ws_size, x435, out_desc, x434)); }; float* x440 = (float*)myMalloc(1 * sizeof(float));; x440[0] = 1.0f; float* x442 = (float*)myMalloc(1 * sizeof(float));; x442[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 16, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x428, x428)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x440, bias_desc, x170, x442, out_desc, x434)); }; float* x445 = (float*)myMalloc(1 * sizeof(float));; x445[0] = 0.0f; float* x447 = (float*)myMalloc(1 * sizeof(float));; x447[0] = 1.0f; float* x449 = (float*)myGpuMalloc(x431 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x428, x428)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x447, x_desc, x434, x445, x_desc, x449)); }; if (x452) { } else { assert(false && "ERROR not specified"); } float* x464 = (float*)myGpuMalloc(x463 * sizeof(float)); float* x465 = (float*)myMalloc(1 * sizeof(float));; x465[0] = 0.0f; float* x467 = (float*)myMalloc(1 * sizeof(float));; x467[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x428, x428)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x458, x458)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x467, in_desc, x449, filt_desc, x128, conv_desc, algo, ws_data, ws_size, x465, out_desc, x464)); }; float* x470 = (float*)myMalloc(1 * sizeof(float));; x470[0] = 1.0f; float* x472 = (float*)myMalloc(1 * sizeof(float));; x472[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x458, x458)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x470, bias_desc, x104, x472, out_desc, x464)); }; float* x475 = (float*)myMalloc(1 * sizeof(float));; x475[0] = 0.0f; float* x477 = (float*)myMalloc(1 * sizeof(float));; x477[0] = 1.0f; float* x479 = (float*)myGpuMalloc(x461 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x458, x458)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x477, x_desc, x464, x475, x_desc, x479)); }; if (x483) { } else { assert(false && "ERROR not specified"); } float* x496 = (float*)myGpuMalloc(x495 * sizeof(float)); float* x497 = (float*)myMalloc(1 * sizeof(float));; x497[0] = 0.0f; float* x499 = (float*)myMalloc(1 * sizeof(float));; x499[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 16, x428, x428)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 16, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x490, x490)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x499, in_desc, x449, filt_desc, x152, conv_desc, algo, ws_data, ws_size, x497, out_desc, x496)); }; float* x502 = (float*)myMalloc(1 * sizeof(float));; x502[0] = 1.0f; float* x504 = (float*)myMalloc(1 * sizeof(float));; x504[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x490, x490)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x502, bias_desc, x206, x504, out_desc, x496)); }; float* x507 = (float*)myMalloc(1 * sizeof(float));; x507[0] = 0.0f; float* x509 = (float*)myMalloc(1 * sizeof(float));; x509[0] = 1.0f; float* x511 = (float*)myGpuMalloc(x493 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x490, x490)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x509, x_desc, x496, x507, x_desc, x511)); }; if (x516) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x527 = (float*)myGpuMalloc(x526 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x479, 64, x461, x511, 64, x493, x527, 1, 64, 128, x458, x458, x523, x459, x458, 1); }; if (x530) { } else { assert(false && "ERROR not specified"); } float* x542 = (float*)myGpuMalloc(x541 * sizeof(float)); float* x543 = (float*)myMalloc(1 * sizeof(float));; x543[0] = 0.0f; float* x545 = (float*)myMalloc(1 * sizeof(float));; x545[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x458, x458)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 32, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x536, x536)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
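// The forward call below is the 1x1 "squeeze" convolution of the third fire
// module: the 128-channel concatenation x527 is reduced to 32 channels
// (weights x125, bias x164).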
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x545, in_desc, x527, filt_desc, x125, conv_desc, algo, ws_data, ws_size, x543, out_desc, x542)); }; float* x548 = (float*)myMalloc(1 * sizeof(float));; x548[0] = 1.0f; float* x550 = (float*)myMalloc(1 * sizeof(float));; x550[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 32, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x536, x536)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x548, bias_desc, x164, x550, out_desc, x542)); }; float* x553 = (float*)myMalloc(1 * sizeof(float));; x553[0] = 0.0f; float* x555 = (float*)myMalloc(1 * sizeof(float));; x555[0] = 1.0f; float* x557 = (float*)myGpuMalloc(x539 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x536, x536)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x555, x_desc, x542, x553, x_desc, x557)); }; if (x560) { } else { assert(false && "ERROR not specified"); } float* x572 = (float*)myGpuMalloc(x571 * sizeof(float)); float* x573 = (float*)myMalloc(1 * sizeof(float));; x573[0] = 0.0f; float* x575 = (float*)myMalloc(1 * sizeof(float));; x575[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x536, x536)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x566, x566)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x575, in_desc, x557, filt_desc, x200, conv_desc, algo, ws_data, ws_size, x573, out_desc, x572)); }; float* x578 = (float*)myMalloc(1 * sizeof(float));; x578[0] = 1.0f; float* x580 = (float*)myMalloc(1 * sizeof(float));; x580[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x566, x566)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x578, bias_desc, x230, x580, out_desc, x572)); }; float* x583 = (float*)myMalloc(1 * sizeof(float));; x583[0] = 0.0f; float* x585 = (float*)myMalloc(1 * sizeof(float));; x585[0] = 1.0f; float* x587 = (float*)myGpuMalloc(x569 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x566, x566)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x585, x_desc, x572, x583, x_desc, x587)); }; if (x591) { } else { assert(false && "ERROR not specified"); } float* x604 = (float*)myGpuMalloc(x603 * sizeof(float)); float* x605 = (float*)myMalloc(1 * sizeof(float));; x605[0] = 0.0f; float* x607 = (float*)myMalloc(1 * sizeof(float));; x607[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x536, x536)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x598, x598)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
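// The forward call below is the 3x3 "expand" branch of the third fire module: the
// 32 squeeze channels in x557 are mapped to 128 channels with padding 1 (weights
// x113, bias x218); its output is concatenated with the 1x1 branch and the
// resulting 256-channel tensor is 2x2 max-pooled down to 8x8.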
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x607, in_desc, x557, filt_desc, x113, conv_desc, algo, ws_data, ws_size, x605, out_desc, x604)); }; float* x610 = (float*)myMalloc(1 * sizeof(float));; x610[0] = 1.0f; float* x612 = (float*)myMalloc(1 * sizeof(float));; x612[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x598, x598)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x610, bias_desc, x218, x612, out_desc, x604)); }; float* x615 = (float*)myMalloc(1 * sizeof(float));; x615[0] = 0.0f; float* x617 = (float*)myMalloc(1 * sizeof(float));; x617[0] = 1.0f; float* x619 = (float*)myGpuMalloc(x601 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x598, x598)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x617, x_desc, x604, x615, x_desc, x619)); }; if (x624) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x635 = (float*)myGpuMalloc(x634 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x587, 128, x569, x619, 128, x601, x635, 1, 64, 256, x566, x566, x631, x567, x566, 1); }; float* x637 = (float*)myMalloc(1 * sizeof(float));; x637[0] = 0.0f; float* x639 = (float*)myMalloc(1 * sizeof(float));; x639[0] = 1.0f; float* x649 = (float*)myGpuMalloc(x648 * sizeof(float)); { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x566, x566) ); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x643, x643)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2 )); CUDNN_CALL(cudnnPoolingForward( cudnnHandle, poolingDesc, x639, in_desc, x635, x637, out_desc, x649)); }; if (x652) { } else { assert(false && "ERROR not specified"); } float* x664 = (float*)myGpuMalloc(x663 * sizeof(float)); float* x665 = (float*)myMalloc(1 * sizeof(float));; x665[0] = 0.0f; float* x667 = (float*)myMalloc(1 * sizeof(float));; x667[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x643, x643)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 32, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, 
x658, x658)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x667, in_desc, x649, filt_desc, x176, conv_desc, algo, ws_data, ws_size, x665, out_desc, x664)); }; float* x670 = (float*)myMalloc(1 * sizeof(float));; x670[0] = 1.0f; float* x672 = (float*)myMalloc(1 * sizeof(float));; x672[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 32, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x658, x658)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x670, bias_desc, x140, x672, out_desc, x664)); }; float* x675 = (float*)myMalloc(1 * sizeof(float));; x675[0] = 0.0f; float* x677 = (float*)myMalloc(1 * sizeof(float));; x677[0] = 1.0f; float* x679 = (float*)myGpuMalloc(x661 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x658, x658)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x677, x_desc, x664, x675, x_desc, x679)); }; if (x682) { } else { assert(false && "ERROR not specified"); } float* x694 = (float*)myGpuMalloc(x693 * sizeof(float)); float* x695 = (float*)myMalloc(1 * sizeof(float));; x695[0] = 0.0f; float* x697 = (float*)myMalloc(1 * sizeof(float));; x697[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x658, x658)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x688, x688)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. 
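// Workspace note: each convolution services its cuDNN workspace from myGpuMalloc,
// the same bump allocator used for activations; its pointer was snapshotted into
// x254 before the loop, presumably so the whole device arena can be reset after
// each batch. Descriptors and workspaces are created per call and never destroyed
// here, so with a conventional allocator the usual cudnnDestroy*Descriptor and
// cudaFree cleanup would be needed to avoid leaks.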
size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x697, in_desc, x679, filt_desc, x116, conv_desc, algo, ws_data, ws_size, x695, out_desc, x694)); }; float* x700 = (float*)myMalloc(1 * sizeof(float));; x700[0] = 1.0f; float* x702 = (float*)myMalloc(1 * sizeof(float));; x702[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x688, x688)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x700, bias_desc, x158, x702, out_desc, x694)); }; float* x705 = (float*)myMalloc(1 * sizeof(float));; x705[0] = 0.0f; float* x707 = (float*)myMalloc(1 * sizeof(float));; x707[0] = 1.0f; float* x709 = (float*)myGpuMalloc(x691 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x688, x688)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x707, x_desc, x694, x705, x_desc, x709)); }; if (x713) { } else { assert(false && "ERROR not specified"); } float* x726 = (float*)myGpuMalloc(x725 * sizeof(float)); float* x727 = (float*)myMalloc(1 * sizeof(float));; x727[0] = 0.0f; float* x729 = (float*)myMalloc(1 * sizeof(float));; x729[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 32, x658, x658)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 128, 32, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x720, x720)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x729, in_desc, x679, filt_desc, x203, conv_desc, algo, ws_data, ws_size, x727, out_desc, x726)); }; float* x732 = (float*)myMalloc(1 * sizeof(float));; x732[0] = 1.0f; float* x734 = (float*)myMalloc(1 * sizeof(float));; x734[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 128, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x720, x720)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x732, bias_desc, x143, x734, out_desc, x726)); }; float* x737 = (float*)myMalloc(1 * sizeof(float));; x737[0] = 0.0f; float* x739 = (float*)myMalloc(1 * sizeof(float));; x739[0] = 1.0f; float* x741 = (float*)myGpuMalloc(x723 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 128, x720, x720)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x739, x_desc, x726, x737, x_desc, x741)); }; if (x746) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x757 = (float*)myGpuMalloc(x756 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x709, 128, x691, x741, 128, x723, x757, 1, 64, 256, x688, x688, x753, x689, x688, 1); }; if (x760) { } else { assert(false && "ERROR not specified"); } float* x772 = (float*)myGpuMalloc(x771 * sizeof(float)); float* x773 = (float*)myMalloc(1 * sizeof(float));; x773[0] = 0.0f; float* x775 = (float*)myMalloc(1 * sizeof(float));; x775[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x688, x688)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 48, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x766, x766)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x775, in_desc, x757, filt_desc, x221, conv_desc, algo, ws_data, ws_size, x773, out_desc, x772)); }; float* x778 = (float*)myMalloc(1 * sizeof(float));; x778[0] = 1.0f; float* x780 = (float*)myMalloc(1 * sizeof(float));; x780[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 48, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x766, x766)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x778, bias_desc, x251, x780, out_desc, x772)); }; float* x783 = (float*)myMalloc(1 * sizeof(float));; x783[0] = 0.0f; float* x785 = (float*)myMalloc(1 * sizeof(float));; x785[0] = 1.0f; float* x787 = (float*)myGpuMalloc(x769 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x766, x766)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x785, x_desc, x772, x783, x_desc, x787)); }; if (x790) { } else { assert(false && "ERROR not specified"); } float* x802 = (float*)myGpuMalloc(x801 * sizeof(float)); float* x803 = (float*)myMalloc(1 * sizeof(float));; x803[0] = 0.0f; float* x805 = (float*)myMalloc(1 * sizeof(float));; x805[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x766, x766)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x796, x796)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
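// The forward call below is the 1x1 "expand" branch of the fifth fire module: the
// 48 squeeze channels in x787 are mapped to 192 channels at 8x8 (weights x239,
// bias x233).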
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x805, in_desc, x787, filt_desc, x239, conv_desc, algo, ws_data, ws_size, x803, out_desc, x802)); }; float* x808 = (float*)myMalloc(1 * sizeof(float));; x808[0] = 1.0f; float* x810 = (float*)myMalloc(1 * sizeof(float));; x810[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 192, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x796, x796)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x808, bias_desc, x233, x810, out_desc, x802)); }; float* x813 = (float*)myMalloc(1 * sizeof(float));; x813[0] = 0.0f; float* x815 = (float*)myMalloc(1 * sizeof(float));; x815[0] = 1.0f; float* x817 = (float*)myGpuMalloc(x799 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x796, x796)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x815, x_desc, x802, x813, x_desc, x817)); }; if (x821) { } else { assert(false && "ERROR not specified"); } float* x834 = (float*)myGpuMalloc(x833 * sizeof(float)); float* x835 = (float*)myMalloc(1 * sizeof(float));; x835[0] = 0.0f; float* x837 = (float*)myMalloc(1 * sizeof(float));; x837[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x766, x766)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x828, x828)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x837, in_desc, x787, filt_desc, x212, conv_desc, algo, ws_data, ws_size, x835, out_desc, x834)); }; float* x840 = (float*)myMalloc(1 * sizeof(float));; x840[0] = 1.0f; float* x842 = (float*)myMalloc(1 * sizeof(float));; x842[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 192, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x828, x828)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x840, bias_desc, x182, x842, out_desc, x834)); }; float* x845 = (float*)myMalloc(1 * sizeof(float));; x845[0] = 0.0f; float* x847 = (float*)myMalloc(1 * sizeof(float));; x847[0] = 1.0f; float* x849 = (float*)myGpuMalloc(x831 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x828, x828)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x847, x_desc, x834, x845, x_desc, x849)); }; if (x854) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x865 = (float*)myGpuMalloc(x864 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x817, 192, x799, x849, 192, x831, x865, 1, 64, 384, x796, x796, x861, x797, x796, 1); }; if (x868) { } else { assert(false && "ERROR not specified"); } float* x880 = (float*)myGpuMalloc(x879 * sizeof(float)); float* x881 = (float*)myMalloc(1 * sizeof(float));; x881[0] = 0.0f; float* x883 = (float*)myMalloc(1 * sizeof(float));; x883[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 384, x796, x796)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 48, 384, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x874, x874)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x883, in_desc, x865, filt_desc, x137, conv_desc, algo, ws_data, ws_size, x881, out_desc, x880)); }; float* x886 = (float*)myMalloc(1 * sizeof(float));; x886[0] = 1.0f; float* x888 = (float*)myMalloc(1 * sizeof(float));; x888[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 48, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x874, x874)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x886, bias_desc, x101, x888, out_desc, x880)); }; float* x891 = (float*)myMalloc(1 * sizeof(float));; x891[0] = 0.0f; float* x893 = (float*)myMalloc(1 * sizeof(float));; x893[0] = 1.0f; float* x895 = (float*)myGpuMalloc(x877 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x874, x874)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x893, x_desc, x880, x891, x_desc, x895)); }; if (x898) { } else { assert(false && "ERROR not specified"); } float* x910 = (float*)myGpuMalloc(x909 * sizeof(float)); float* x911 = (float*)myMalloc(1 * sizeof(float));; x911[0] = 0.0f; float* x913 = (float*)myMalloc(1 * sizeof(float));; x913[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x874, x874)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x904, x904)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x913, in_desc, x895, filt_desc, x161, conv_desc, algo, ws_data, ws_size, x911, out_desc, x910)); }; float* x916 = (float*)myMalloc(1 * sizeof(float));; x916[0] = 1.0f; float* x918 = (float*)myMalloc(1 * sizeof(float));; x918[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 192, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x904, x904)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x916, bias_desc, x191, x918, out_desc, x910)); }; float* x921 = (float*)myMalloc(1 * sizeof(float));; x921[0] = 0.0f; float* x923 = (float*)myMalloc(1 * sizeof(float));; x923[0] = 1.0f; float* x925 = (float*)myGpuMalloc(x907 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x904, x904)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x923, x_desc, x910, x921, x_desc, x925)); }; if (x929) { } else { assert(false && "ERROR not specified"); } float* x942 = (float*)myGpuMalloc(x941 * sizeof(float)); float* x943 = (float*)myMalloc(1 * sizeof(float));; x943[0] = 0.0f; float* x945 = (float*)myMalloc(1 * sizeof(float));; x945[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 48, x874, x874)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 192, 48, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x936, x936)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x945, in_desc, x895, filt_desc, x149, conv_desc, algo, ws_data, ws_size, x943, out_desc, x942)); }; float* x948 = (float*)myMalloc(1 * sizeof(float));; x948[0] = 1.0f; float* x950 = (float*)myMalloc(1 * sizeof(float));; x950[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 192, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x936, x936)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x948, bias_desc, x227, x950, out_desc, x942)); }; float* x953 = (float*)myMalloc(1 * sizeof(float));; x953[0] = 0.0f; float* x955 = (float*)myMalloc(1 * sizeof(float));; x955[0] = 1.0f; float* x957 = (float*)myGpuMalloc(x939 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 192, x936, x936)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x955, x_desc, x942, x953, x_desc, x957)); }; if (x962) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x973 = (float*)myGpuMalloc(x972 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x925, 192, x907, x957, 192, x939, x973, 1, 64, 384, x904, x904, x969, x905, x904, 1); }; if (x976) { } else { assert(false && "ERROR not specified"); } float* x988 = (float*)myGpuMalloc(x987 * sizeof(float)); float* x989 = (float*)myMalloc(1 * sizeof(float));; x989[0] = 0.0f; float* x991 = (float*)myMalloc(1 * sizeof(float));; x991[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 384, x904, x904)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 384, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x982, x982)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x991, in_desc, x973, filt_desc, x197, conv_desc, algo, ws_data, ws_size, x989, out_desc, x988)); }; float* x994 = (float*)myMalloc(1 * sizeof(float));; x994[0] = 1.0f; float* x996 = (float*)myMalloc(1 * sizeof(float));; x996[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x982, x982)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x994, bias_desc, x122, x996, out_desc, x988)); }; float* x999 = (float*)myMalloc(1 * sizeof(float));; x999[0] = 0.0f; float* x1001 = (float*)myMalloc(1 * sizeof(float));; x1001[0] = 1.0f; float* x1003 = (float*)myGpuMalloc(x985 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x982, x982)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1001, x_desc, x988, x999, x_desc, x1003)); }; if (x1006) { } else { assert(false && "ERROR not specified"); } float* x1018 = (float*)myGpuMalloc(x1017 * sizeof(float)); float* x1019 = (float*)myMalloc(1 * sizeof(float));; x1019[0] = 0.0f; float* x1021 = (float*)myMalloc(1 * sizeof(float));; x1021[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x982, x982)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1012, x1012)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1021, in_desc, x1003, filt_desc, x242, conv_desc, algo, ws_data, ws_size, x1019, out_desc, x1018)); }; float* x1024 = (float*)myMalloc(1 * sizeof(float));; x1024[0] = 1.0f; float* x1026 = (float*)myMalloc(1 * sizeof(float));; x1026[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1012, x1012)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1024, bias_desc, x215, x1026, out_desc, x1018)); }; float* x1029 = (float*)myMalloc(1 * sizeof(float));; x1029[0] = 0.0f; float* x1031 = (float*)myMalloc(1 * sizeof(float));; x1031[0] = 1.0f; float* x1033 = (float*)myGpuMalloc(x1015 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1012, x1012)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1031, x_desc, x1018, x1029, x_desc, x1033)); }; if (x1037) { } else { assert(false && "ERROR not specified"); } float* x1050 = (float*)myGpuMalloc(x1049 * sizeof(float)); float* x1051 = (float*)myMalloc(1 * sizeof(float));; x1051[0] = 0.0f; float* x1053 = (float*)myMalloc(1 * sizeof(float));; x1053[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x982, x982)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1044, x1044)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1053, in_desc, x1003, filt_desc, x179, conv_desc, algo, ws_data, ws_size, x1051, out_desc, x1050)); }; float* x1056 = (float*)myMalloc(1 * sizeof(float));; x1056[0] = 1.0f; float* x1058 = (float*)myMalloc(1 * sizeof(float));; x1058[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1044, x1044)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1056, bias_desc, x134, x1058, out_desc, x1050)); }; float* x1061 = (float*)myMalloc(1 * sizeof(float));; x1061[0] = 0.0f; float* x1063 = (float*)myMalloc(1 * sizeof(float));; x1063[0] = 1.0f; float* x1065 = (float*)myGpuMalloc(x1047 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1044, x1044)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1063, x_desc, x1050, x1061, x_desc, x1065)); }; if (x1070) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x1081 = (float*)myGpuMalloc(x1080 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x1033, 256, x1015, x1065, 256, x1047, x1081, 1, 64, 512, x1012, x1012, x1077, x1013, x1012, 1); }; float* x1083 = (float*)myMalloc(1 * sizeof(float));; x1083[0] = 0.0f; float* x1085 = (float*)myMalloc(1 * sizeof(float));; x1085[0] = 1.0f; float* x1095 = (float*)myGpuMalloc(x1094 * sizeof(float)); { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1012, x1012) ); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1089, x1089)); cudnnPoolingDescriptor_t poolingDesc; CUDNN_CALL(cudnnCreatePoolingDescriptor(&poolingDesc)); CUDNN_CALL(cudnnSetPooling2dDescriptor( poolingDesc, CUDNN_POOLING_MAX, CUDNN_NOT_PROPAGATE_NAN, 2, 2, 0, 0, 2, 2 )); CUDNN_CALL(cudnnPoolingForward( cudnnHandle, poolingDesc, x1085, in_desc, x1081, x1083, out_desc, x1095)); }; if (x1098) { } else { assert(false && "ERROR not specified"); } float* x1110 = (float*)myGpuMalloc(x1109 * sizeof(float)); float* x1111 = (float*)myMalloc(1 * sizeof(float));; x1111[0] = 0.0f; float* x1113 = (float*)myMalloc(1 * sizeof(float));; x1113[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1089, x1089)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 64, 512, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); 
CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1104, x1104)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1113, in_desc, x1095, filt_desc, x98, conv_desc, algo, ws_data, ws_size, x1111, out_desc, x1110)); }; float* x1116 = (float*)myMalloc(1 * sizeof(float));; x1116[0] = 1.0f; float* x1118 = (float*)myMalloc(1 * sizeof(float));; x1118[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1104, x1104)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1116, bias_desc, x155, x1118, out_desc, x1110)); }; float* x1121 = (float*)myMalloc(1 * sizeof(float));; x1121[0] = 0.0f; float* x1123 = (float*)myMalloc(1 * sizeof(float));; x1123[0] = 1.0f; float* x1125 = (float*)myGpuMalloc(x1107 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1104, x1104)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1123, x_desc, x1110, x1121, x_desc, x1125)); }; if (x1128) { } else { assert(false && "ERROR not specified"); } float* x1140 = (float*)myGpuMalloc(x1139 * sizeof(float)); float* x1141 = (float*)myMalloc(1 * sizeof(float));; x1141[0] = 0.0f; float* x1143 = (float*)myMalloc(1 * sizeof(float));; x1143[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1104, x1104)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1134, x1134)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. 
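// Note: cudnnGetConvolutionForwardAlgorithm with CUDNN_CONVOLUTION_FWD_PREFER_FASTEST
// is a cuDNN 7.x API; it was removed in cuDNN 8, where
// cudnnGetConvolutionForwardAlgorithm_v7 or cudnnFindConvolutionForwardAlgorithm
// serves the same purpose.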
cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1143, in_desc, x1125, filt_desc, x209, conv_desc, algo, ws_data, ws_size, x1141, out_desc, x1140)); }; float* x1146 = (float*)myMalloc(1 * sizeof(float));; x1146[0] = 1.0f; float* x1148 = (float*)myMalloc(1 * sizeof(float));; x1148[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1134, x1134)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1146, bias_desc, x173, x1148, out_desc, x1140)); }; float* x1151 = (float*)myMalloc(1 * sizeof(float));; x1151[0] = 0.0f; float* x1153 = (float*)myMalloc(1 * sizeof(float));; x1153[0] = 1.0f; float* x1155 = (float*)myGpuMalloc(x1137 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1134, x1134)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1153, x_desc, x1140, x1151, x_desc, x1155)); }; if (x1159) { } else { assert(false && "ERROR not specified"); } float* x1172 = (float*)myGpuMalloc(x1171 * sizeof(float)); float* x1173 = (float*)myMalloc(1 * sizeof(float));; x1173[0] = 0.0f; float* x1175 = (float*)myMalloc(1 * sizeof(float));; x1175[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 64, x1104, x1104)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 256, 64, 3, 3)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1166, x1166)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1175, in_desc, x1125, filt_desc, x185, conv_desc, algo, ws_data, ws_size, x1173, out_desc, x1172)); }; float* x1178 = (float*)myMalloc(1 * sizeof(float));; x1178[0] = 1.0f; float* x1180 = (float*)myMalloc(1 * sizeof(float));; x1180[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 256, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1166, x1166)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1178, bias_desc, x146, x1180, out_desc, x1172)); }; float* x1183 = (float*)myMalloc(1 * sizeof(float));; x1183[0] = 0.0f; float* x1185 = (float*)myMalloc(1 * sizeof(float));; x1185[0] = 1.0f; float* x1187 = (float*)myGpuMalloc(x1169 * sizeof(float)); { cudnnTensorDescriptor_t x_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( x_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 256, x1166, x1166)); cudnnActivationDescriptor_t act_desc; CUDNN_CALL(cudnnCreateActivationDescriptor(&act_desc)); CUDNN_CALL(cudnnSetActivationDescriptor(act_desc, /*mode=*/ CUDNN_ACTIVATION_RELU, /*reluNanOpt=*/ CUDNN_PROPAGATE_NAN, /*relu_coef=*/ 0)); CUDNN_CALL(cudnnActivationForward( cudnnHandle, act_desc, x1185, x_desc, x1172, x1183, x_desc, x1187)); }; if (x1192) { } else { printf("all dimensions except the concatenation dimension should be the same\n"); assert(false && ""); } // back prop for concat float* x1203 = (float*)myGpuMalloc(x1202 * sizeof(float)); { dim3 grid(28, 2); concat2D_1D_greg<<<grid, 512>>>(x1155, 256, x1137, x1187, 256, x1169, x1203, 1, 64, 512, x1134, x1134, x1199, x1135, x1134, 1); }; if (x1206) { } else { assert(false && "ERROR not specified"); } float* x1219 = (float*)myGpuMalloc(x1218 * sizeof(float)); float* x1220 = (float*)myMalloc(1 * sizeof(float));; x1220[0] = 0.0f; float* x1222 = (float*)myMalloc(1 * sizeof(float));; x1222[0] = 1.0f; { cudnnTensorDescriptor_t in_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&in_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( in_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 512, x1134, x1134)); cudnnFilterDescriptor_t filt_desc; CUDNN_CALL(cudnnCreateFilterDescriptor(&filt_desc)); CUDNN_CALL(cudnnSetFilter4dDescriptor( filt_desc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, 10, 512, 4, 4)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, x1213, x1213)); cudnnConvolutionDescriptor_t conv_desc; CUDNN_CALL(cudnnCreateConvolutionDescriptor(&conv_desc)); CUDNN_CALL(cudnnSetConvolution2dDescriptor( conv_desc, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT)); CUDNN_CALL(cudnnSetConvolutionMathType(conv_desc, CUDNN_TENSOR_OP_MATH)); // Algorithm. cudnnConvolutionFwdAlgo_t algo; CUDNN_CALL(cudnnGetConvolutionForwardAlgorithm( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &algo)); // Workspace. size_t ws_size; CUDNN_CALL(cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle, in_desc, filt_desc, conv_desc, out_desc, algo, &ws_size)); void *ws_data = myGpuMalloc(ws_size); // Execute convolution. 
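// Final classifier layer: a 4x4 convolution over the 512-channel concatenated
// feature map (filter shape 10 x 512 x 4 x 4) produces 10 class scores for each
// of the 64 images in the batch; the result is bias-added and then shape-checked
// below before the allocator arenas are reset.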
CUDNN_CALL(cudnnConvolutionForward( cudnnHandle, x1222, in_desc, x1203, filt_desc, x107, conv_desc, algo, ws_data, ws_size, x1220, out_desc, x1219)); }; float* x1225 = (float*)myMalloc(1 * sizeof(float));; x1225[0] = 1.0f; float* x1227 = (float*)myMalloc(1 * sizeof(float));; x1227[0] = 1.0f; { cudnnTensorDescriptor_t bias_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&bias_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( bias_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, 10, 1, 1)); cudnnTensorDescriptor_t out_desc; CUDNN_CALL(cudnnCreateTensorDescriptor(&out_desc)); CUDNN_CALL(cudnnSetTensor4dDescriptor( out_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 64, 10, x1213, x1213)); CUDNN_CALL(cudnnAddTensor( cudnnHandle, x1225, bias_desc, x248, x1227, out_desc, x1219)); }; int32_t x1230 = 0; int32_t x1231 = 1; x1231 *= 64; x1231 *= 10; int32_t x1234 = x1230; bool x1235 = x1234 >= 2; if (x1235) { printf("cannot have 2 or more -1s in resize!!\n"); assert(false && ""); } else { } bool x1241 = x1234 == 0; if (x1241) { int32_t x1242 = x1231; bool x1243 = x1242 == x1216; if (x1243) { } else { assert(false && "must same size!!"); } } else { } int64_t x1250 = (long)mallocAddr; int64_t x1251 = x1250 - x253; memset((void*)x253, 0, x1251); mallocAddr = (void*)x253; int64_t x1254 = (long)gpuMallocAddr; int64_t x1255 = x1254 - x254; cudaMemset((void*)x254, 0, x1255); gpuMallocAddr = (void*)x254; } gettimeofday(&end_1, NULL); timeval_subtract(&diff_1, &end_1, &begin_1);; int64_t x1262 = ((diff_1.tv_sec * 1000000L) + (diff_1.tv_usec)); int64_t x1263 = x1262 / 1000LL; int64_t x1265 = x1262 / x1264; printf("Inferencing completed in %ldms (%ld us/images)\n",x1263,x1265); } // Backend cleanup. CUBLAS_CALL(cublasDestroy(cublasHandle)); CUDA_CALL(cudaFree(gpuMallocBase)); CUDNN_CALL(cudnnDestroy(cudnnHandle)); } /***************************************** End of C Generated Code *******************************************/
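/*
 * The generated code above assumes a small hand-written runtime that is not part
 * of this excerpt: the CUDA_CALL / CUDNN_CALL / CUBLAS_CALL error-checking macros
 * and the myMalloc / myGpuMalloc bump allocators whose arenas are reset wholesale
 * after every batch. The sketch below shows one plausible shape for those helpers;
 * the exact definitions used by the code generator may differ.
 */
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <cudnn.h>

#define CUDA_CALL(f) do {                                          \
    cudaError_t err_ = (f);                                        \
    if (err_ != cudaSuccess) {                                     \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                  \
              cudaGetErrorString(err_), __FILE__, __LINE__);       \
      exit(1);                                                     \
    }                                                              \
  } while (0)

#define CUDNN_CALL(f) do {                                         \
    cudnnStatus_t stat_ = (f);                                     \
    if (stat_ != CUDNN_STATUS_SUCCESS) {                           \
      fprintf(stderr, "cuDNN error %s at %s:%d\n",                 \
              cudnnGetErrorString(stat_), __FILE__, __LINE__);     \
      exit(1);                                                     \
    }                                                              \
  } while (0)

// Bump ("arena") allocation on the device: myGpuMalloc hands out consecutive
// slices of one large cudaMalloc'd region, and an entire batch worth of
// allocations is reclaimed by resetting gpuMallocAddr back to the arena base,
// which is what the cudaMemset plus pointer reset at the end of the inference
// loop above does. The names mirror the generated code; the concrete layout
// here is an illustrative assumption.
static void* gpuMallocBase = nullptr;  // arena start, allocated once at startup
static void* gpuMallocAddr = nullptr;  // current bump pointer into the arena

static void* myGpuMalloc(size_t bytes) {
  bytes = (bytes + 7) & ~(size_t)7;    // keep 8-byte alignment for float data
  void* result = gpuMallocAddr;
  gpuMallocAddr = (void*)((char*)gpuMallocAddr + bytes);
  return result;
}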
#include <cuda.h> #include "../../engines/cuda/utils.hh" #include <stdint.h> #include <assert.h> #include <stdio.h> /******************************************************************* HMAC-SHA1 kernel ******************************************************************/ #ifdef __DEVICE_EMULATION__ #define debugprint printf #define EMUSYNC __syncthreads() #else __device__ void _NOOPfunction(char *format) { } __device__ void _NOOPfunction(char *format, unsigned int onearg) { } __device__ void _NOOPfunction(char *format, unsigned int onearg, unsigned int twoargs) { } __device__ void _NOOPfunction(char *format, char *onearg) { } #define EMUSYNC do {} while (0) #define debugprint _NOOPfunction #endif #define SHA1_THREADS_PER_BLK 32 //__global__ uint32_t d_pad_buffer[16 * 2 * MAX_CHUNK_SIZE * MAX_GROUP_SIZE]; __device__ uint32_t swap(uint32_t v) { return ((v & 0x000000ffU) << 24) | ((v & 0x0000ff00U) << 8) | ((v & 0x00ff0000U) >> 8) | ((v & 0xff000000U) >> 24); } typedef struct hash_digest { uint32_t h1; uint32_t h2; uint32_t h3; uint32_t h4; uint32_t h5; } hash_digest_t; #define HMAC __inline__ __device__ void getBlock(char* buf, int offset, int len, uint32_t* dest) { uint32_t *tmp; unsigned int tempbuf[16]; tmp = (uint32_t*) (buf + offset); debugprint("%d %d\n", offset, len); if (offset + 64 <= len) { debugprint("--0--\n"); #pragma unroll 16 for (int i = 0; i < 16; i++) { dest[i] = swap(tmp[i]); } } else if (len > offset && (len - offset) < 56) { //case 1 enough space in last block for padding debugprint("--1--\n"); int i; for (i = 0; i < (len - offset) / 4; i++) { //debugprint("%d %d\n",offset,i); //debugprint("%p %p\n", buf, dest); //tempbuf[i] = buf[i]; tempbuf[i] = swap(tmp[i]); } //printf("len%%4 %d\n",len%4); switch (len % 4) { case 0: tempbuf[i] = swap(0x00000080); i++; break; case 1: tempbuf[i] = swap(0x00008000 | (tmp[i] & 0x000000FF)); i++; break; case 2: tempbuf[i] = swap(0x00800000 | (tmp[i] & 0x0000FFFF)); i++; break; case 3: tempbuf[i] = swap(0x80000000 | (tmp[i] & 0x00FFFFFF)); i++; break; }; for (; i < 14; i++) { tempbuf[i] = 0; } #pragma unroll 14 for (i = 0; i < 14; i++) { dest[i] = tempbuf[i]; } dest[14] = 0x00000000; #ifndef HMAC dest[15] = len * 8; #else dest[15] = (len + 64) * 8; #endif } else if (len > offset && (len - offset) >= 56) { //case 2 not enough space in last block (containing message) for padding debugprint("--2--\n"); int i; for (i = 0; i < (len - offset) / 4; i++) { tempbuf[i] = swap(tmp[i]); } switch (len % 4) { case 0: tempbuf[i] = swap(0x00000080); i++; break; case 1: tempbuf[i] = swap(0x00008000 | (tmp[i] & 0x000000FF)); i++; break; case 2: tempbuf[i] = swap(0x00800000 | (tmp[i] & 0x0000FFFF)); i++; break; case 3: tempbuf[i] = swap(0x80000000 | (tmp[i] & 0x00FFFFFF)); i++; break; }; for (; i < 16; i++) { tempbuf[i] = 0x00000000; } #pragma unroll 16 for (i = 0; i < 16; i++) { dest[i] = tempbuf[i]; } } else if (offset == len) { //message end is aligned in 64 bytes debugprint("--3--\n"); dest[0] = swap(0x00000080); #pragma unroll 13 for (int i = 1; i < 14; i++) dest[i] = 0x00000000; dest[14] = 0x00000000; #ifndef HMAC dest[15] = len * 8; #else dest[15] = (len + 64) * 8; #endif } else if (offset > len) { //the last block in case 2 debugprint("--4--\n"); #pragma unroll 14 for (int i = 0; i < 14; i++) dest[i] = 0x00000000; dest[14] = 0x00000000; #ifndef HMAC dest[15] = len * 8; #else dest[15] = (len + 64) * 8; #endif } else { debugprint("Not supposed to happen\n"); } } __device__ void computeSHA1Block(char* in, uint32_t* w, int offset, int len, hash_digest_t 
&h) { uint32_t a = h.h1; uint32_t b = h.h2; uint32_t c = h.h3; uint32_t d = h.h4; uint32_t e = h.h5; uint32_t f; uint32_t k; uint32_t temp; getBlock(in, offset, len, w); //for (int i = 0; i < 16 ; i++) { // debugprint("%0X\n", w[i]); //} //debugprint("\n"); k = 0x5A827999; //0 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[0]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[0] = w[13] ^ w[8] ^ w[2] ^ w[0]; w[0] = w[0] << 1 | w[0] >> 31; //1 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[1]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[1] = w[14] ^ w[9] ^ w[3] ^ w[1]; w[1] = w[1] << 1 | w[1] >> 31; //2 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[2]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[2] = w[15] ^ w[10] ^ w[4] ^ w[2]; w[2] = w[2] << 1 | w[2] >> 31; //3 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[3]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[3] = w[0] ^ w[11] ^ w[5] ^ w[3]; w[3] = w[3] << 1 | w[3] >> 31; //4 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[4]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[4] = w[1] ^ w[12] ^ w[6] ^ w[4]; w[4] = w[4] << 1 | w[4] >> 31; //5 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[5]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[5] = w[2] ^ w[13] ^ w[7] ^ w[5]; w[5] = w[5] << 1 | w[5] >> 31; //6 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[6]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[6] = w[3] ^ w[14] ^ w[8] ^ w[6]; w[6] = w[6] << 1 | w[6] >> 31; //7 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[7]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[7] = w[4] ^ w[15] ^ w[9] ^ w[7]; w[7] = w[7] << 1 | w[7] >> 31; //8 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[8]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[8] = w[5] ^ w[0] ^ w[10] ^ w[8]; w[8] = w[8] << 1 | w[8] >> 31; //9 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[9]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[9] = w[6] ^ w[1] ^ w[11] ^ w[9]; w[9] = w[9] << 1 | w[9] >> 31; //10 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[10]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[10] = w[7] ^ w[2] ^ w[12] ^ w[10]; w[10] = w[10] << 1 | w[10] >> 31; //11 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[11]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[11] = w[8] ^ w[3] ^ w[13] ^ w[11]; w[11] = w[11] << 1 | w[11] >> 31; //12 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[12]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[12] = w[9] ^ w[4] ^ w[14] ^ w[12]; w[12] = w[12] << 1 | w[12] >> 31; //13 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[13]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[13] = w[10] ^ w[5] ^ w[15] ^ w[13]; w[13] = w[13] << 1 | w[13] >> 31; //14 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[14]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[14] = w[11] ^ w[6] ^ w[0] ^ w[14]; w[14] = w[14] << 1 | w[14] >> 31; //15 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + 
k + w[15]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[15] = w[12] ^ w[7] ^ w[1] ^ w[15]; w[15] = w[15] << 1 | w[15] >> 31; //16 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[0]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[0] = w[13] ^ w[8] ^ w[2] ^ w[0]; w[0] = w[0] << 1 | w[0] >> 31; //17 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[1]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[1] = w[14] ^ w[9] ^ w[3] ^ w[1]; w[1] = w[1] << 1 | w[1] >> 31; //18 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[2]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[2] = w[15] ^ w[10] ^ w[4] ^ w[2]; w[2] = w[2] << 1 | w[2] >> 31; //19 of 0-20 f = (b & c) | ((~b) & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[3]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[3] = w[0] ^ w[11] ^ w[5] ^ w[3]; w[3] = w[3] << 1 | w[3] >> 31; k = 0x6ED9EBA1; //20 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[4]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[4] = w[1] ^ w[12] ^ w[6] ^ w[4]; w[4] = w[4] << 1 | w[4] >> 31; //21 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[5]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[5] = w[2] ^ w[13] ^ w[7] ^ w[5]; w[5] = w[5] << 1 | w[5] >> 31; //22 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[6]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[6] = w[3] ^ w[14] ^ w[8] ^ w[6]; w[6] = w[6] << 1 | w[6] >> 31; //23 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[7]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[7] = w[4] ^ w[15] ^ w[9] ^ w[7]; w[7] = w[7] << 1 | w[7] >> 31; //24 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[8]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[8] = w[5] ^ w[0] ^ w[10] ^ w[8]; w[8] = w[8] << 1 | w[8] >> 31; //25 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[9]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[9] = w[6] ^ w[1] ^ w[11] ^ w[9]; w[9] = w[9] << 1 | w[9] >> 31; //26 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[10]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[10] = w[7] ^ w[2] ^ w[12] ^ w[10]; w[10] = w[10] << 1 | w[10] >> 31; //27 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[11]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[11] = w[8] ^ w[3] ^ w[13] ^ w[11]; w[11] = w[11] << 1 | w[11] >> 31; //28 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[12]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[12] = w[9] ^ w[4] ^ w[14] ^ w[12]; w[12] = w[12] << 1 | w[12] >> 31; //29 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[13]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[13] = w[10] ^ w[5] ^ w[15] ^ w[13]; w[13] = w[13] << 1 | w[13] >> 31; //30 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[14]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[14] = w[11] ^ w[6] ^ w[0] ^ w[14]; w[14] = w[14] << 1 | w[14] >> 31; //31 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[15]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[15] = w[12] ^ w[7] ^ w[1] ^ w[15]; w[15] = w[15] << 1 | w[15] >> 31; //32 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[0]; e = d; d = 
c; c = (b << 30) | (b >> 2); b = a; a = temp; w[0] = w[13] ^ w[8] ^ w[2] ^ w[0]; w[0] = w[0] << 1 | w[0] >> 31; //33 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[1]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[1] = w[14] ^ w[9] ^ w[3] ^ w[1]; w[1] = w[1] << 1 | w[1] >> 31; //34 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[2]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[2] = w[15] ^ w[10] ^ w[4] ^ w[2]; w[2] = w[2] << 1 | w[2] >> 31; //35 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[3]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[3] = w[0] ^ w[11] ^ w[5] ^ w[3]; w[3] = w[3] << 1 | w[3] >> 31; //36 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[4]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[4] = w[1] ^ w[12] ^ w[6] ^ w[4]; w[4] = w[4] << 1 | w[4] >> 31; //37 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[5]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[5] = w[2] ^ w[13] ^ w[7] ^ w[5]; w[5] = w[5] << 1 | w[5] >> 31; //38 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[6]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[6] = w[3] ^ w[14] ^ w[8] ^ w[6]; w[6] = w[6] << 1 | w[6] >> 31; //39 of 20-40 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[7]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[7] = w[4] ^ w[15] ^ w[9] ^ w[7]; w[7] = w[7] << 1 | w[7] >> 31; k = 0x8F1BBCDC; //40 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[8]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[8] = w[5] ^ w[0] ^ w[10] ^ w[8]; w[8] = w[8] << 1 | w[8] >> 31; //41 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[9]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[9] = w[6] ^ w[1] ^ w[11] ^ w[9]; w[9] = w[9] << 1 | w[9] >> 31; //42 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[10]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[10] = w[7] ^ w[2] ^ w[12] ^ w[10]; w[10] = w[10] << 1 | w[10] >> 31; //43 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[11]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[11] = w[8] ^ w[3] ^ w[13] ^ w[11]; w[11] = w[11] << 1 | w[11] >> 31; //44 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[12]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[12] = w[9] ^ w[4] ^ w[14] ^ w[12]; w[12] = w[12] << 1 | w[12] >> 31; //45 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[13]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[13] = w[10] ^ w[5] ^ w[15] ^ w[13]; w[13] = w[13] << 1 | w[13] >> 31; //46 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[14]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[14] = w[11] ^ w[6] ^ w[0] ^ w[14]; w[14] = w[14] << 1 | w[14] >> 31; //47 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[15]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[15] = w[12] ^ w[7] ^ w[1] ^ w[15]; w[15] = w[15] << 1 | w[15] >> 31; //48 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[0]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[0] = w[13] ^ w[8] ^ w[2] ^ w[0]; w[0] = w[0] << 1 | w[0] 
>> 31; //49 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[1]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[1] = w[14] ^ w[9] ^ w[3] ^ w[1]; w[1] = w[1] << 1 | w[1] >> 31; //50 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[2]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[2] = w[15] ^ w[10] ^ w[4] ^ w[2]; w[2] = w[2] << 1 | w[2] >> 31; //51 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[3]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[3] = w[0] ^ w[11] ^ w[5] ^ w[3]; w[3] = w[3] << 1 | w[3] >> 31; //52 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[4]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[4] = w[1] ^ w[12] ^ w[6] ^ w[4]; w[4] = w[4] << 1 | w[4] >> 31; //53 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[5]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[5] = w[2] ^ w[13] ^ w[7] ^ w[5]; w[5] = w[5] << 1 | w[5] >> 31; //54 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[6]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[6] = w[3] ^ w[14] ^ w[8] ^ w[6]; w[6] = w[6] << 1 | w[6] >> 31; //55 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[7]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[7] = w[4] ^ w[15] ^ w[9] ^ w[7]; w[7] = w[7] << 1 | w[7] >> 31; //56 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[8]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[8] = w[5] ^ w[0] ^ w[10] ^ w[8]; w[8] = w[8] << 1 | w[8] >> 31; //57 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[9]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[9] = w[6] ^ w[1] ^ w[11] ^ w[9]; w[9] = w[9] << 1 | w[9] >> 31; //58 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[10]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[10] = w[7] ^ w[2] ^ w[12] ^ w[10]; w[10] = w[10] << 1 | w[10] >> 31; //59 of 40-60 f = (b & c) | (b & d) | (c & d); temp = ((a << 5) | (a >> 27)) + f + e + k + w[11]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[11] = w[8] ^ w[3] ^ w[13] ^ w[11]; w[11] = w[11] << 1 | w[11] >> 31; k = 0xCA62C1D6; //60 of 60-64 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[12]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[12] = w[9] ^ w[4] ^ w[14] ^ w[12]; w[12] = w[12] << 1 | w[12] >> 31; //61 of 60-64 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[13]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[13] = w[10] ^ w[5] ^ w[15] ^ w[13]; w[13] = w[13] << 1 | w[13] >> 31; //62 of 60-64 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[14]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[14] = w[11] ^ w[6] ^ w[0] ^ w[14]; w[14] = w[14] << 1 | w[14] >> 31; //63 of 60-64 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[15]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; w[15] = w[12] ^ w[7] ^ w[1] ^ w[15]; w[15] = w[15] << 1 | w[15] >> 31; //64 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[0]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; //65 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[1]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = 
temp; //66 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[2]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; //67 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[3]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; //68 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[4]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; //69 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[5]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; //70 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[6]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; //71 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[7]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; //72 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[8]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; //73 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[9]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; //74 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[10]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; //75 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[11]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; //76 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[12]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; //77 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[13]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; //78 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[14]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; //79 of 64-80 f = b ^ c ^ d; temp = ((a << 5) | (a >> 27)) + f + e + k + w[15]; e = d; d = c; c = (b << 30) | (b >> 2); b = a; a = temp; h.h1 += a; h.h2 += b; h.h3 += c; h.h4 += d; h.h5 += e; } /* __global__ void computeSHA1(char* buf, int *offsets, int *len, char* output, int N) { //__shared__ uint32_t w_shared[16*SHA1_THREADS_PER_BLK]; uint32_t w_register[16]; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < N) { uint32_t *w = w_register;//w_shared + 16*threadIdx.x; hash_digest_t h; h.h1 = 0x67452301; h.h2 = 0xEFCDAB89; h.h3 = 0x98BADCFE; h.h4 = 0x10325476; h.h5 = 0xC3D2E1F0; int num_iter = (len[index]+63+9)/64; debugprint("num_iter %d\n", num_iter); for(int i = 0; i < num_iter; i++) computeSHA1Block(buf + offsets[index], w, i*64 , len[index], h); h.h1 = swap(h.h1); h.h2 = swap(h.h2); h.h3 = swap(h.h3); h.h4 = swap(h.h4); h.h5 = swap(h.h5); uint32_t * out = (uint32_t*)(output + index*20); *(out++) = h.h1; *(out++) = h.h2; *(out++) = h.h3; *(out++) = h.h4; *(out++) = h.h5; } }*/ /* some how *pad = *pad++ ^ *key++ was optimized and does not work correctly in GPU oTL. */ __device__ void xorpads(uint32_t *pad, uint32_t* key) { #pragma unroll 16 for (int i = 0; i < 16; i++) *(pad + i) = *(pad + i) ^ *(key + i); } /* uint32_t opad[16] = { 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, 0x5c5c5c5c, }; uint32_t ipad[16] = { 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, 0x36363636, }; */ // in: start pointer of the data to be authenticated by hsha1. 
// out: start pointer of the data where hsha1 signature will be recorded. // length: length of the data to be authenticated by hsha1. // key: hmac key. __device__ void HMAC_SHA1(uint32_t *in, uint32_t *out, uint32_t length, char *key) { uint32_t w_register[16]; uint32_t *w = w_register; //w_shared + 16*threadIdx.x; hash_digest_t h; for (int i = 0; i < 16; i++) w[i] = 0x36363636; xorpads(w, (uint32_t*) (key)); h.h1 = 0x67452301; h.h2 = 0xEFCDAB89; h.h3 = 0x98BADCFE; h.h4 = 0x10325476; h.h5 = 0xC3D2E1F0; //SHA1 compute on ipad computeSHA1Block((char*) w, w, 0, 64, h); //SHA1 compute on mesage int num_iter = (length + 63 + 9) / 64; for (int i = 0; i < num_iter; i++) computeSHA1Block((char*) in, w, i * 64, length, h); *(out) = swap(h.h1); *(out + 1) = swap(h.h2); *(out + 2) = swap(h.h3); *(out + 3) = swap(h.h4); *(out + 4) = swap(h.h5); h.h1 = 0x67452301; h.h2 = 0xEFCDAB89; h.h3 = 0x98BADCFE; h.h4 = 0x10325476; h.h5 = 0xC3D2E1F0; for (int i = 0; i < 16; i++) w[i] = 0x5c5c5c5c; xorpads(w, (uint32_t*) (key)); //SHA 1 compute on opads computeSHA1Block((char*) w, w, 0, 64, h); //SHA 1 compute on (hash of ipad|m) computeSHA1Block((char*) out, w, 0, 20, h); *(out) = swap(h.h1); *(out + 1) = swap(h.h2); *(out + 2) = swap(h.h3); *(out + 3) = swap(h.h4); *(out + 4) = swap(h.h5); } /******************************************************************* AES CBC kernel ******************************************************************/ /* former prototype __global__ void AES_cbc_128_encrypt_kernel_SharedMem(const uint8_t *in_all, uint8_t *out_all, const uint32_t *pkt_offset, const uint8_t *keys, uint8_t *ivs, const unsigned int num_flows, uint8_t *checkbits = 0) */ __global__ void AES_cbc_128_encrypt_kernel_SharedMem( const uint8_t *in_all, uint8_t *out_all, size_t *input_size_arr, size_t *output_size_arr, int num_flows, uint8_t *checkbits, int *key_idxs, struct aes_sa_entry *key_array, uint8_t *ivs, const uint32_t *pkt_offset ) { __shared__ uint32_t shared_Te0[256]; __shared__ uint32_t shared_Te1[256]; __shared__ uint32_t shared_Te2[256]; __shared__ uint32_t shared_Te3[256]; __shared__ uint32_t shared_Rcon[10]; /* computer the thread id */ int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= num_flows) return; /* initialize T boxes */ for (unsigned i = 0 ; i *blockDim.x < 256 ; i++) { unsigned index = threadIdx.x + i * blockDim.x; if (index >= num_flows) break; shared_Te0[index] = Te0_ConstMem[index]; shared_Te1[index] = Te1_ConstMem[index]; shared_Te2[index] = Te2_ConstMem[index]; shared_Te3[index] = Te3_ConstMem[index]; } for(unsigned i = 0; i * blockDim.x < 10; i++){ int index = threadIdx.x + blockDim.x * i; if(index < 10){ shared_Rcon[index] = rcon[index]; } } /* make sure T boxes have been initialized. 
*/ __syncthreads(); /* Locate data */ const uint8_t *in = pkt_offset[idx] + in_all; uint8_t *out = pkt_offset[idx] + out_all; /* int temp = key_idxs[idx]; assert(temp == key_array[temp].entry_idx); assert(key_array[temp].aes_key != NULL); */ const uint8_t *key = key_array[key_idxs[idx]].aes_key; uint8_t *ivec = idx * AES_BLOCK_SIZE + ivs; /* Encrypt using cbc mode */ unsigned long len = pkt_offset[idx + 1] - pkt_offset[idx]; const unsigned char *iv = ivec; while (len >= AES_BLOCK_SIZE) { *((uint64_t*)out) = *((uint64_t*)in) ^ *((uint64_t*)iv); *(((uint64_t*)out) + 1) = *(((uint64_t*)in) + 1) ^ *(((uint64_t*)iv) + 1); AES_128_encrypt(out, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); iv = out; len -= AES_BLOCK_SIZE; in += AES_BLOCK_SIZE; out += AES_BLOCK_SIZE; } if (len) { for(unsigned n = 0; n < len; ++n) out[n] = in[n] ^ iv[n]; for(unsigned n = len; n < AES_BLOCK_SIZE; ++n) out[n] = iv[n]; AES_128_encrypt(out, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); iv = out; } *((uint4*)ivec) = *((uint4*)iv); __syncthreads(); if (threadIdx.x == 0 && checkbits != 0) *(checkbits + blockIdx.x) = 1; } __global__ void AES_cbc_128_decrypt_kernel_SharedMem(const uint8_t *in_all, uint8_t *out_all, uint8_t *keys, uint8_t *ivs, uint16_t *pkt_index, unsigned long block_count, uint8_t *checkbits = 0 ) { int idx = blockDim.x * blockIdx.x + threadIdx.x; __shared__ uint32_t shared_Td0[256]; __shared__ uint32_t shared_Td1[256]; __shared__ uint32_t shared_Td2[256]; __shared__ uint32_t shared_Td3[256]; __shared__ uint8_t shared_Td4[256]; __shared__ uint32_t shared_Rcon[10]; __shared__ uint32_t shared_Te0[256]; __shared__ uint32_t shared_Te1[256]; __shared__ uint32_t shared_Te2[256]; __shared__ uint32_t shared_Te3[256]; /* computer the thread id */ /* initialize T boxes */ for (unsigned i = 0 ; i *blockDim.x < 256 ; i++) { unsigned index = threadIdx.x + i * blockDim.x; if (index >= 256) break; shared_Te0[index] = Te0_ConstMem[index]; shared_Te1[index] = Te1_ConstMem[index]; shared_Te2[index] = Te2_ConstMem[index]; shared_Te3[index] = Te3_ConstMem[index]; shared_Td0[index] = Td0_ConstMem[index]; shared_Td1[index] = Td1_ConstMem[index]; shared_Td2[index] = Td2_ConstMem[index]; shared_Td3[index] = Td3_ConstMem[index]; shared_Td4[index] = Td4_ConstMem[index]; } for(unsigned i = 0; i * blockDim.x < 10; i++){ int index = threadIdx.x + blockDim.x * i; if(index < 10){ shared_Rcon[index] = rcon[index]; } } for (unsigned i = 0; i * blockDim.x < 10; i++) { int index = threadIdx.x + blockDim.x * i; if (index < 10) { shared_Rcon[index] = rcon[index]; } } __syncthreads(); if (idx >= block_count) return; /* Locate data */ const uint8_t *in = idx * AES_BLOCK_SIZE + in_all; uint8_t *out = idx * AES_BLOCK_SIZE + out_all; uint16_t packet_index = pkt_index[idx]; uint32_t rk[4]; rk[0] = *((uint32_t*)(keys + 16 * packet_index)); rk[1] = *((uint32_t*)(keys + 16 * packet_index + 4)); rk[2] = *((uint32_t*)(keys + 16 * packet_index + 8)); rk[3] = *((uint32_t*)(keys + 16 * packet_index + 12)); uint8_t *ivec = packet_index * AES_BLOCK_SIZE + ivs; /* Decrypt using cbc mode */ const unsigned char *iv; if (idx == 0 || pkt_index[idx] != pkt_index[idx-1]) iv = ivec; else iv = in - AES_BLOCK_SIZE; AES_128_decrypt(in, out, rk, shared_Td0, shared_Td1, shared_Td2, shared_Td3, shared_Td4, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); *((uint64_t*)out) = *((uint64_t*)out) ^ *((uint64_t*)iv); *(((uint64_t*)out) + 1) = *(((uint64_t*)out) + 1) ^ *(((uint64_t*)iv) + 1); __syncthreads(); if 
(threadIdx.x == 0 && checkbits != 0) *(checkbits + blockIdx.x) = 1; } /******************************************************************* AES ECB kernel ******************************************************************/ __global__ void AES_ecb_encrypt_kernel(const uint8_t *in_all, uint8_t *out_all, const uint8_t *keys, uint16_t *pkt_index, unsigned long block_count ) { __shared__ uint32_t shared_Te0[256]; __shared__ uint32_t shared_Te1[256]; __shared__ uint32_t shared_Te2[256]; __shared__ uint32_t shared_Te3[256]; __shared__ uint32_t shared_Rcon[10]; /* computer the thread id */ int idx = blockDim.x * blockIdx.x + threadIdx.x; /* initialize T boxes, #threads in block should be larger than 256 */ for (unsigned i = 0; i * blockDim.x < 256; i++) { unsigned index = i * blockDim.x + threadIdx.x; if (index >= 256) break; shared_Te0[index] = Te0_ConstMem[index]; shared_Te1[index] = Te1_ConstMem[index]; shared_Te2[index] = Te2_ConstMem[index]; shared_Te3[index] = Te3_ConstMem[index]; } for (unsigned i = 0; i * blockDim.x < 10; i++) { unsigned index = threadIdx.x + blockDim.x * i; if (index < 10) { shared_Rcon[index] = rcon[index]; } } if (idx >= block_count) return; /* make sure T boxes have been initialized. */ __syncthreads(); /* Locate data */ const uint8_t *in = idx * AES_BLOCK_SIZE + in_all; uint8_t *out = idx * AES_BLOCK_SIZE + out_all; uint16_t pktIndex = pkt_index[idx]; const uint8_t *key = pktIndex * 16 + keys; AES_128_encrypt(in, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); } /************************************************************************** Exported C++ function wrapper function for CUDA kernel ***************************************************************************/ /* * Sangwook: Those wrapper functions are not used in NBA. 
void AES_cbc_128_decrypt_gpu(const uint8_t *in_d, uint8_t *out_d, uint8_t *keys_d, uint8_t *ivs_d, uint16_t *pkt_index_d, unsigned long block_count, uint8_t *checkbits_d, const unsigned int threads_per_blk, cudaStream_t stream ) { unsigned int num_cuda_blks = (block_count+threads_per_blk - 1) / threads_per_blk; if (stream == 0) { AES_cbc_128_decrypt_kernel_SharedMem<<<num_cuda_blks, threads_per_blk>>>( in_d, out_d, keys_d, ivs_d, pkt_index_d, block_count, checkbits_d); } else { AES_cbc_128_decrypt_kernel_SharedMem<<<num_cuda_blks, threads_per_blk, 0, stream>>>( in_d, out_d, keys_d, ivs_d, pkt_index_d, block_count, checkbits_d); } } void AES_cbc_128_encrypt_gpu(const uint8_t *in_d, uint8_t *out_d, const uint32_t *pkt_offset_d, const uint8_t *keys_d, uint8_t *ivs_d, const unsigned int num_flows, uint8_t *checkbits_d, const unsigned int threads_per_blk, cudaStream_t stream) { unsigned int num_cuda_blks = (num_flows+threads_per_blk - 1) / threads_per_blk; if (stream == 0) { AES_cbc_128_encrypt_kernel_SharedMem<<<num_cuda_blks, threads_per_blk>>>( in_d, out_d, pkt_offset_d, keys_d, ivs_d, num_flows, checkbits_d); } else { AES_cbc_128_encrypt_kernel_SharedMem<<<num_cuda_blks, threads_per_blk, 0, stream>>>( in_d, out_d, pkt_offset_d, keys_d, ivs_d, num_flows, checkbits_d); } } void AES_ecb_128_encrypt_gpu(const uint8_t *in_d, uint8_t *out_d, const uint8_t *keys_d, uint16_t *pkt_index_d, unsigned long block_count, const unsigned int threads_per_blk, cudaStream_t stream) { unsigned int num_cuda_blks = (block_count + threads_per_blk - 1) / threads_per_blk; if (stream == 0) { AES_ecb_encrypt_kernel<<<num_cuda_blks, threads_per_blk>>>( in_d, out_d, keys_d, pkt_index_d, block_count); } else { AES_ecb_encrypt_kernel<<<num_cuda_blks, threads_per_blk, 0, stream>>>( in_d, out_d, keys_d, pkt_index_d, block_count); } } */ /************************************************************************** Key Setup for Decryption ***************************************************************************/ void AES_decrypt_key_prepare(uint8_t *dec_key, const uint8_t *enc_key, unsigned int key_bits) { uint32_t rk_buf[60]; uint32_t *rk = rk_buf; int i = 0; uint32_t temp; rk[0] = GETU32(enc_key ); rk[1] = GETU32(enc_key + 4); rk[2] = GETU32(enc_key + 8); rk[3] = GETU32(enc_key + 12); if (key_bits == 128) { for (;;) { temp = rk[3]; rk[4] = rk[0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon_host[i]; rk[5] = rk[1] ^ rk[4]; rk[6] = rk[2] ^ rk[5]; rk[7] = rk[3] ^ rk[6]; if (++i == 10) { rk += 4; goto end; } rk += 4; } } rk[4] = GETU32(enc_key + 16); rk[5] = GETU32(enc_key + 20); if (key_bits == 192) { for (;;) { temp = rk[ 5]; rk[ 6] = rk[ 0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon_host[i]; rk[ 7] = rk[ 1] ^ rk[ 6]; rk[ 8] = rk[ 2] ^ rk[ 7]; rk[ 9] = rk[ 3] ^ rk[ 8]; if (++i == 8) { rk += 6; goto end; } rk[10] = rk[ 4] ^ rk[ 9]; rk[11] = rk[ 5] ^ rk[10]; rk += 6; } } rk[6] = GETU32(enc_key + 24); rk[7] = GETU32(enc_key + 28); if (key_bits == 256) { for (;;) { temp = rk[ 7]; rk[ 8] = rk[ 0] ^ (Te4[(temp >> 16) & 0xff] & 0xff000000) ^ (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^ (Te4[(temp ) & 0xff] & 0x0000ff00) ^ (Te4[(temp >> 24) ] & 0x000000ff) ^ rcon_host[i]; rk[ 9] = rk[ 1] ^ rk[ 8]; rk[10] = rk[ 2] ^ rk[ 9]; rk[11] = rk[ 3] ^ rk[10]; if (++i == 7) { rk += 8; goto end; } temp = rk[11]; 
rk[12] = rk[ 4] ^ (Te4[(temp >> 24) ] & 0xff000000) ^ (Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^ (Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^ (Te4[(temp ) & 0xff] & 0x000000ff); rk[13] = rk[ 5] ^ rk[12]; rk[14] = rk[ 6] ^ rk[13]; rk[15] = rk[ 7] ^ rk[14]; rk += 8; } } end: memcpy(dec_key, rk, 16); } /************************************************************************** Experimental Codes ***************************************************************************/ /* __global__ void computeHMAC_SHA1_AES( uint8_t *input_buf, uint8_t *output, size_t *input_size_arr, size_t *output_size_arr, int N, uint8_t *checkbits_d, int *key_idxs, struct hmac_sa_entry *hmac_aes_key_array, int32_t *offsets) */ __global__ void computeHMAC_SHA1_AES( uint8_t* input_buf, uint8_t *output_buf, size_t *input_size_arr, size_t *output_size_arr, int N, uint8_t *checkbits_d, const uint8_t* __restrict__ ivs, const int32_t* __restrict__ key_idxs, const struct hmac_aes_sa_entry* __restrict__ hmac_aes_key_array, const int32_t* __restrict__ offsets) { /* computer the thread id */ int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < N) { /* Locate data */ const uint8_t *in = input_buf + offsets[idx]; uint8_t *out = output_buf + offsets[idx]; /* int temp = key_idxs[idx]; assert(temp == key_array[temp].entry_idx); assert(key_array[temp].aes_key != NULL); */ __shared__ uint32_t shared_Te0[256]; __shared__ uint32_t shared_Te1[256]; __shared__ uint32_t shared_Te2[256]; __shared__ uint32_t shared_Te3[256]; __shared__ uint32_t shared_Rcon[10]; /* initialize T boxes */ for (unsigned i = 0 ; i *blockDim.x < 256 ; i++) { unsigned index = threadIdx.x + i * blockDim.x; if (index >= N) break; shared_Te0[index] = Te0_ConstMem[index]; shared_Te1[index] = Te1_ConstMem[index]; shared_Te2[index] = Te2_ConstMem[index]; shared_Te3[index] = Te3_ConstMem[index]; } for(unsigned i = 0; i * blockDim.x < 10; i++){ int index = threadIdx.x + blockDim.x * i; if(index < 10){ shared_Rcon[index] = rcon[index]; } } /* make sure T boxes have been initialized. */ // __syncthreads(); const uint8_t *key = (const uint8_t*) hmac_aes_key_array[key_idxs[idx]].aes_key; uint8_t *ivec = (uint8_t*) (idx * AES_BLOCK_SIZE + ivs); /* Encrypt using cbc mode */ unsigned long len = (unsigned long) input_size_arr[idx]; const unsigned char *iv = ivec; while (len >= AES_BLOCK_SIZE) { *((uint64_t*)out) = *((uint64_t*)in) ^ *((uint64_t*)iv); *(((uint64_t*)out) + 1) = *(((uint64_t*)in) + 1) ^ *(((uint64_t*)iv) + 1); AES_128_encrypt(out, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); iv = out; len -= AES_BLOCK_SIZE; in += AES_BLOCK_SIZE; out += AES_BLOCK_SIZE; } if (len) { for(unsigned n = 0; n < len; ++n) out[n] = in[n] ^ iv[n]; for(unsigned n = len; n < AES_BLOCK_SIZE; ++n) out[n] = iv[n]; AES_128_encrypt(out, out, key, shared_Te0, shared_Te1, shared_Te2, shared_Te3, shared_Rcon); iv = out; } *((uint4*)ivec) = *((uint4*)iv); // __syncthreads(); // HMAC-SHA1 hashing int32_t offset = offsets[idx]; char *hmac_key = (char *) hmac_aes_key_array[key_idxs[idx]].hmac_key; uint16_t length = (uint16_t) input_size_arr[idx]; if (offset != -1) { // printf("TID:%4d \t Offset %10u, Length %10u\n", idx, offset, length); HMAC_SHA1((uint32_t*) (input_buf + offset), (uint32_t*) (output_buf + idx * SHA_DIGEST_LENGTH), length, (char*)hmac_key); // output_size_arr[idx] = SHA_DIGEST_LENGTH; // as output_roi is CUSTOMDATA, output_size_arr is not used. 
} } __syncthreads(); if (threadIdx.x == 0 && checkbits_d != 0) *(checkbits_d + blockIdx.x) = 1; } /* Among AES_cbc_128_decryption, AES_cbc_128_encryption, * AES_ecb_128_encryption and AES_decrypt_key_prepare(), * AES_cbc_128_encrypt_gpu() is only used in NBA, for now. */ void *nba::ipsec_hmac_sha1_aes_get_cuda_kernel() { return reinterpret_cast<void *> (computeHMAC_SHA1_AES); }
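/*
 * Illustrative sketch, not part of the original NBA sources: HMAC_SHA1()
 * above follows the standard construction
 *     HMAC(K, m) = SHA1((K ^ opad) || SHA1((K ^ ipad) || m))
 * with a 64-byte key block (the 0x36363636 / 0x5c5c5c5c fills combined with
 * xorpads()). Both hashes size their loops as (length + 63 + 9) / 64 blocks
 * because SHA-1 padding always appends one 0x80 byte plus an 8-byte
 * bit-length field. The small self-contained host program below only
 * demonstrates that block-count arithmetic; it does not reimplement the
 * kernels.
 */
#include <cstdio>

static int sha1_block_count(int length) {
  // 9 = 1 byte of 0x80 padding + 8 bytes of length field, rounded up to the
  // next multiple of the 64-byte SHA-1 block size.
  return (length + 9 + 63) / 64;
}

int main() {
  // A 55-byte message still fits in one block, 56 bytes spills into a second
  // block, and a full 64-byte message also needs two blocks.
  printf("%d %d %d\n", sha1_block_count(55), sha1_block_count(56),
         sha1_block_count(64));
  return 0;
}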
#include <nbla/array.hpp> #include <nbla/imperative.hpp> #include <nbla/variable.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/sync_batch_normalization.hpp> #include <nbla/cuda/limits.hpp> #include <nbla/function/add2.hpp> #include <nbla/function/concatenate.hpp> #include <nbla/function/slice.hpp> #include "kernel/sync_batch_normalization.cu" namespace nbla { template <typename T> void SyncBatchNormalizationCuda<T>::setup_impl(const Variables &inputs, const Variables &outputs) { this->batch_norm_.setup(inputs, outputs); SyncBatchNormalization<T>::setup_impl(inputs, outputs); int c = this->size1_; //---------------- // Forward setup //---------------- v_local_mean_.reshape(Shape_t{c}, true); v_local_invstd_.reshape(Shape_t{c}, true); v_local_count_.reshape(Shape_t{1}, true); // Store local reduction size to the buffer in order to synchronize between // other processes and get total reduction size. nbla::Context cpu_ctx{{"cpu:float"}, "CpuCachedArray", "0"}; float *local_count = v_local_count_.cast_data_and_get_pointer<float>(cpu_ctx); local_count[0] = this->size02_; int n_workers = this->comm_->size(); v_all_gather_send_.reshape(Shape_t{2 * c + 1}, true); v_all_gather_recv_.reshape(Shape_t{n_workers, (2 * c + 1)}, true); // Concatenate buffers for all_gather. concatenate_ = create_Concatenate(this->ctx_, 0); concatenate_->setup({&v_local_mean_, &v_local_invstd_, &v_local_count_}, {&v_all_gather_send_}); // Slices for extracting the result of all_gather. slice_mean_ = create_Slice(this->ctx_, {0, 0}, {n_workers, c}, {1, 1}); slice_mean_->setup({&v_all_gather_recv_}, {&v_all_mean_}); slice_invstd_ = create_Slice(this->ctx_, {0, c}, {n_workers, 2 * c}, {1, 1}); slice_invstd_->setup({&v_all_gather_recv_}, {&v_all_invstd_}); slice_count_ = create_Slice(this->ctx_, {0, 2 * c}, {n_workers, 2 * c + 1}, {1, 1}); slice_count_->setup({&v_all_gather_recv_}, {&v_all_count_}); //---------------- // Backward setup //---------------- v_sum_dy_o_.reshape(Shape_t{c}, true); v_sum_dy_xmu_o_.reshape(Shape_t{c}, true); v_beta_grad_.reshape(inputs[1]->shape(), true); v_gamma_grad_.reshape(inputs[2]->shape(), true); add2_ = create_Add2(this->ctx_, true /* inplace */); } template <class T> void SyncBatchNormalizationCuda<T>::forward_impl_batch( const Variables &inputs, const Variables &outputs, const bool update_inputs) { Variable *x = inputs[0]; Variable *beta = inputs[1]; Variable *gamma = inputs[2]; Variable *y = outputs[0]; const Size_t size0 = this->size0_; const Size_t size1 = this->size1_; const Size_t size2 = this->size2_; // Check whether it outputs batch mean and var. 
Variable *batch_mean = &this->mean_; Variable *batch_var = &this->var_; if (outputs.size() == 3) { batch_mean = outputs[1]; batch_var = outputs[2]; } const bool channel_last = this->axes_[0] == inputs[0]->ndim() - 1; // Calculate local mean and variance if (channel_last) { forward_collect_statistics_channels_last<Tc>( size0, size1, size2, x, &v_local_mean_, &v_local_invstd_, &v_staging_data_for_forward_, &v_semaphores_for_forward_, this->eps_, this->ctx_); } else { forward_collect_statistics<Tc>(size0, size1, size2, x, &v_local_mean_, &v_local_invstd_, this->eps_, this->ctx_); } // All gather local mean, variance and count concatenate_->forward({&v_local_mean_, &v_local_invstd_, &v_local_count_}, {&v_all_gather_send_}); const auto send_buffer = v_all_gather_send_.data(); const auto recv_buffer = v_all_gather_recv_.data(); this->comm_->all_gather(send_buffer, {recv_buffer}, this->group_); // Calculate global mean, variance slice_mean_->forward({&v_all_gather_recv_}, {&v_all_mean_}); slice_invstd_->forward({&v_all_gather_recv_}, {&v_all_invstd_}); slice_count_->forward({&v_all_gather_recv_}, {&v_all_count_}); auto r_mean = !update_inputs ? nullptr : inputs[3]; auto r_var = !update_inputs ? nullptr : inputs[4]; const int n_workers = this->comm_->size(); forward_reduce_statistics<Tc>(size0, size1, size2, &v_all_mean_, &v_all_invstd_, &v_all_count_, batch_mean, batch_var, r_mean, r_var, this->eps_, this->decay_rate_, this->ctx_, n_workers); // Batch normalization if (channel_last) { forward_normalization_channel_last<Tc>(size0, size1, size2, x, batch_mean, batch_var, beta, gamma, y, this->eps_, this->ctx_); } else { forward_normalization<Tc>(size0, size1, size2, x, y, batch_mean, batch_var, beta, gamma, this->eps_, this->ctx_); } // Clear internal buffers used only forward. { v_staging_data_for_forward_.data()->array()->clear(); v_semaphores_for_forward_.data()->array()->clear(); v_local_mean_.data()->array()->clear(); v_local_invstd_.data()->array()->clear(); // v_local_count is constant value with size 1. This value is set by CPU // context in setup_impl and cannot be clear during propagation. // v_local_count_.data()->array()->clear(); v_all_gather_send_.data()->array()->clear(); v_all_gather_recv_.data()->array()->clear(); v_all_mean_.data()->array()->clear(); v_all_invstd_.data()->array()->clear(); // v_all_count_ will be used in backward // v_all_count_.data()->array()->clear(); } } template <class T> void SyncBatchNormalizationCuda<T>::forward_impl_global( const Variables &inputs, const Variables &outputs) { this->batch_norm_.forward(inputs, outputs); } template <class T> void SyncBatchNormalizationCuda<T>::backward_impl_batch( const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0] || propagate_down[1] || propagate_down[2])) { return; } Variable *x = inputs[0]; Variable *beta = inputs[1]; Variable *gamma = inputs[2]; Variable *y = outputs[0]; const Size_t size0 = this->size0_; const Size_t size1 = this->size1_; const Size_t size2 = this->size2_; // Check whether it outputs batch mean/var. 
Variable *batch_mean = &this->mean_; Variable *batch_var = &this->var_; if (outputs.size() == 3) { batch_mean = outputs[1]; batch_var = outputs[2]; } const bool channel_last = this->axes_[0] == inputs[0]->ndim() - 1; // Reduce channels and calculate grad of beta and gamma to temporally buffers if (channel_last) { backward_reduce_channels_last<Tc>( size0, size1, size2, x, y, batch_mean, batch_var, &v_sum_dy_o_, &v_sum_dy_xmu_o_, &v_beta_grad_, &v_gamma_grad_, &v_staging_data_for_backward_, &v_semaphores_for_backward_, this->eps_, this->ctx_); } else { backward_reduce<Tc>(size0, size1, size2, x, y, batch_mean, batch_var, &v_sum_dy_o_, &v_sum_dy_xmu_o_, &v_beta_grad_, &v_gamma_grad_, this->eps_, this->ctx_); } // All reduce this->comm_->all_reduce({v_sum_dy_o_.data(), v_sum_dy_xmu_o_.data(), v_beta_grad_.data(), v_gamma_grad_.data()}, false, false, this->group_); // Store beta grad and gamma grad auto set_param_grad = [&](Variable *param, Variable *param_grad_global, const bool accum) { if (accum) { // Copy current grad value. Variable tmp_var(param->grad()); // Accumulate gradient by add2 operation. nbla::execute(add2_, {&tmp_var, param_grad_global}, {&tmp_var}); } else { // Just copy grad. const Array *param_from = param_grad_global->data()->get(get_dtype<T>(), this->ctx_); Array *param_to = param->grad()->cast(get_dtype<T>(), this->ctx_, true); param_to->copy_from(param_from); } }; // Beta grad if (propagate_down[1]) { set_param_grad(beta, &v_beta_grad_, accum[1]); } // Gamma grad if (propagate_down[2]) { set_param_grad(gamma, &v_gamma_grad_, accum[2]); } // Calculate x grad if (propagate_down[0]) { const bool output_stat = outputs.size() == 3; const int n_workers = this->comm_->size(); if (channel_last) { if (accum[0]) { backward_dx_post_channels_last<Tc, true>( size0, size1, size2, y, x, batch_mean, batch_var, gamma, &v_sum_dy_o_, &v_sum_dy_xmu_o_, &v_all_count_, output_stat, this->eps_, this->ctx_); } else { backward_dx_post_channels_last<Tc, false>( size0, size1, size2, y, x, batch_mean, batch_var, gamma, &v_sum_dy_o_, &v_sum_dy_xmu_o_, &v_all_count_, output_stat, this->eps_, this->ctx_); } } else { if (accum[0]) { backward_dx_post<Tc, true>(size0, size1, size2, x, y, batch_mean, batch_var, &v_sum_dy_o_, &v_sum_dy_xmu_o_, gamma, &v_all_count_, output_stat, this->eps_, this->ctx_); } else { backward_dx_post<Tc, false>(size0, size1, size2, x, y, batch_mean, batch_var, &v_sum_dy_o_, &v_sum_dy_xmu_o_, gamma, &v_all_count_, output_stat, this->eps_, this->ctx_); } } } // Clear internal buffers used in backward. { v_staging_data_for_backward_.data()->array()->clear(); v_semaphores_for_backward_.data()->array()->clear(); v_sum_dy_o_.data()->array()->clear(); v_sum_dy_xmu_o_.data()->array()->clear(); v_beta_grad_.data()->array()->clear(); v_gamma_grad_.data()->array()->clear(); v_all_count_.data()->array()->clear(); // Calculated in forwrad } } }
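/*
 * Illustrative host-side sketch, not taken from kernel/sync_batch_normalization.cu:
 * it shows one standard way to combine the all-gathered per-worker
 * (mean, invstd, count) triples into global batch statistics, assuming each
 * worker reports invstd = 1 / sqrt(var + eps) over its local biased variance.
 * The real forward_reduce_statistics<Tc>() kernel additionally updates the
 * running mean/variance with decay_rate, which is omitted here.
 */
#include <cmath>
#include <cstdio>
#include <vector>

struct WorkerStats { float mean, invstd, count; };

static void reduce_stats(const std::vector<WorkerStats> &workers, float eps,
                         float *global_mean, float *global_var) {
  float total = 0.f, sum_x = 0.f, sum_x2 = 0.f;
  for (const auto &w : workers) {
    float var = 1.f / (w.invstd * w.invstd) - eps;  // invert the local invstd
    total  += w.count;
    sum_x  += w.count * w.mean;                     // contribution to sum(x)
    sum_x2 += w.count * (var + w.mean * w.mean);    // contribution to sum(x^2)
  }
  *global_mean = sum_x / total;
  *global_var  = sum_x2 / total - (*global_mean) * (*global_mean);
}

int main() {
  // Two hypothetical workers with equal counts: the combined mean is the plain
  // average and the combined variance adds the spread between the local means.
  const float eps = 1e-5f;
  std::vector<WorkerStats> w = {{1.f, 1.f / std::sqrt(4.f + eps), 100.f},
                                {3.f, 1.f / std::sqrt(4.f + eps), 100.f}};
  float mean, var;
  reduce_stats(w, eps, &mean, &var);
  std::printf("mean=%g var=%g\n", mean, var);  // expect mean=2, var=5
  return 0;
}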
#include "blaze/common/common_defines.h" #include "blaze/common/exception.h" #include "blaze/math/float16.h" namespace blaze { template <> void Gemm<float16, CUDAContext>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float16* A, const float16* B, const float beta, float16* C, CUDAContext* ctx) { int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; float16 alpha_h, beta_h; float2half(&alpha, 1, &alpha_h); float2half(&beta, 1, &beta_h); cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; // We now use cublas method for smaller matrix. NOTE: Optimization on small matrix. CUBLAS_CHECK(cublasHgemm(ctx->cublas_handle(), cuTransB, cuTransA, N, M, K, (const __half*)&alpha_h, (const __half*)B, ldb, (const __half*)A, lda, (const __half*)&beta_h, (__half*)C, ldc)); } template <> void Gemm<float, CUDAContext>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const float* B, const float beta, float* C, CUDAContext* ctx) { int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; // We now use cublas method for smaller matrix. NOTE: Optimization on small matrix. CUBLAS_CHECK(cublasSgemm(ctx->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, A, lda, &beta, C, ldc)); } template <> void Gemm<double, CUDAContext>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const double* A, const double* B, const float beta, double* C, CUDAContext* ctx) { int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; double alpha_d = alpha, beta_d = beta; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; // We now use cublas method for smaller matrix. NOTE: Optimization on small matrix. CUBLAS_CHECK(cublasDgemm(ctx->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha_d, B, ldb, A, lda, &beta_d, C, ldc)); } template <> void GemmEx<float16, CUDAContext>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float16* A, const int lda, const float16* B, const int ldb, const float beta, float16* C, const int ldc, CUDAContext* ctx) { float16 alpha_h, beta_h; float2half(&alpha, 1, &alpha_h); float2half(&beta, 1, &beta_h); cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasHgemm(ctx->cublas_handle(), cuTransB, cuTransA, N, M, K, (const __half*)&alpha_h, (const __half*)B, ldb, (const __half*)A, lda, (const __half*)&beta_h, (__half*)C, ldc)); } template <> void GemmEx<float, CUDAContext>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const int lda, const float* B, const int ldb, const float beta, float* C, const int ldc, CUDAContext* ctx) { float alpha_f = alpha, beta_f = beta; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasSgemm(ctx->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha_f, B, ldb, A, lda, &beta_f, C, ldc)); } template <> void GemmEx<double, CUDAContext>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const double* A, const int lda, const double* B, const int ldb, const float beta, double* C, const int ldc, CUDAContext* ctx) { double alpha_d = alpha, beta_d = beta; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; CUBLAS_CHECK(cublasDgemm(ctx->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha_d, B, ldb, A, lda, &beta_d, C, ldc)); } template <> void GemmStridedBatched<float16, CUDAContext>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float16* A, const long long int stride_a, const float16* B, const long long int stride_b, const float beta, float16* C, const long long int stride_c, int batch_count, CUDAContext* ctx) { int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; float16 alpha_h, beta_h; float2half(&alpha, 1, &alpha_h); float2half(&beta, 1, &beta_h); // We now use cublas method for smaller matrix. NOTE: Optimization on small matrix. CUBLAS_CHECK(cublasHgemmStridedBatched(ctx->cublas_handle(), cuTransB, cuTransA, N, M, K, (const __half*)&alpha_h, (const __half*)B, ldb, stride_b, (const __half*)A, lda, stride_a, (const __half*)&beta_h, (__half*)C, ldc, stride_c, batch_count)); } template <> void GemmStridedBatched<float, CUDAContext>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A, const long long int stride_a, const float* B, const long long int stride_b, const float beta, float* C, const long long int stride_c, int batch_count, CUDAContext* ctx) { int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; // We now use cublas method for smaller matrix. NOTE: Optimization on small matrix. 
CUBLAS_CHECK(cublasSgemmStridedBatched(ctx->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B, ldb, stride_b, A, lda, stride_a, &beta, C, ldc, stride_c, batch_count)); } template <> void GemmStridedBatched<double, CUDAContext>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const double* A, const long long int stride_a, const double* B, const long long int stride_b, const float beta, double* C, const long long int stride_c, int batch_count, CUDAContext* ctx) { int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; double alpha_d = alpha, beta_d = beta; // We now use cublas method for smaller matrix. NOTE: Optimization on small matrix. CUBLAS_CHECK(cublasDgemmStridedBatched(ctx->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha_d, B, ldb, stride_b, A, lda, stride_a, &beta_d, C, ldc, stride_c, batch_count)); } template <> void GemmBatched<float16, CUDAContext>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float16* A_array[], const float16* B_array[], const float beta, float16* C_array[], int batch_count, CUDAContext* ctx) { #if CUDA_VERSION >= 9000 // Only CUDA9.0 Can support int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; float16 alpha_h, beta_h; float2half(&alpha, 1, &alpha_h); float2half(&beta, 1, &beta_h); // We now use cublas method for smaller matrix. NOTE: Optimization on small matrix. CUBLAS_CHECK(cublasHgemmBatched(ctx->cublas_handle(), cuTransB, cuTransA, N, M, K, (const __half*)&alpha_h, (const __half**)B_array, ldb, (const __half**)A_array, lda, (const __half*)&beta_h, (__half**)C_array, ldc, batch_count)); #else BLAZE_THROW("Not supported, CUDA_VERSION < 9000"); #endif } template <> void GemmBatched<float, CUDAContext>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const float* A_array[], const float* B_array[], const float beta, float* C_array[], int batch_count, CUDAContext* ctx) { int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; // We now use cublas method for smaller matrix. NOTE: Optimization on small matrix. CUBLAS_CHECK(cublasSgemmBatched(ctx->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha, B_array, ldb, A_array, lda, &beta, C_array, ldc, batch_count)); } template <> void GemmBatched<double, CUDAContext>(const CBLAS_TRANSPOSE TransA, const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K, const float alpha, const double* A_array[], const double* B_array[], const float beta, double* C_array[], int batch_count, CUDAContext* ctx) { double alpha_d = alpha, beta_d = beta; int lda = (TransA == CblasNoTrans) ? K : M; int ldb = (TransB == CblasNoTrans) ? N : K; int ldc = N; cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? 
CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t cuTransB = (TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T; // We now use cublas method for smaller matrix. NOTE: Optimization on small matrix. CUBLAS_CHECK(cublasDgemmBatched(ctx->cublas_handle(), cuTransB, cuTransA, N, M, K, &alpha_d, B_array, ldb, A_array, lda, &beta_d, C_array, ldc, batch_count)); } template <> void Gemv<float16, CUDAContext>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float16* A, const float16* x, const float beta, float16* y, CUDAContext* ctx) { BLAZE_THROW("Not implemented!"); } template <> void Gemv<float, CUDAContext>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const float* A, const float* x, const float beta, float* y, CUDAContext* ctx) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; CUBLAS_CHECK(cublasSgemv(ctx->cublas_handle(), cuTransA, N, M, &alpha, A, N, x, 1, &beta, y, 1)); } template <> void Gemv<double, CUDAContext>(const CBLAS_TRANSPOSE TransA, const int M, const int N, const float alpha, const double* A, const double* x, const float beta, double* y, CUDAContext* ctx) { cublasOperation_t cuTransA = (TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N; double alpha_d = alpha, beta_d = beta; CUBLAS_CHECK(cublasDgemv(ctx->cublas_handle(), cuTransA, N, M, &alpha_d, A, N, x, 1, &beta_d, y, 1)); } } // namespace blaze
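/*
 * Illustrative sketch, independent of the Blaze wrappers above, of why every
 * call passes B before A and swaps M and N: cuBLAS expects column-major
 * matrices, so a row-major product C = A * B (A is MxK, B is KxN) is computed
 * as C^T = B^T * A^T, where the unmodified row-major buffers are reinterpreted
 * as the column-major transposes with leading dimensions N, K and N.
 * Error checking is omitted for brevity.
 */
#include <cstdio>
#include <cublas_v2.h>
#include <cuda_runtime.h>

int main() {
  const int M = 2, K = 3, N = 2;
  // Row-major A (MxK) and B (KxN).
  float hA[M * K] = {1, 2, 3, 4, 5, 6};
  float hB[K * N] = {1, 0, 0, 1, 1, 1};
  float hC[M * N] = {0};
  float *dA, *dB, *dC;
  cudaMalloc(&dA, sizeof(hA));
  cudaMalloc(&dB, sizeof(hB));
  cudaMalloc(&dC, sizeof(hC));
  cudaMemcpy(dA, hA, sizeof(hA), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, hB, sizeof(hB), cudaMemcpyHostToDevice);
  cublasHandle_t handle;
  cublasCreate(&handle);
  const float alpha = 1.f, beta = 0.f;
  // Same argument pattern as Gemm<float, CUDAContext> in the no-transpose
  // case: (B, ldb=N) before (A, lda=K), with dimensions passed as (N, M, K).
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N, M, K,
              &alpha, dB, N, dA, K, &beta, dC, N);
  cudaMemcpy(hC, dC, sizeof(hC), cudaMemcpyDeviceToHost);
  // Expected row-major result: C = A * B = [[4, 5], [10, 11]].
  printf("%g %g %g %g\n", hC[0], hC[1], hC[2], hC[3]);
  cublasDestroy(handle);
  cudaFree(dA);
  cudaFree(dB);
  cudaFree(dC);
  return 0;
}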
#include <THC/THCTensorCopy.h> #include <THC/THCReduceApplyUtils.cuh> #include <THC/THCTensorTypeUtils.cuh> #include <THC/THCTensorCopy.hpp> #include <ATen/cuda/CUDAContext.h> // // This file contains pointwise operation functions and kernels that // work on both contiguous and non-contiguous tensor arguments of // arbitrary (up to MAX_CUTORCH_DIMS) dimensioned arguments without // copying or temporary storage. // // Rearrange dimensions for pointwise operations so that strides are in // decreasing order as much as possible, so that kernels have better memory // access patterns. // // For example, consider a binary operation on two "transposed" 2-dim tensors: // sizes: 256 512 // aInfo->strides: 1 256 // bInfo->strides: 1 256 // // Given this, each concurrent memory access inside kernelPointwiseApply2() is // exactly 256 elements apart, resulting in poor performance. // // This function exchanges dimensions so that memory access is contiguous: // sizes: 512 256 // aInfo->strides: 256 1 // bInfo->strides: 256 1 // // (Actually, it becomes even better because now collapseDims() can turn each // input into one contiguous array.) // // In general, given M (<=3) TensorInfo's with N dimensions, we can view each // strides[i] (0 <= i < N) as an M-tuple. Given each pair i < j, we exchange // strides[i] and [j] if // (1) strides[i][k] < strides[j][k] for some k (0 <= k < M) // (exchanging them will benefit input #k), and // (2) strides[i][k] <= strieds[j][k] for all k // (exchanging them will not make any input worse). template <typename T1, typename IndexType, typename T2 = void, typename T3 = void> void rearrangeDims(TensorInfo<T1, IndexType>* aInfo, TensorInfo<T2, IndexType>* bInfo = nullptr, TensorInfo<T3, IndexType>* cInfo = nullptr) { int numInfos = 1; int dims = aInfo->dims; IndexType *sizes[3] = { aInfo->sizes, }; IndexType *strides[3] = { aInfo->strides, }; if (bInfo != nullptr) { ++numInfos; if (bInfo->dims != dims) return; sizes[1] = bInfo->sizes; strides[1] = bInfo->strides; } if (cInfo != nullptr) { ++numInfos; if (cInfo->dims != dims) return; sizes[2] = cInfo->sizes; strides[2] = cInfo->strides; } // Bail out if sizes do not match: we are using "deprecated pointwise // behavior" among tensors of different shapes but same number of elements. for (int i = 1; i < numInfos; ++i) { for (int j = 0; j < dims; ++j) { if (sizes[i][j] != sizes[0][j]) return; } } for (int i = 0; i < dims - 1; ++i) { // No need to consider dimensions of size 1. if (sizes[0][i] == 1) continue; for (int j = i + 1; j < dims; ++j) { if (sizes[0][j] == 1) continue; // Compare the relative sizes of strides between dim #i and dim #j. 
bool hasIncreasingStrides = false; bool hasDecreasingStrides = false; for (int k = 0; k < numInfos; k++) { IndexType stride_i = strides[k][i]; IndexType stride_j = strides[k][j]; if (stride_i < stride_j) { hasIncreasingStrides = true; } else if (stride_i > stride_j) { hasDecreasingStrides = true; } } if (hasIncreasingStrides && !hasDecreasingStrides) { for (int k = 0; k < numInfos; k++) { IndexType size = sizes[k][i]; sizes[k][i] = sizes[k][j]; sizes[k][j] = size; IndexType stride = strides[k][i]; strides[k][i] = strides[k][j]; strides[k][j] = stride; } } } } } // Threads per block for our apply kernel // FIXME: use occupancy calculator instead #define THC_APPLY_THREADS_PER_BLOCK (32 * 16) #define THC_APPLY_BLOCKS_PER_SM 4 template <typename Op, typename Ta, typename IndexType, int ADims> #if defined __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(THC_APPLY_THREADS_PER_BLOCK, THC_APPLY_BLOCKS_PER_SM) #endif __global__ void kernelPointwiseApply1(const OffsetInfo<Ta, IndexType, ADims> a, IndexType totalElements, Op op) { // NOTE: The two typecasts below are essential when IndexType is 64-bit; // without them, results are silently truncated to 32 bits! for (IndexType linearIndex = (IndexType) blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += (IndexType) gridDim.x * blockDim.x) { op(a.get(linearIndex)); } } template <typename Op, typename Ta, typename Tb, typename IndexType, int ADims, int BDims> #if defined __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(THC_APPLY_THREADS_PER_BLOCK, THC_APPLY_BLOCKS_PER_SM) #endif __global__ void kernelPointwiseApply2(const OffsetInfo<Ta, IndexType, ADims> a, const OffsetInfo<Tb, IndexType, BDims> b, IndexType totalElements, Op op) { for (IndexType linearIndex = (IndexType) blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += (IndexType) gridDim.x * blockDim.x) { op(a.get(linearIndex), b.get(linearIndex)); } } template <typename Op, typename Ta, typename Tb, typename Tc, typename IndexType, int ADims, int BDims, int CDims> #if defined __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(THC_APPLY_THREADS_PER_BLOCK, THC_APPLY_BLOCKS_PER_SM) #endif __global__ void kernelPointwiseApply3(const OffsetInfo<Ta, IndexType, ADims> a, const OffsetInfo<Tb, IndexType, BDims> b, const OffsetInfo<Tc, IndexType, CDims> c, IndexType totalElements, Op op) { for (IndexType linearIndex = (IndexType) blockIdx.x * blockDim.x + threadIdx.x; linearIndex < totalElements; linearIndex += (IndexType) gridDim.x * blockDim.x) { op(a.get(linearIndex), b.get(linearIndex), c.get(linearIndex)); } } inline dim3 getApplyBlock() { return dim3(THC_APPLY_THREADS_PER_BLOCK); } inline bool getApplyGrid(THCState* state, uint64_t totalElements, dim3& grid, int curDevice) { if (curDevice == -1) return false; uint64_t numBlocks = THCCeilDiv(totalElements, static_cast<uint64_t>(THC_APPLY_THREADS_PER_BLOCK)); uint64_t maxGridX = at::cuda::getDeviceProperties(curDevice)->maxGridSize[0]; if (numBlocks > maxGridX) numBlocks = maxGridX; // For 32-bit indices, make sure that gridDim.x * blockDim.x fits in 32 bits. 
if (totalElements <= INT32_MAX && numBlocks > INT32_MAX / THC_APPLY_THREADS_PER_BLOCK) numBlocks = INT32_MAX / THC_APPLY_THREADS_PER_BLOCK; grid = dim3(numBlocks); return true; } template <typename ScalarTypeA, typename TensorTypeA, typename Op> bool THC_pointwiseApply1(THCState* state, TensorTypeA* a, const Op& op, TensorArgType aType = ReadWrite) { if (THCTensor_nDimensionLegacyAll(state, a) > MAX_CUTORCH_DIMS) { return false; } if (THCTensor_nDimensionLegacyAll(state, a) == 0) { // Zero-dim tensor; do nothing return true; } const dim3 block = getApplyBlock(); dim3 grid; ptrdiff_t totalElements = THCTensor_nElement(state, a); int curDevice = -1; cudaGetDevice(&curDevice); if (!getApplyGrid(state, totalElements, grid, curDevice)) { return false; } /* Expands readable/writable tensors whose indices may be "overlapped." This ensures that each element of the tensor is operated on once and only once. */ TensorTypeA* oldA = NULL; if (aType == ReadWrite && THCTensor_maybeOverlappingIndices(state, a)) { // Must perform in contiguous space oldA = a; a = (TensorTypeA*)THCTensor_newContiguous<ScalarTypeA>(state, a); } // It is possible that the tensor dimensions are able to be collapsed, // and thus we can reduce the actual code complexity of the copy by // exploiting this knowledge statically, since the div/mod is the // most expensive part of the operation, more so than memory accesses. // For instance, when copying a non-contiguous to a contiguous tensor // (or vice versa), the contiguous tensor can be collapsed to one // dimension, and the loop to translate the linear index to the array // index can be similarly collapsed. That is what this unrolling is for. #define HANDLE_CASE(TYPE, A) \ kernelPointwiseApply1<Op, \ ScalarTypeA, \ TYPE, A> \ <<<grid, block, 0, c10::cuda::getCurrentCUDAStream(curDevice)>>>( \ OffsetInfo<ScalarTypeA, TYPE, A> \ (aInfo), \ (TYPE) totalElements, op); #define HANDLE_A_CASE(TYPE, A) { \ switch (A) { \ case 1: \ HANDLE_CASE(TYPE, 1); \ break; \ case 2: \ HANDLE_CASE(TYPE, 2); \ break; \ default: \ HANDLE_CASE(TYPE, -1); \ break; \ } \ } // Can we use 32-bit integer math in the kernel (the linear ID for the copy // and the resulting non-linear offset is all computable using 32-bit math?) // We also use unsigned index math in the kernel, as signed div/mod has // additional overhead. if (THCTensor_canUse32BitIndexMath(state, a)) { TensorInfo<ScalarTypeA, unsigned int> aInfo = getTensorInfo<ScalarTypeA, TensorTypeA, unsigned int>(state, a); rearrangeDims(&aInfo); aInfo.collapseDims(); #if CUDA_VERSION < 9000 if (!aInfo.isContiguous()) { grid.x = min(at::cuda::getCurrentDeviceProperties()->multiProcessorCount * THC_APPLY_BLOCKS_PER_SM , grid.x); } #endif HANDLE_A_CASE(unsigned int, aInfo.dims); } else { TensorInfo<ScalarTypeA, uint64_t> aInfo = getTensorInfo<ScalarTypeA, TensorTypeA, uint64_t>(state, a); rearrangeDims(&aInfo); aInfo.collapseDims(); /* Only instantiates the all 1D special case and the fallback all nD case for large (64-bit indexed) tensors to reduce compilation time. 
*/ if (aInfo.dims == 1) { OffsetInfo<ScalarTypeA, uint64_t, 1> aOffset(aInfo); kernelPointwiseApply1<Op, ScalarTypeA, uint64_t, 1> <<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( aOffset, (uint64_t) totalElements, op); } else { #if CUDA_VERSION < 9000 grid.x = min(at::cuda::getCurrentDeviceProperties()->multiProcessorCount * THC_APPLY_BLOCKS_PER_SM , grid.x); #endif OffsetInfo<ScalarTypeA, uint64_t, -1> aOffset(aInfo); kernelPointwiseApply1<Op, ScalarTypeA, uint64_t, -1> <<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( aOffset, (uint64_t) totalElements, op); } } #undef HANDLE_CASE #undef HANDLE_A_CASE if (oldA) { // Ignore overlaps when copying back; if we use THCTensor_copy // instead, it will recursively try and invoke ourselves to make // oldA contiguous. THCTensor_copyIgnoringOverlaps<ScalarTypeA>(state, oldA, a); THCTensor_free(state, a); a = oldA; } return true; } template <typename ScalarTypeA, typename ScalarTypeB, typename TensorTypeA, typename TensorTypeB, typename Op> bool THC_pointwiseApply2(THCState* state, TensorTypeA* a, TensorTypeB* b, const Op& op, TensorArgType aType = ReadWrite, TensorArgType bType = ReadOnly) { ptrdiff_t totalElements = THCTensor_nElement(state, a); if (totalElements != THCTensor_nElement(state, b)) { return false; } if (THCTensor_nDimensionLegacyAll(state, a) > MAX_CUTORCH_DIMS || THCTensor_nDimensionLegacyAll(state, b) > MAX_CUTORCH_DIMS) { return false; } if (THCTensor_nDimensionLegacyAll(state, a) == 0) { // Zero-dim tensor; do nothing return true; } const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); if (!getApplyGrid(state, totalElements, grid, curDevice)) { return false; } /* Expands readable/writable tensors whose indices may be "overlapped." This ensures that each element of the tensor is operated on once and only once. */ TensorTypeA* oldA = NULL; TensorTypeB* oldB = NULL; if (aType == ReadWrite && THCTensor_maybeOverlappingIndices(state, a)) { // Must perform in contiguous space oldA = a; a = (TensorTypeA*)THCTensor_newContiguous<ScalarTypeA>(state, a); } if (bType == ReadWrite && THCTensor_maybeOverlappingIndices(state, b)) { // Must perform in contiguous space oldB = b; b = (TensorTypeB*)THCTensor_newContiguous<ScalarTypeB>(state, b); } // It is possible that the tensor dimensions are able to be collapsed, // and thus we can reduce the actual code complexity of the copy by // exploiting this knowledge statically, since the div/mod is the // most expensive part of the operation, more so than memory accesses. // For instance, when copying a non-contiguous to a contiguous tensor // (or vice versa), the contiguous tensor can be collapsed to one // dimension, and the loop to translate the linear index to the array // index can be similarly collapsed. That is what this unrolling is for. 
#define HANDLE_CASE(TYPE, A, B) \ kernelPointwiseApply2<Op, \ ScalarTypeA, \ ScalarTypeB, \ TYPE, A, B> \ <<<grid, block, 0, c10::cuda::getCurrentCUDAStream(curDevice)>>>( \ OffsetInfo<ScalarTypeA, TYPE, A> \ (aInfo), \ OffsetInfo<ScalarTypeB, TYPE, B> \ (bInfo), \ (TYPE) totalElements, op); #define HANDLE_B_CASE(TYPE, A, B) { \ switch (B) { \ case 1: \ HANDLE_CASE(TYPE, A, 1); \ break; \ case 2: \ HANDLE_CASE(TYPE, A, 2); \ break; \ default: \ HANDLE_CASE(TYPE, A, -1); \ break; \ } \ } #define HANDLE_A_CASE(TYPE, A, B) { \ switch (A) { \ case 1: \ HANDLE_B_CASE(TYPE, 1, B); \ break; \ case 2: \ HANDLE_B_CASE(TYPE, 2, B); \ break; \ default: \ HANDLE_B_CASE(TYPE, -1, B); \ break; \ } \ } if (THCTensor_canUse32BitIndexMath(state, a) && THCTensor_canUse32BitIndexMath(state, b)) { TensorInfo<ScalarTypeA, unsigned int> aInfo = getTensorInfo<ScalarTypeA, TensorTypeA, unsigned int>(state, a); TensorInfo<ScalarTypeB, unsigned int> bInfo = getTensorInfo<ScalarTypeB, TensorTypeB, unsigned int>(state, b); rearrangeDims(&aInfo, &bInfo); aInfo.collapseDims(); bInfo.collapseDims(); #if CUDA_VERSION < 9000 if (!(aInfo.isContiguous() && bInfo.isContiguous())) grid.x = min(at::cuda::getCurrentDeviceProperties()->multiProcessorCount * THC_APPLY_BLOCKS_PER_SM , grid.x); #endif HANDLE_A_CASE(unsigned int, aInfo.dims, bInfo.dims); } else { TensorInfo<ScalarTypeA, uint64_t> aInfo = getTensorInfo<ScalarTypeA, TensorTypeA, uint64_t>(state, a); TensorInfo<ScalarTypeB, uint64_t> bInfo = getTensorInfo<ScalarTypeB, TensorTypeB, uint64_t>(state, b); rearrangeDims(&aInfo, &bInfo); aInfo.collapseDims(); bInfo.collapseDims(); /* Only instantiates the all 1D special case and the fallback all nD case for large (64-bit indexed) tensors to reduce compilation time. */ if (aInfo.dims == 1 && bInfo.dims == 1) { OffsetInfo<ScalarTypeA, uint64_t, 1> aOffset(aInfo); OffsetInfo<ScalarTypeB, uint64_t, 1> bOffset(bInfo); kernelPointwiseApply2<Op, ScalarTypeA, ScalarTypeB, uint64_t, 1, 1> <<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( aOffset, bOffset, (uint64_t) totalElements, op); } else { #if CUDA_VERSION < 9000 grid.x = min(at::cuda::getCurrentDeviceProperties()->multiProcessorCount * THC_APPLY_BLOCKS_PER_SM , grid.x); #endif OffsetInfo<ScalarTypeA, uint64_t, -1> aOffset(aInfo); OffsetInfo<ScalarTypeB, uint64_t, -1> bOffset(bInfo); kernelPointwiseApply2<Op, ScalarTypeA, ScalarTypeB, uint64_t, -1, -1> <<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( aOffset, bOffset, (uint64_t) totalElements, op); } } #undef HANDLE_CASE #undef HANDLE_B_CASE #undef HANDLE_A_CASE if (oldA) { // Ignore overlaps when copying back; if we use THCTensor_copy // instead, it will recursively try and invoke ourselves to make // oldA contiguous. THCTensor_copyIgnoringOverlaps<ScalarTypeA>(state, oldA, a); THCTensor_free(state, a); a = oldA; } if (oldB) { // Ignore overlaps when copying back; if we use THCTensor_copy // instead, it will recursively try and invoke ourselves to make // oldB contiguous. 
THCTensor_copyIgnoringOverlaps<ScalarTypeB>(state, oldB, b); THCTensor_free(state, b); b = oldB; } return true; } template <typename ScalarTypeA, typename ScalarTypeB, typename ScalarTypeC, typename TensorTypeA, typename TensorTypeB, typename TensorTypeC, typename Op> bool THC_pointwiseApply3(THCState* state, TensorTypeA* a, TensorTypeB* b, TensorTypeC* c, const Op& op, TensorArgType aType = ReadWrite, TensorArgType bType = ReadOnly, TensorArgType cType = ReadOnly) { ptrdiff_t totalElements = THCTensor_nElement(state, a); if (totalElements != THCTensor_nElement(state, b) || totalElements != THCTensor_nElement(state, c)) { return false; } if (THCTensor_nDimensionLegacyAll(state, a) > MAX_CUTORCH_DIMS || THCTensor_nDimensionLegacyAll(state, b) > MAX_CUTORCH_DIMS || THCTensor_nDimensionLegacyAll(state, c) > MAX_CUTORCH_DIMS) { return false; } if (THCTensor_nDimensionLegacyAll(state, a) == 0) { // Zero-dim tensor; do nothing return true; } const dim3 block = getApplyBlock(); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); if (!getApplyGrid(state, totalElements, grid, curDevice)) { return false; } /* Expands readable/writable tensors whose indices may be "overlapped." This ensures that each element of the tensor is operated on once and only once. */ TensorTypeA* oldA = NULL; TensorTypeB* oldB = NULL; TensorTypeC* oldC = NULL; if (aType == ReadWrite && THCTensor_maybeOverlappingIndices(state, a)) { // Must perform in contiguous space oldA = a; a = (TensorTypeA*)THCTensor_newContiguous<ScalarTypeA>(state, a); } if (bType == ReadWrite && THCTensor_maybeOverlappingIndices(state, b)) { // Must perform in contiguous space oldB = b; b = (TensorTypeB*)THCTensor_newContiguous<ScalarTypeB>(state, b); } if (cType == ReadWrite && THCTensor_maybeOverlappingIndices(state, c)) { // Must perform in contiguous space oldC = c; c = (TensorTypeC*)THCTensor_newContiguous<ScalarTypeC>(state, c); } #define HANDLE_CASE(TYPE, A, B, C) \ kernelPointwiseApply3<Op, \ ScalarTypeA, \ ScalarTypeB, \ ScalarTypeC, \ TYPE, A, B, C> \ <<<grid, block, 0, c10::cuda::getCurrentCUDAStream(curDevice)>>>( \ OffsetInfo<ScalarTypeA, TYPE, A> \ (aInfo), \ OffsetInfo<ScalarTypeB, TYPE, B> \ (bInfo), \ OffsetInfo<ScalarTypeC, TYPE, C> \ (cInfo), \ (TYPE) totalElements, op); #define HANDLE_C_CASE(TYPE, A, B, C) { \ switch (C) { \ case 1: \ HANDLE_CASE(TYPE, A, B, 1); \ break; \ case 2: \ HANDLE_CASE(TYPE, A, B, 2); \ break; \ default: \ HANDLE_CASE(TYPE, A, B, -1); \ break; \ } \ } #define HANDLE_B_CASE(TYPE, A, B, C) { \ switch (B) { \ case 1: \ HANDLE_C_CASE(TYPE, A, 1, C); \ break; \ case 2: \ HANDLE_C_CASE(TYPE, A, 2, C); \ break; \ default: \ HANDLE_C_CASE(TYPE, A, -1, C); \ break; \ } \ } #define HANDLE_A_CASE(TYPE, A, B, C) { \ switch (A) { \ case 1: \ HANDLE_B_CASE(TYPE, 1, B, C); \ break; \ case 2: \ HANDLE_B_CASE(TYPE, 2, B, C); \ break; \ default: \ HANDLE_B_CASE(TYPE, -1, B, C); \ break; \ } \ } if (THCTensor_canUse32BitIndexMath(state, a) && THCTensor_canUse32BitIndexMath(state, b) && THCTensor_canUse32BitIndexMath(state, c)) { TensorInfo<ScalarTypeA, unsigned int> aInfo = getTensorInfo<ScalarTypeA, TensorTypeA, unsigned int>(state, a); TensorInfo<ScalarTypeB, unsigned int> bInfo = getTensorInfo<ScalarTypeB, TensorTypeB, unsigned int>(state, b); TensorInfo<ScalarTypeC, unsigned int> cInfo = getTensorInfo<ScalarTypeC, TensorTypeC, unsigned int>(state, c); rearrangeDims(&aInfo, &bInfo, &cInfo); aInfo.collapseDims(); bInfo.collapseDims(); cInfo.collapseDims(); #if CUDA_VERSION < 9000 if (!(aInfo.isContiguous() && 
bInfo.isContiguous() && cInfo.isContiguous())) grid.x = min(at::cuda::getCurrentDeviceProperties()->multiProcessorCount * THC_APPLY_BLOCKS_PER_SM , grid.x); #endif HANDLE_A_CASE(unsigned int, aInfo.dims, bInfo.dims, cInfo.dims); } else { TensorInfo<ScalarTypeA, uint64_t> aInfo = getTensorInfo<ScalarTypeA, TensorTypeA, uint64_t>(state, a); TensorInfo<ScalarTypeB, uint64_t> bInfo = getTensorInfo<ScalarTypeB, TensorTypeB, uint64_t>(state, b); TensorInfo<ScalarTypeC, uint64_t> cInfo = getTensorInfo<ScalarTypeC, TensorTypeC, uint64_t>(state, c); rearrangeDims(&aInfo, &bInfo, &cInfo); aInfo.collapseDims(); bInfo.collapseDims(); cInfo.collapseDims(); /* Only instantiates the all 1D special case and the fallback all nD case for large (64-bit indexed) tensors to reduce compilation time. */ if (aInfo.dims == 1 && bInfo.dims == 1 && cInfo.dims == 1) { OffsetInfo<ScalarTypeA, uint64_t, 1> aOffset(aInfo); OffsetInfo<ScalarTypeB, uint64_t, 1> bOffset(bInfo); OffsetInfo<ScalarTypeC, uint64_t, 1> cOffset(cInfo); kernelPointwiseApply3<Op, ScalarTypeA, ScalarTypeB, ScalarTypeC, uint64_t, 1, 1, 1> <<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( aOffset, bOffset, cOffset, (uint64_t) totalElements, op); } else { #if CUDA_VERSION < 9000 grid.x = min(at::cuda::getCurrentDeviceProperties()->multiProcessorCount * THC_APPLY_BLOCKS_PER_SM , grid.x); #endif OffsetInfo<ScalarTypeA, uint64_t, -1> aOffset(aInfo); OffsetInfo<ScalarTypeB, uint64_t, -1> bOffset(bInfo); OffsetInfo<ScalarTypeC, uint64_t, -1> cOffset(cInfo); kernelPointwiseApply3<Op, ScalarTypeA, ScalarTypeB, ScalarTypeC, uint64_t, -1, -1, -1> <<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( aOffset, bOffset, cOffset, (uint64_t) totalElements, op); } } #undef HANDLE_CASE #undef HANDLE_C_CASE #undef HANDLE_B_CASE #undef HANDLE_A_CASE if (oldA) { // Ignore overlaps when copying back; if we use THCTensor_copy // instead, it will recursively try and invoke ourselves to make // oldA contiguous. THCTensor_copyIgnoringOverlaps<ScalarTypeA>(state, oldA, a); THCTensor_free(state, a); a = oldA; } if (oldB) { // Ignore overlaps when copying back; if we use THCTensor_copy // instead, it will recursively try and invoke ourselves to make // oldB contiguous. THCTensor_copyIgnoringOverlaps<ScalarTypeB>(state, oldB, b); THCTensor_free(state, b); b = oldB; } if (oldC) { // Ignore overlaps when copying back; if we use THCTensor_copy // instead, it will recursively try and invoke ourselves to make // oldC contiguous. THCTensor_copyIgnoringOverlaps<ScalarTypeC>(state, oldC, c); THCTensor_free(state, c); c = oldC; } return true; } #undef THC_APPLY_THREADS_PER_BLOCK #undef THC_APPLY_BLOCKS_PER_SM #endif // THC_APPLY_INC
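/*
 * Illustrative sketch of what rearrangeDims() does for the "transposed"
 * example in the header comment of this file: two 2-D tensors of sizes
 * {256, 512} with strides {1, 256}. Exchanging the two dimensions so that
 * strides end up in decreasing order yields sizes {512, 256} and strides
 * {256, 1}, which collapseDims() can then fold into a single contiguous run
 * of 512 * 256 elements. This standalone host program only mimics the swap
 * for one tensor; the real function applies the same exchange to up to three
 * TensorInfo arguments at once.
 */
#include <cstdio>

int main() {
  int sizes[2]   = {256, 512};
  int strides[2] = {1, 256};
  // strides[0] < strides[1], so swapping dim 0 and dim 1 gives the kernels a
  // unit-stride innermost dimension (the same exchange rearrangeDims performs).
  if (strides[0] < strides[1]) {
    int ts = sizes[0];   sizes[0]   = sizes[1];   sizes[1]   = ts;
    int tt = strides[0]; strides[0] = strides[1]; strides[1] = tt;
  }
  // After the swap the tensor is effectively contiguous:
  // strides[0] == sizes[1] * strides[1] and strides[1] == 1.
  printf("sizes = {%d, %d}, strides = {%d, %d}, contiguous = %d\n",
         sizes[0], sizes[1], strides[0], strides[1],
         strides[0] == sizes[1] * strides[1] && strides[1] == 1);
  return 0;
}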
#define LIMIT -999 #include <stdio.h> #include <string.h> #include <stdlib.h> #include <sys/time.h> #include <cuda.h> #include "reference.cpp" // kernel #define SCORE(i, j) input_itemsets_l[j + i * (BLOCK_SIZE+1)] #define REF(i, j) reference_l[j + i * BLOCK_SIZE] __device__ __host__ int maximum( int a, int b, int c){ int k; if( a <= b ) k = b; else k = a; if( k <=c ) return(c); else return(k); } //global variables int blosum62[24][24] = { { 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4}, {-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4}, {-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4}, {-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4}, { 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4}, {-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4}, {-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4}, {-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4}, {-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4}, {-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4}, {-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4}, {-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4}, {-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4}, {-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4}, { 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4}, { 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4}, {-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4}, {-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4}, { 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4}, {-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4}, {-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4}, {-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1} }; // local variables void usage(int argc, char **argv) { fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]); fprintf(stderr, "\t<dimension> - x and y dimensions\n"); fprintf(stderr, "\t<penalty> - penalty(positive integer)\n"); fprintf(stderr, "\t<file> - filename\n"); exit(1); } double get_time() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } __global__ void kernel1 (int*__restrict d_input_itemsets, const int*__restrict d_reference, const int offset_r, const int offset_c, const int max_cols, const int blk, const int penalty) { __shared__ int input_itemsets_l [(BLOCK_SIZE + 1) *(BLOCK_SIZE+1)]; __shared__ int reference_l [BLOCK_SIZE*BLOCK_SIZE]; int bx = blockIdx.x; int tx = threadIdx.x; // Base elements int base = offset_r * max_cols + offset_c; int b_index_x = bx; int b_index_y = blk - 1 - bx; int index = base + max_cols * BLOCK_SIZE * 
b_index_y + BLOCK_SIZE * b_index_x + tx + ( max_cols + 1 ); int index_n = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 ); int index_w = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( max_cols ); int index_nw = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x; if (tx == 0) SCORE(tx, 0) = d_input_itemsets[index_nw + tx]; __syncthreads(); for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) { REF(ty, tx) = d_reference[index + max_cols * ty]; } __syncthreads(); SCORE((tx + 1), 0) = d_input_itemsets[index_w + max_cols * tx]; __syncthreads(); SCORE(0, (tx + 1)) = d_input_itemsets[index_n]; __syncthreads(); for( int m = 0 ; m < BLOCK_SIZE ; m++){ if ( tx <= m ){ int t_index_x = tx + 1; int t_index_y = m - tx + 1; SCORE(t_index_y, t_index_x) = maximum( SCORE((t_index_y-1), (t_index_x-1)) + REF((t_index_y-1), (t_index_x-1)), SCORE((t_index_y), (t_index_x-1)) - (penalty), SCORE((t_index_y-1), (t_index_x)) - (penalty)); } __syncthreads(); } __syncthreads(); for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){ if ( tx <= m){ int t_index_x = tx + BLOCK_SIZE - m ; int t_index_y = BLOCK_SIZE - tx; SCORE(t_index_y, t_index_x) = maximum( SCORE((t_index_y-1), (t_index_x-1)) + REF((t_index_y-1), (t_index_x-1)), SCORE((t_index_y), (t_index_x-1)) - (penalty), SCORE((t_index_y-1), (t_index_x)) - (penalty)); } __syncthreads(); } for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) { d_input_itemsets[index + max_cols * ty] = SCORE((ty+1), (tx+1)); } } __global__ void kernel2 (int*__restrict d_input_itemsets, const int*__restrict d_reference, const int block_width, const int offset_r, const int offset_c, const int max_cols, const int blk, const int penalty) { __shared__ int input_itemsets_l [(BLOCK_SIZE + 1) *(BLOCK_SIZE+1)]; __shared__ int reference_l [BLOCK_SIZE*BLOCK_SIZE]; int bx = blockIdx.x; int tx = threadIdx.x; // Base elements int base = offset_r * max_cols + offset_c; int b_index_x = bx + block_width - blk ; int b_index_y = block_width - bx -1; int index = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( max_cols + 1 ); int index_n = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + tx + ( 1 ); int index_w = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( max_cols ); int index_nw = base + max_cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x; if (tx == 0) SCORE(tx, 0) = d_input_itemsets[index_nw]; for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) REF(ty, tx) = d_reference[index + max_cols * ty]; __syncthreads(); SCORE((tx + 1), 0) = d_input_itemsets[index_w + max_cols * tx]; __syncthreads(); SCORE(0, (tx + 1)) = d_input_itemsets[index_n]; __syncthreads(); for( int m = 0 ; m < BLOCK_SIZE ; m++){ if ( tx <= m ){ int t_index_x = tx + 1; int t_index_y = m - tx + 1; SCORE(t_index_y, t_index_x) = maximum( SCORE((t_index_y-1), (t_index_x-1)) + REF((t_index_y-1), (t_index_x-1)), SCORE((t_index_y), (t_index_x-1)) - (penalty), SCORE((t_index_y-1), (t_index_x)) - (penalty)); } __syncthreads(); } for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--){ if ( tx <= m){ int t_index_x = tx + BLOCK_SIZE - m ; int t_index_y = BLOCK_SIZE - tx; SCORE(t_index_y, t_index_x) = maximum( SCORE((t_index_y-1), (t_index_x-1)) + REF((t_index_y-1), (t_index_x-1)), SCORE((t_index_y), (t_index_x-1)) - (penalty), SCORE((t_index_y-1), (t_index_x)) - (penalty)); } __syncthreads(); } for ( int ty = 0 ; ty < BLOCK_SIZE ; ty++) d_input_itemsets[index + ty * max_cols] = SCORE((ty+1), (tx+1)); } int main(int argc, char **argv){ printf("WG size of 
kernel = %d \n", BLOCK_SIZE); int max_rows_t, max_cols_t, penalty_t; // the lengths of the two sequences should be able to divided by 16. // And at current stage max_rows needs to equal max_cols if (argc == 3) { max_rows_t = atoi(argv[1]); max_cols_t = atoi(argv[1]); penalty_t = atoi(argv[2]); } else{ usage(argc, argv); } if(atoi(argv[1])%16!=0){ fprintf(stderr,"The dimension values must be a multiple of 16\n"); exit(1); } // make constant variable to avoid kernel argument set at every loop iteration const int max_rows = max_rows_t + 1; const int max_cols = max_cols_t + 1; const int penalty = penalty_t; int *reference; int *input_itemsets; int *output_itemsets; reference = (int *)malloc( max_rows * max_cols * sizeof(int) ); input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); srand(7); //initialization for (int i = 0 ; i < max_cols; i++){ for (int j = 0 ; j < max_rows; j++){ input_itemsets[i*max_cols+j] = 0; } } for( int i=1; i< max_rows ; i++){ //initialize the cols input_itemsets[i*max_cols] = rand() % 10 + 1; } for( int j=1; j< max_cols ; j++){ //initialize the rows input_itemsets[j] = rand() % 10 + 1; } for (int i = 1 ; i < max_cols; i++){ for (int j = 1 ; j < max_rows; j++){ reference[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]]; } } for( int i = 1; i< max_rows ; i++) input_itemsets[i*max_cols] = -i * penalty; for( int j = 1; j< max_cols ; j++) input_itemsets[j] = -j * penalty; double offload_start = get_time(); int workgroupsize = BLOCK_SIZE; #ifdef DEBUG if(workgroupsize < 0){ printf("ERROR: invalid or missing <num_work_items>[/<work_group_size>]\n"); return -1; } #endif // set global and local workitems const size_t local_work = (size_t)workgroupsize; size_t global_work; const int worksize = max_cols - 1; #ifdef DEBUG printf("worksize = %d\n", worksize); #endif //these two parameters are for extension use, don't worry about it. const int offset_r = 0; const int offset_c = 0; const int block_width = worksize/BLOCK_SIZE ; int *d_input_itemsets; int *d_reference; cudaMalloc((void**)&d_input_itemsets, max_cols * max_rows * sizeof(int)); cudaMalloc((void**)&d_reference, max_cols * max_rows * sizeof(int)); cudaMemcpy(d_input_itemsets, input_itemsets, max_cols * max_rows * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_reference, reference, max_cols * max_rows * sizeof(int), cudaMemcpyHostToDevice); #ifdef DEBUG printf("Processing upper-left matrix\n"); #endif for( int blk = 1 ; blk <= block_width ; blk++){ global_work = blk; kernel1<<<global_work, local_work>>>(d_input_itemsets, d_reference, offset_r, offset_c, max_cols, blk, penalty); } #ifdef DEBUG printf("Processing lower-right matrix\n"); #endif for( int blk = block_width - 1 ; blk >= 1 ; blk--){ global_work = blk; kernel2<<<global_work, local_work>>>(d_input_itemsets, d_reference, block_width, offset_r, offset_c, max_cols, blk, penalty); } cudaMemcpy(output_itemsets, d_input_itemsets, max_cols * max_rows * sizeof(int), cudaMemcpyDeviceToHost); double offload_end = get_time(); printf("Device offloading time = %lf(s)\n", offload_end - offload_start); // verify nw_host(input_itemsets, reference, max_cols, penalty); int err = memcmp(input_itemsets, output_itemsets, max_cols * max_rows * sizeof(int)); printf("%s\n", err ? 
"FAIL" : "PASS"); #ifdef TRACEBACK FILE *fpo = fopen("result.txt","w"); fprintf(fpo, "print traceback value:\n"); for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){ int nw, n, w, traceback; if ( i == max_rows - 2 && j == max_rows - 2 ) fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element if ( i == 0 && j == 0 ) break; if ( i > 0 && j > 0 ){ nw = output_itemsets[(i - 1) * max_cols + j - 1]; w = output_itemsets[ i * max_cols + j - 1 ]; n = output_itemsets[(i - 1) * max_cols + j]; } else if ( i == 0 ){ nw = n = LIMIT; w = output_itemsets[ i * max_cols + j - 1 ]; } else if ( j == 0 ){ nw = w = LIMIT; n = output_itemsets[(i - 1) * max_cols + j]; } else{ } int new_nw, new_w, new_n; new_nw = nw + reference[i * max_cols + j]; new_w = w - penalty; new_n = n - penalty; traceback = maximum(new_nw, new_w, new_n); if(traceback == new_nw) traceback = nw; if(traceback == new_w) traceback = w; if(traceback == new_n) traceback = n; fprintf(fpo, "%d ", traceback); if(traceback == nw ) {i--; j--; continue;} else if(traceback == w ) {j--; continue;} else if(traceback == n ) {i--; continue;} else ; } fclose(fpo); #endif //printf("Computation Done\n"); free(reference); free(input_itemsets); free(output_itemsets); cudaFree(d_input_itemsets); cudaFree(d_reference); return 0; }
the_stack
namespace flowfilter { namespace gpu { __global__ void flowPropagateX_k(cudaTextureObject_t inputFlow, gpuimage_t<float2> flowPropagated, const float dt, const int border) { const int height = flowPropagated.height; const int width = flowPropagated.width; // pixel coordinate const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y); if(pix.x >= width || pix.y >= height) { return; } // flow values around pixel in X direction const float2 flow_m = tex2D<float2>(inputFlow, pix.x -1, pix.y); const float2 flow_0 = tex2D<float2>(inputFlow, pix.x, pix.y); const float2 flow_p = tex2D<float2>(inputFlow, pix.x +1, pix.y); // central difference of U_abs float Uabs_central = abs(flow_p.x) - abs(flow_m.x); // dominant velocity float Ud = Uabs_central > 0.0f? flow_p.x : flow_m.x; // forward and backward differences of U in X float ux_p = flow_p.x - flow_0.x; float ux_m = flow_0.x - flow_m.x; // forward and backward differences of V in X float vx_p = flow_p.y - flow_0.y; float vx_m = flow_0.y - flow_m.y; // propagation in X float2 flowPropU = flow_0; flowPropU.x -= dt*Ud* (Ud >= 0.0f? ux_m : ux_p); flowPropU.y -= dt*Ud* (Ud >= 0.0f? vx_m : vx_p); //################################# // BORDER REMOVAL //################################# const unsigned int inRange = (pix.x >= border && pix.x < width - border) && (pix.y >= border && pix.y < height - border); // if the pixel coordinate lies on the image border, // take the original value of flow (flow_0) as the propagated flow flowPropU.x = inRange? flowPropU.x : flow_0.x; flowPropU.y = inRange? flowPropU.y : flow_0.y; //################################# // PACK RESULTS //################################# *coordPitch(flowPropagated, pix) = flowPropU; } __global__ void flowPropagateY_k(cudaTextureObject_t inputFlow, gpuimage_t<float2> flowPropagated, const float dt, const int border) { const int height = flowPropagated.height; const int width = flowPropagated.width; // pixel coordinate const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y); if(pix.x >= width || pix.y >= height) { return; } // flow values around pixel in Y direction const float2 flow_m = tex2D<float2>(inputFlow, pix.x, pix.y -1); const float2 flow_0 = tex2D<float2>(inputFlow, pix.x, pix.y); const float2 flow_p = tex2D<float2>(inputFlow, pix.x, pix.y +1); // central difference of V_abs float Vabs_central = abs(flow_p.y) - abs(flow_m.y); // dominant velocity float Vd = Vabs_central > 0.0f? flow_p.y : flow_m.y; // forward and backward differences of U in Y float uy_p = flow_p.x - flow_0.x; float uy_m = flow_0.x - flow_m.x; // forward and backward differences of V in Y float vy_p = flow_p.y - flow_0.y; float vy_m = flow_0.y - flow_m.y; // propagation in Y float2 flowPropV = flow_0; flowPropV.x -= dt*Vd* (Vd >= 0.0f? uy_m : uy_p); flowPropV.y -= dt*Vd* (Vd >= 0.0f? vy_m : vy_p); //################################# // BORDER REMOVAL //################################# const unsigned int inRange = (pix.x >= border && pix.x < width - border) && (pix.y >= border && pix.y < height - border); // if the pixel coordinate lies on the image border, // take the original value of flow (flow_0) as the propagated flow flowPropV.x = inRange? flowPropV.x : flow_0.x; flowPropV.y = inRange? 
flowPropV.y : flow_0.y; //################################# // PACK THE RESULTS //################################# *coordPitch(flowPropagated, pix) = flowPropV; } __global__ void flowPropagatePayloadX_k(cudaTextureObject_t inputFlow, gpuimage_t<float2> flowPropagated, cudaTextureObject_t scalarPayload, gpuimage_t<float> scalarPropagated, cudaTextureObject_t vectorPayload, gpuimage_t<float2> vectorPropagated, const float dt, const int border) { const int height = flowPropagated.height; const int width = flowPropagated.width; // pixel coordinate const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y); if(pix.x >= width || pix.y >= height) { return; } // flow values around pixel in X direction const float2 flow_m = tex2D<float2>(inputFlow, pix.x -1, pix.y); const float2 flow_0 = tex2D<float2>(inputFlow, pix.x, pix.y); const float2 flow_p = tex2D<float2>(inputFlow, pix.x +1, pix.y); // central difference of U_abs const float Uabs_central = abs(flow_p.x) - abs(flow_m.x); // dominant velocity const float Ud = Uabs_central > 0.0f? flow_p.x : flow_m.x; // forward and backward differences of U in X const float ux_p = flow_p.x - flow_0.x; const float ux_m = flow_0.x - flow_m.x; // forward and backward differences of V in X const float vx_p = flow_p.y - flow_0.y; const float vx_m = flow_0.y - flow_m.y; // propagation in X float2 flowPropU = flow_0; flowPropU.x -= dt*Ud* (Ud >= 0.0f? ux_m : ux_p); flowPropU.y -= dt*Ud* (Ud >= 0.0f? vx_m : vx_p); //################################# // SCALAR PAYLOAD PROPAGATION //################################# const float load1_m = tex2D<float>(scalarPayload, pix.x -1, pix.y); const float load1_0 = tex2D<float>(scalarPayload, pix.x, pix.y); const float load1_p = tex2D<float>(scalarPayload, pix.x +1, pix.y); // forward and backward differences const float lx1_p = load1_p - load1_0; const float lx1_m = load1_0 - load1_m; float loadProp1 = load1_0; loadProp1 -= dt*Ud* (Ud >= 0.0f? lx1_m : lx1_p); //################################# // VECTOR PAYLOAD PROPAGATION //################################# const float2 load2_m = tex2D<float2>(vectorPayload, pix.x -1, pix.y); const float2 load2_0 = tex2D<float2>(vectorPayload, pix.x, pix.y); const float2 load2_p = tex2D<float2>(vectorPayload, pix.x +1, pix.y); // forward and backward differences const float2 lx2_p = make_float2(load2_p.x - load2_0.x, load2_p.y - load2_0.y); const float2 lx2_m = make_float2(load2_0.x - load2_m.x, load2_0.y - load2_m.y); float2 loadProp2 = load2_0; loadProp2.x -= dt*Ud* (Ud >= 0.0f? lx2_m.x : lx2_p.x); loadProp2.y -= dt*Ud* (Ud >= 0.0f? lx2_m.y : lx2_p.y); //################################# // BORDER REMOVAL //################################# const unsigned int inRange = (pix.x >= border && pix.x < width - border) && (pix.y >= border && pix.y < height - border); // if the pixel coordinate lies on the image border, // take the original value of flow (flow_0) as the propagated flow flowPropU.x = inRange? flowPropU.x : flow_0.x; flowPropU.y = inRange? flowPropU.y : flow_0.y; loadProp1 = inRange? loadProp1 : load1_0; loadProp2.x = inRange? loadProp2.x : load2_0.x; loadProp2.y = inRange? 
loadProp2.y : load2_0.y; //################################# // PACK RESULTS //################################# *coordPitch(flowPropagated, pix) = flowPropU; *coordPitch(scalarPropagated, pix) = loadProp1; *coordPitch(vectorPropagated, pix) = loadProp2; } __global__ void flowPropagatePayloadY_k(cudaTextureObject_t inputFlow, gpuimage_t<float2> flowPropagated, cudaTextureObject_t scalarPayload, gpuimage_t<float> scalarPropagated, cudaTextureObject_t vectorPayload, gpuimage_t<float2> vectorPropagated, const float dt, const int border) { const int height = flowPropagated.height; const int width = flowPropagated.width; // pixel coordinate const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y); if(pix.x >= width || pix.y >= height) { return; } // flow values around pixel in Y direction const float2 flow_m = tex2D<float2>(inputFlow, pix.x, pix.y -1); const float2 flow_0 = tex2D<float2>(inputFlow, pix.x, pix.y); const float2 flow_p = tex2D<float2>(inputFlow, pix.x, pix.y +1); // central difference of V_abs const float Vabs_central = abs(flow_p.y) - abs(flow_m.y); // dominant velocity const float Vd = Vabs_central > 0.0f? flow_p.y : flow_m.y; // forward and backward differences of U in Y const float uy_p = flow_p.x - flow_0.x; const float uy_m = flow_0.x - flow_m.x; // forward and backward differences of V in Y const float vy_p = flow_p.y - flow_0.y; const float vy_m = flow_0.y - flow_m.y; // propagation in Y float2 flowPropV = flow_0; flowPropV.x -= dt*Vd* (Vd >= 0.0f? uy_m : uy_p); flowPropV.y -= dt*Vd* (Vd >= 0.0f? vy_m : vy_p); //################################# // FLOAT1 PAYLOAD PROPAGATION //################################# const float load1_m = tex2D<float>(scalarPayload, pix.x, pix.y -1); const float load1_0 = tex2D<float>(scalarPayload, pix.x, pix.y); const float load1_p = tex2D<float>(scalarPayload, pix.x, pix.y +1); // forward and backward differences const float ly1_p = load1_p - load1_0; const float ly1_m = load1_0 - load1_m; float loadProp1 = load1_0; loadProp1 -= dt*Vd* (Vd >= 0.0f? ly1_m : ly1_p); //################################# // FLOAT2 PAYLOAD PROPAGATION //################################# const float2 load2_m = tex2D<float2>(vectorPayload, pix.x, pix.y -1); const float2 load2_0 = tex2D<float2>(vectorPayload, pix.x, pix.y); const float2 load2_p = tex2D<float2>(vectorPayload, pix.x, pix.y +1); // forward and backward differences const float2 ly2_p = make_float2(load2_p.x - load2_0.x, load2_p.y - load2_0.y); const float2 ly2_m = make_float2(load2_0.x - load2_m.x, load2_0.y - load2_m.y); float2 loadProp2 = load2_0; loadProp2.x -= dt*Vd* (Vd >= 0.0f? ly2_m.x : ly2_p.x); loadProp2.y -= dt*Vd* (Vd >= 0.0f? ly2_m.y : ly2_p.y); //################################# // BORDER REMOVAL //################################# const unsigned int inRange = (pix.x >= border && pix.x < width - border) && (pix.y >= border && pix.y < height - border); // if the pixel coordinate lies on the image border, // take the original value of flow (flow_0) as the propagated flow flowPropV.x = inRange? flowPropV.x : flow_0.x; flowPropV.y = inRange? flowPropV.y : flow_0.y; loadProp1 = inRange? loadProp1 : load1_0; loadProp2.x = inRange? loadProp2.x : load2_0.x; loadProp2.y = inRange? 
loadProp2.y : load2_0.y; //################################# // PACK RESULTS //################################# *coordPitch(flowPropagated, pix) = flowPropV; *coordPitch(scalarPropagated, pix) = loadProp1; *coordPitch(vectorPropagated, pix) = loadProp2; } }; // namespace gpu }; // namespace flowfilter
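The two plain kernels above implement one upwind propagation step split into an X pass and a Y pass; the X pass must finish (and its output be exposed through a texture) before the Y pass reads it. The launch sketch below is only an assumption about how the surrounding flowfilter classes wire these kernels together: the function and variable names are hypothetical, and the only gpuimage_t fields relied on (width, height) are the ones the kernels themselves read.

// Hedged launch sketch, assuming it lives in the same flowfilter::gpu namespace
// as the kernels. flowTex samples the input flow, flowX/flowXY receive the two
// passes, and flowXTex is a texture bound to flowX.
void propagateFlowSketch(cudaTextureObject_t flowTex,
                         gpuimage_t<float2> flowX,
                         cudaTextureObject_t flowXTex,
                         gpuimage_t<float2> flowXY,
                         const float dt, const int border,
                         cudaStream_t stream = 0) {
    dim3 block(32, 8);
    dim3 grid((flowX.width  + block.x - 1) / block.x,
              (flowX.height + block.y - 1) / block.y);
    // X pass: upwind advection of (u, v) along the dominant u component.
    flowPropagateX_k<<<grid, block, 0, stream>>>(flowTex, flowX, dt, border);
    // Y pass: same scheme along the dominant v component, reading the X result.
    flowPropagateY_k<<<grid, block, 0, stream>>>(flowXTex, flowXY, dt, border);
}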
the_stack
//========================================================================================================================================================================================================200 // DEFINE / INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // LIBRARIES //======================================================================================================================================================150 #include <stdlib.h> #include <math.h> #include <string.h> #include "main.h" // (in the main program folder) needed to recognized input parameters #include "timer.h" #include "file.h" //======================================================================================================================================================150 // MAIN FUNCTION HEADER //======================================================================================================================================================150 //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "./util/avi/avilib.h" // (in directory) needed by avi functions #include "./util/avi/avimod.h" // (in directory) needed by avi functions //======================================================================================================================================================150 // KERNEL //======================================================================================================================================================150 void kernel_gpu_wrapper( params_common common, int* endoRow, int* endoCol, int* tEndoRowLoc, int* tEndoColLoc, int* epiRow, int* epiCol, int* tEpiRowLoc, int* tEpiColLoc, avi_t* frames); //======================================================================================================================================================150 // END //======================================================================================================================================================150 //========================================================================================================================================================================================================200 // MAIN FUNCTION //========================================================================================================================================================================================================200 int main( int argc, char* argv []){ printf("WG size of kernel = %d \n", NUMBER_THREADS); //======================================================================================================================================================150 // VARIABLES //======================================================================================================================================================150 // time long long time0; long long time1; long long time2; long long time3; long long time4; long long time5; avi_t* frames; time0 = 
get_time(); //======================================================================================================================================================150 // STRUCTURES, GLOBAL STRUCTURE VARIABLES //======================================================================================================================================================150 params_common common; common.common_mem = sizeof(params_common); //======================================================================================================================================================150 // FRAME INFO //======================================================================================================================================================150 // variables char* video_file_name; // open movie file video_file_name = (char *) "../data/heartwall/test.avi"; frames = (avi_t*)AVI_open_input_file(video_file_name, 1); // added casting if (frames == NULL) { AVI_print_error((char *) "Error with AVI_open_input_file"); return -1; } // dimensions common.no_frames = AVI_video_frames(frames); common.frame_rows = AVI_video_height(frames); common.frame_cols = AVI_video_width(frames); common.frame_elem = common.frame_rows * common.frame_cols; common.frame_mem = sizeof(fp) * common.frame_elem; time1 = get_time(); //======================================================================================================================================================150 // CHECK INPUT ARGUMENTS //======================================================================================================================================================150 if(argc!=2){ printf("ERROR: missing argument (number of frames to processed) or too many arguments\n"); return 0; } else{ common.frames_processed = atoi(argv[1]); if(common.frames_processed<0 || common.frames_processed>common.no_frames){ printf("ERROR: %d is an incorrect number of frames specified, select in the range of 0-%d\n", common.frames_processed, common.no_frames); return 0; } } time2 = get_time(); //======================================================================================================================================================150 // INPUTS //======================================================================================================================================================150 //====================================================================================================100 // READ PARAMETERS FROM FILE //====================================================================================================100 char* param_file_name = (char *) "../data/heartwall/input.txt"; read_parameters( param_file_name, &common.tSize, &common.sSize, &common.maxMove, &common.alpha); //====================================================================================================100 // READ SIZE OF INPUTS FROM FILE //====================================================================================================100 read_header(param_file_name, &common.endoPoints, &common.epiPoints); common.allPoints = common.endoPoints + common.epiPoints; //====================================================================================================100 // READ DATA FROM FILE //====================================================================================================100 //==================================================50 // ENDO POINTS MEMORY ALLOCATION //==================================================50 common.endo_mem 
= sizeof(int) * common.endoPoints; int* endoRow; endoRow = (int*)malloc(common.endo_mem); int* endoCol; endoCol = (int*)malloc(common.endo_mem); int* tEndoRowLoc; tEndoRowLoc = (int*)malloc(common.endo_mem * common.no_frames); int* tEndoColLoc; tEndoColLoc = (int*)malloc(common.endo_mem * common.no_frames); //==================================================50 // EPI POINTS MEMORY ALLOCATION //==================================================50 common.epi_mem = sizeof(int) * common.epiPoints; int* epiRow; epiRow = (int *)malloc(common.epi_mem); int* epiCol; epiCol = (int *)malloc(common.epi_mem); int* tEpiRowLoc; tEpiRowLoc = (int *)malloc(common.epi_mem * common.no_frames); int* tEpiColLoc; tEpiColLoc = (int *)malloc(common.epi_mem * common.no_frames); //==================================================50 // READ DATA FROM FILE //==================================================50 read_data(param_file_name, common.endoPoints, endoRow, endoCol, common.epiPoints, epiRow, epiCol); //==================================================50 // End //==================================================50 //====================================================================================================100 // End //====================================================================================================100 time3 = get_time(); //======================================================================================================================================================150 // KERNELL WRAPPER CALL //======================================================================================================================================================150 kernel_gpu_wrapper( common, endoRow, endoCol, tEndoRowLoc, tEndoColLoc, epiRow, epiCol, tEpiRowLoc, tEpiColLoc, frames); time4 = get_time(); //==================================================50 // DUMP DATA TO FILE //==================================================50 #ifdef OUTPUT write_data( "result.txt", common.no_frames, common.frames_processed, common.endoPoints, tEndoRowLoc, tEndoColLoc, common.epiPoints, tEpiRowLoc, tEpiColLoc); #endif //==================================================50 // End //==================================================50 //======================================================================================================================================================150 // DEALLOCATION //======================================================================================================================================================150 //====================================================================================================100 // endo points //====================================================================================================100 free(endoRow); free(endoCol); free(tEndoRowLoc); free(tEndoColLoc); //====================================================================================================100 // epi points //====================================================================================================100 free(epiRow); free(epiCol); free(tEpiRowLoc); free(tEpiColLoc); //====================================================================================================100 // End //====================================================================================================100 time5= get_time(); 
//======================================================================================================================================================150 // DISPLAY TIMING //======================================================================================================================================================150 printf("Time spent in different stages of the application:\n"); printf("%15.12f s, %15.12f : READ INITIAL VIDEO FRAME\n", (fp) (time1-time0) / 1000000, (fp) (time1-time0) / (fp) (time5-time0) * 100); printf("%15.12f s, %15.12f : READ COMMAND LINE PARAMETERS\n", (fp) (time2-time1) / 1000000, (fp) (time2-time1) / (fp) (time5-time0) * 100); printf("%15.12f s, %15.12f : READ INPUTS FROM FILE\n", (fp) (time3-time2) / 1000000, (fp) (time3-time2) / (fp) (time5-time0) * 100); printf("%15.12f s, %15.12f : GPU ALLOCATION, COPYING, COMPUTATION\n", (fp) (time4-time3) / 1000000, (fp) (time4-time3) / (fp) (time5-time0) * 100); printf("%15.12f s, %15.12f : FREE MEMORY\n", (fp) (time5-time4) / 1000000, (fp) (time5-time4) / (fp) (time5-time0) * 100); printf("Total time:\n"); printf("%15.12f s\n", (fp) (time5-time0) / 1000000); //======================================================================================================================================================150 // End //======================================================================================================================================================150 //========================================================================================================================================================================================================200 // End //========================================================================================================================================================================================================200 }
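The timing report above divides differences of get_time() by 1000000, so the helper from timer.h is assumed to return a microsecond counter. A stand-in consistent with that assumption, deliberately named get_time_us_sketch so it does not clash with the real helper, would be:

// Hedged stand-in for timer.h's get_time(); it only illustrates the assumed
// unit (microseconds), which the /1000000 conversions above rely on.
#include <sys/time.h>
static long long get_time_us_sketch(void) {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (long long)tv.tv_sec * 1000000LL + (long long)tv.tv_usec;
}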
the_stack
#include <device_launch_parameters.h> namespace surfelwarp { namespace device { struct FusionAndMarkAppendedObservationSurfelDevice { // Some constants defined as enum enum { scale_factor = d_fusion_map_scale, fuse_window_halfsize = scale_factor >> 1, count_model_halfsize = 2 * scale_factor /*>> 1 */, append_window_halfsize = scale_factor, search_window_halfsize = scale_factor, }; //The observation struct { cudaTextureObject_t vertex_time_map; cudaTextureObject_t normal_radius_map; cudaTextureObject_t color_time_map; } observation_maps; //The rendered maps struct { cudaTextureObject_t vertex_map; cudaTextureObject_t normal_map; cudaTextureObject_t color_time_map; cudaTextureObject_t index_map; } render_maps; //The written array struct { float4* vertex_confidence; float4* normal_radius; float4* color_time; unsigned* fused_indicator; } geometry_arrays; //The shared datas unsigned short image_rows, image_cols; float current_time; __host__ __device__ __forceinline__ bool checkViewDirection( const float4& depth_vertex, const float4& depth_normal ) const { const float3 view_direction = - normalized(make_float3(depth_vertex.x, depth_vertex.y, depth_vertex.z)); const float3 normal = normalized(make_float3(depth_normal.x, depth_normal.y, depth_normal.z)); return dot(view_direction, normal) > 0.4f; } //The actual processing interface __device__ __forceinline__ void processIndicator(const mat34& world2camera, unsigned* appending_indicator) const { const int x = threadIdx.x + blockDim.x * blockIdx.x; const int y = threadIdx.y + blockDim.y * blockIdx.y; const auto offset = y * image_cols + x; if(x < search_window_halfsize || x >= image_cols - search_window_halfsize || y < search_window_halfsize || y >= image_rows - search_window_halfsize) { //Write to indicator before exit appending_indicator[offset] = 0; return; } //Load the data const float4 depth_vertex_confid = tex2D<float4>(observation_maps.vertex_time_map, x, y); const float4 depth_normal_radius = tex2D<float4>(observation_maps.normal_radius_map, x, y); const float4 image_color_time = tex2D<float4>(observation_maps.color_time_map, x, y); if(is_zero_vertex(depth_vertex_confid)) return; //The windows search state const int map_x_center = scale_factor * x; const int map_y_center = scale_factor * y; //The window search iteration variables SurfelFusionWindowState fusion_state; unsigned model_count = 0; SurfelAppendingWindowState append_state; //The row search loop for(int dy = - search_window_halfsize; dy < search_window_halfsize; dy++) { for(int dx = - search_window_halfsize; dx < search_window_halfsize; dx++) { //The actual position of in the rendered map const int map_y = dy + map_y_center; const int map_x = dx + map_x_center; const auto index = tex2D<unsigned>(render_maps.index_map, map_x, map_y); if(index != 0xFFFFFFFF) { //Load the model vertex const float4 model_world_v4 = tex2D<float4>(render_maps.vertex_map, map_x, map_y); const float4 model_world_n4 = tex2D<float4>(render_maps.normal_map, map_x, map_y); //Transform it const float3 model_camera_v3 = world2camera.rot * model_world_v4 + world2camera.trans; const float3 model_camera_n3 = world2camera.rot * model_world_n4; //Some attributes commonly used for checking const float dot_value = dotxyz(model_camera_n3, depth_normal_radius); const float diff_z = fabsf(model_camera_v3.z - depth_vertex_confid.z); const float confidence = model_world_v4.w; const float z_dist = model_camera_v3.z; const float dist_square = squared_distance(model_camera_v3, depth_vertex_confid); //First check for fusion 
if(dx >= -fuse_window_halfsize && dy >= -fuse_window_halfsize && dx < fuse_window_halfsize && dy < fuse_window_halfsize) { if(dot_value >= 0.8f && diff_z <= 3 * 0.001f) { // Update it fusion_state.Update(confidence, z_dist, map_x, map_y); } } //Next check for count the model if(dx >= -count_model_halfsize && dy >= -count_model_halfsize && dx < count_model_halfsize && dy < count_model_halfsize) { if(dot_value > 0.3f) model_count++; } //Finally for appending { //if(dot_value >= 0.8f && diff_z <= 3 * 0.001f) { // Update it // append_state.Update(confidence, z_dist); //} if(dot_value >= 0.8f && dist_square <= (2 * 0.001f) * (2 * 0.001f)) { // Update it append_state.Update(confidence, z_dist); } } } // There is a surfel here } // x iteration loop } // y iteration loop //For appending, as in reinit should mark all depth surfels unsigned pixel_indicator = 0; if(append_state.best_confid < -0.01 && model_count == 0 && checkViewDirection(depth_vertex_confid, depth_normal_radius) ) { pixel_indicator = 1; } appending_indicator[offset] = pixel_indicator; //For fusion if(fusion_state.best_confid > 0) { float4 model_vertex_confid = tex2D<float4>(render_maps.vertex_map, fusion_state.best_map_x, fusion_state.best_map_y); float4 model_normal_radius = tex2D<float4>(render_maps.normal_map, fusion_state.best_map_x, fusion_state.best_map_y); float4 model_color_time = tex2D<float4>(render_maps.color_time_map, fusion_state.best_map_x, fusion_state.best_map_y); const unsigned index = tex2D<unsigned>(render_maps.index_map, fusion_state.best_map_x, fusion_state.best_map_y); fuse_surfel( depth_vertex_confid, depth_normal_radius, image_color_time, world2camera, current_time, model_vertex_confid, model_normal_radius, model_color_time ); //Write it geometry_arrays.vertex_confidence[index] = model_vertex_confid; geometry_arrays.normal_radius[index] = model_normal_radius; geometry_arrays.color_time[index] = model_color_time; geometry_arrays.fused_indicator[index] = 1; } } __device__ __forceinline__ void processAtomic(const mat34& world2camera, unsigned* appending_offset, ushort2* appended_pixels) const { const int x = threadIdx.x + blockDim.x * blockIdx.x; const int y = threadIdx.y + blockDim.y * blockIdx.y; if(x < search_window_halfsize || x >= image_cols - search_window_halfsize || y < search_window_halfsize || y >= image_rows - search_window_halfsize) return; //Load the data const float4 depth_vertex_confid = tex2D<float4>(observation_maps.vertex_time_map, x, y); const float4 depth_normal_radius = tex2D<float4>(observation_maps.normal_radius_map, x, y); const float4 image_color_time = tex2D<float4>(observation_maps.color_time_map, x, y); if(is_zero_vertex(depth_vertex_confid)) return; //The windows search state const int map_x_center = scale_factor * x; const int map_y_center = scale_factor * y; //The window search iteration variables unsigned model_count = 0; SurfelFusionWindowState fusion_state; SurfelAppendingWindowState append_state; //The row search loop for(int dy = - search_window_halfsize; dy < search_window_halfsize; dy++) { for(int dx = - search_window_halfsize; dx < search_window_halfsize; dx++) { //The actual position of in the rendered map const int map_y = dy + map_y_center; const int map_x = dx + map_x_center; const auto index = tex2D<unsigned>(render_maps.index_map, map_x, map_y); if(index != 0xFFFFFFFF) { //Load the model vertex const float4 model_world_v4 = tex2D<float4>(render_maps.vertex_map, map_x, map_y); const float4 model_world_n4 = tex2D<float4>(render_maps.normal_map, map_x, map_y); 
//Transform it const float3 model_camera_v3 = world2camera.rot * model_world_v4 + world2camera.trans; const float3 model_camera_n3 = world2camera.rot * model_world_n4; //Some attributes commonly used for checking const float dot_value = dotxyz(model_camera_n3, depth_normal_radius); const float diff_z = fabsf(model_camera_v3.z - depth_vertex_confid.z); const float confidence = model_world_v4.w; const float z_dist = model_camera_v3.z; const float dist_square = squared_distance(model_camera_v3, depth_vertex_confid); //First check for fusion if(dx >= -fuse_window_halfsize && dy >= -fuse_window_halfsize && dx < fuse_window_halfsize && dy < fuse_window_halfsize) { if(dot_value >= 0.8f && diff_z <= 3 * 0.001f) { // Update it fusion_state.Update(confidence, z_dist, map_x, map_y); } } //Next check for count the model if(dx >= -count_model_halfsize && dy >= -count_model_halfsize && dx < count_model_halfsize && dy < count_model_halfsize) { model_count++; } //Finally for appending { if(dot_value >= 0.8f && dist_square <= (3 * 0.001f) * (3 * 0.001f)) { // Update it append_state.Update(confidence, z_dist); } } } // There is a surfel here } // x iteration loop } // y iteration loop //For fusion if(fusion_state.best_confid > 0) { float4 model_vertex_confid = tex2D<float4>(render_maps.vertex_map, fusion_state.best_map_x, fusion_state.best_map_y); float4 model_normal_radius = tex2D<float4>(render_maps.normal_map, fusion_state.best_map_x, fusion_state.best_map_y); float4 model_color_time = tex2D<float4>(render_maps.color_time_map, fusion_state.best_map_x, fusion_state.best_map_y); const unsigned index = tex2D<unsigned>(render_maps.index_map, fusion_state.best_map_x, fusion_state.best_map_y); fuse_surfel( depth_vertex_confid, depth_normal_radius, image_color_time, world2camera, current_time, model_vertex_confid, model_normal_radius, model_color_time ); //Write it geometry_arrays.vertex_confidence[index] = model_vertex_confid; geometry_arrays.normal_radius[index] = model_normal_radius; geometry_arrays.color_time[index] = model_color_time; geometry_arrays.fused_indicator[index] = 1; } //Check the view direction, and using atomic operation for appending if(append_state.best_confid < -0.01 && model_count == 0 && checkViewDirection(depth_vertex_confid, depth_normal_radius) ) { const auto offset = atomicAdd(appending_offset, 1); appended_pixels[offset] = make_ushort2(x, y); } } //The fusion processor for re-initialize __device__ __forceinline__ void processFusionReinit(const mat34& world2camera, unsigned* appending_indicator) const { const int x = threadIdx.x + blockDim.x * blockIdx.x; const int y = threadIdx.y + blockDim.y * blockIdx.y; const auto offset = y * image_cols + x; if(x < search_window_halfsize || x >= image_cols - search_window_halfsize || y < search_window_halfsize || y >= image_rows - search_window_halfsize) { //Write to indicator before exit appending_indicator[offset] = 0; return; } //Load the data const float4 depth_vertex_confid = tex2D<float4>(observation_maps.vertex_time_map, x, y); const float4 depth_normal_radius = tex2D<float4>(observation_maps.normal_radius_map, x, y); const float4 image_color_time = tex2D<float4>(observation_maps.color_time_map, x, y); if(is_zero_vertex(depth_vertex_confid)) return; //The windows search state const int map_x_center = scale_factor * x; const int map_y_center = scale_factor * y; //The window search iteration variables SurfelFusionWindowState fusion_state; //The row search loop for(int dy = - fuse_window_halfsize; dy < fuse_window_halfsize; dy++) { for(int dx 
= - fuse_window_halfsize; dx < fuse_window_halfsize; dx++) { //The actual position of in the rendered map const int map_y = dy + map_y_center; const int map_x = dx + map_x_center; const auto index = tex2D<unsigned>(render_maps.index_map, map_x, map_y); if(index != 0xFFFFFFFF) { //Load the model vertex const float4 model_world_v4 = tex2D<float4>(render_maps.vertex_map, map_x, map_y); const float4 model_world_n4 = tex2D<float4>(render_maps.normal_map, map_x, map_y); //Transform it const float3 model_camera_v3 = world2camera.rot * model_world_v4 + world2camera.trans; const float3 model_camera_n3 = world2camera.rot * model_world_n4; //Some attributes commonly used for checking const float dot_value = dotxyz(model_camera_n3, depth_normal_radius); const float diff_z = fabsf(model_camera_v3.z - depth_vertex_confid.z); const float confidence = model_world_v4.w; const float z_dist = model_camera_v3.z; //const float dist_square = squared_distance(model_camera_v3, depth_vertex_confid); //First check for fusion if(dot_value >= 0.9f && diff_z <= 2 * 0.001f) { // Update it fusion_state.Update(confidence, z_dist, map_x, map_y); } } // There is a surfel here } // x iteration loop } // y iteration loop //For appending, as in reinit should mark all depth surfels unsigned pixel_indicator = 0; if(fusion_state.best_confid < -0.01) { pixel_indicator = 1; } appending_indicator[offset] = pixel_indicator; //For fusion if(fusion_state.best_confid > 0) { float4 model_vertex_confid = tex2D<float4>(render_maps.vertex_map, fusion_state.best_map_x, fusion_state.best_map_y); float4 model_normal_radius = tex2D<float4>(render_maps.normal_map, fusion_state.best_map_x, fusion_state.best_map_y); float4 model_color_time = tex2D<float4>(render_maps.color_time_map, fusion_state.best_map_x, fusion_state.best_map_y); const unsigned index = tex2D<unsigned>(render_maps.index_map, fusion_state.best_map_x, fusion_state.best_map_y); fuse_surfel_replace_color( depth_vertex_confid, depth_normal_radius, image_color_time, world2camera, current_time, model_vertex_confid, model_normal_radius, model_color_time ); //Write it geometry_arrays.vertex_confidence[index] = model_vertex_confid; geometry_arrays.normal_radius[index] = model_normal_radius; geometry_arrays.color_time[index] = model_color_time; geometry_arrays.fused_indicator[index] = 1; } } }; __global__ void fuseAndMarkAppendedObservationSurfelsKernel( const FusionAndMarkAppendedObservationSurfelDevice fuser, mat34 world2camera, unsigned* appended_pixel ) { fuser.processIndicator(world2camera, appended_pixel); } __global__ void fusionAndMarkAppendObservationAtomicKernel( const FusionAndMarkAppendedObservationSurfelDevice fuser, mat34 world2camera, unsigned* append_offset, ushort2* appended_pixel ) { fuser.processAtomic(world2camera, append_offset, appended_pixel); } __global__ void fuseAndMarkAppendedObservationSurfelReinitKernel( const FusionAndMarkAppendedObservationSurfelDevice fuser, mat34 world2camera, unsigned* appended_pixel ) { fuser.processFusionReinit(world2camera, appended_pixel); } __global__ void compactIndicatorToPixelKernel( const unsigned* candidate_pixel_indicator, const unsigned* prefixsum_indicator, unsigned img_cols, ushort2* compacted_pixels ) { const auto idx = threadIdx.x + blockIdx.x * blockDim.x; if(candidate_pixel_indicator[idx] > 0) { const auto offset = prefixsum_indicator[idx] - 1; const unsigned short x = idx % img_cols; const unsigned short y = idx / img_cols; compacted_pixels[offset] = make_ushort2(x, y); } } } // device } // surfelwarp void 
surfelwarp::SurfelFusionHandler::prepareFuserArguments(void *fuser_ptr) { //Recovery the fuser arguments auto& fuser = *((device::FusionAndMarkAppendedObservationSurfelDevice*)fuser_ptr); //The observation maps fuser.observation_maps.vertex_time_map = m_observation.vertex_config_map; fuser.observation_maps.normal_radius_map = m_observation.normal_radius_map; fuser.observation_maps.color_time_map = m_observation.color_time_map; //The rendered maps fuser.render_maps.vertex_map = m_fusion_maps.warp_vertex_map; fuser.render_maps.normal_map = m_fusion_maps.warp_normal_map; fuser.render_maps.index_map = m_fusion_maps.index_map; fuser.render_maps.color_time_map = m_fusion_maps.color_time_map; //The written array fuser.geometry_arrays.vertex_confidence = m_fusion_geometry.live_vertex_confid.RawPtr(); fuser.geometry_arrays.normal_radius = m_fusion_geometry.live_normal_radius.RawPtr(); fuser.geometry_arrays.color_time = m_fusion_geometry.color_time.RawPtr(); fuser.geometry_arrays.fused_indicator = m_remaining_surfel_indicator.Ptr(); //Other attributes fuser.current_time = m_current_time; fuser.image_cols = m_image_cols; fuser.image_rows = m_image_rows; } void surfelwarp::SurfelFusionHandler::processFusionAppendCompaction(cudaStream_t stream) { //Resize the array const auto num_surfels = m_fusion_geometry.live_vertex_confid.Size(); m_remaining_surfel_indicator.ResizeArrayOrException(num_surfels); //Construct the fuser device::FusionAndMarkAppendedObservationSurfelDevice fuser; prepareFuserArguments((void*)&fuser); dim3 blk(16, 16); dim3 grid(divUp(m_image_cols, blk.x), divUp(m_image_rows, blk.y)); device::fuseAndMarkAppendedObservationSurfelsKernel<<<grid, blk, 0, stream>>>( fuser, m_world2camera, m_appended_depth_surfel_indicator.ptr() ); } void surfelwarp::SurfelFusionHandler::processFusionReinit(cudaStream_t stream) { //Resize the array const auto num_surfels = m_fusion_geometry.live_vertex_confid.Size(); m_remaining_surfel_indicator.ResizeArrayOrException(num_surfels); //Construct the fuser device::FusionAndMarkAppendedObservationSurfelDevice fuser; prepareFuserArguments((void*)&fuser); dim3 blk(16, 16); dim3 grid(divUp(m_image_cols, blk.x), divUp(m_image_rows, blk.y)); device::fuseAndMarkAppendedObservationSurfelReinitKernel<<<grid, blk, 0, stream>>>( fuser, m_world2camera, m_appended_depth_surfel_indicator.ptr() ); } void surfelwarp::SurfelFusionHandler::processFusionAppendAtomic(cudaStream_t stream) { //Clear the attributes cudaSafeCall(cudaMemsetAsync(m_atomic_appended_pixel_index, 0, sizeof(unsigned), stream)); //Resize the array const auto num_surfels = m_fusion_geometry.live_vertex_confid.Size(); m_remaining_surfel_indicator.ResizeArrayOrException(num_surfels); //Construct the fuser device::FusionAndMarkAppendedObservationSurfelDevice fuser; prepareFuserArguments((void*)&fuser); dim3 blk(16, 16); dim3 grid(divUp(m_image_cols, blk.x), divUp(m_image_rows, blk.y)); device::fusionAndMarkAppendObservationAtomicKernel<<<grid, blk, 0, stream>>>( fuser, m_world2camera, m_atomic_appended_pixel_index, m_atomic_appended_observation_pixel.Ptr() ); } void surfelwarp::SurfelFusionHandler::compactAppendedIndicator(cudaStream_t stream) { m_appended_surfel_indicator_prefixsum.InclusiveSum(m_appended_depth_surfel_indicator, stream); //Invoke the kernel dim3 blk(128); dim3 grid(divUp(m_image_cols * m_image_rows, blk.x)); device::compactIndicatorToPixelKernel<<<grid, blk, 0, stream>>>( m_appended_depth_surfel_indicator.ptr(), m_appended_surfel_indicator_prefixsum.valid_prefixsum_array.ptr(), m_image_cols, 
m_compacted_appended_pixel.Ptr() ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif //Query the size unsigned num_appended_surfel; cudaSafeCall(cudaMemcpyAsync( &num_appended_surfel, m_appended_surfel_indicator_prefixsum.valid_prefixsum_array.ptr() + m_appended_surfel_indicator_prefixsum.valid_prefixsum_array.size() - 1, sizeof(unsigned), cudaMemcpyDeviceToHost, stream )); cudaSafeCall(cudaStreamSynchronize(stream)); m_compacted_appended_pixel.ResizeArrayOrException(num_appended_surfel); } void surfelwarp::SurfelFusionHandler::queryAtomicAppendedPixelSize(cudaStream_t stream) { unsigned num_candidate_pixels; cudaSafeCall(cudaMemcpyAsync( &num_candidate_pixels, m_atomic_appended_pixel_index, sizeof(unsigned), cudaMemcpyDeviceToHost, stream) ); cudaSafeCall(cudaStreamSynchronize(stream)); m_atomic_appended_observation_pixel.ResizeArrayOrException(num_candidate_pixels); }
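compactAppendedIndicator() above follows the standard indicator / prefix-sum / scatter pattern for stream compaction: element i survives iff its indicator is nonzero, its destination slot is the inclusive prefix sum at i minus one, and the last prefix-sum entry gives the total count that is copied back to the host. The self-contained Thrust sketch below illustrates only that pattern; it is not the surfelwarp API, and every name in it is hypothetical.

// Hedged illustration of indicator -> inclusive scan -> scatter compaction.
#include <thrust/device_vector.h>
#include <thrust/scan.h>

__global__ void scatter_kept(const unsigned* indicator, const unsigned* prefix,
                             int n, int* out) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n && indicator[i] > 0)
    out[prefix[i] - 1] = i;  // compacted output slot for surviving element i
}

void compact_sketch(const thrust::device_vector<unsigned>& indicator,
                    thrust::device_vector<int>& out_indices) {
  const int n = (int)indicator.size();
  thrust::device_vector<unsigned> prefix(n);
  thrust::inclusive_scan(indicator.begin(), indicator.end(), prefix.begin());
  const unsigned total = prefix.back();  // number of kept elements
  out_indices.resize(total);
  const int threads = 128, blocks = (n + threads - 1) / threads;
  scatter_kept<<<blocks, threads>>>(
      thrust::raw_pointer_cast(indicator.data()),
      thrust::raw_pointer_cast(prefix.data()),
      n, thrust::raw_pointer_cast(out_indices.data()));
}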
the_stack
* Benchmark: Sparse matrix operations, i.e. matrix-vector products (sparse.cpp and sparse.cu are identical, the latter being required for compilation using CUDA nvcc) * */ //#define VIENNACL_BUILD_INFO #ifndef NDEBUG #define NDEBUG #endif #define VIENNACL_WITH_UBLAS 1 #include <boost/numeric/ublas/matrix_sparse.hpp> #include <boost/numeric/ublas/operation_sparse.hpp> #include <boost/numeric/ublas/lu.hpp> #include "viennacl/scalar.hpp" #include "viennacl/vector.hpp" #include "viennacl/coordinate_matrix.hpp" #include "viennacl/compressed_matrix.hpp" #include "viennacl/ell_matrix.hpp" #include "viennacl/hyb_matrix.hpp" #include "viennacl/linalg/prod.hpp" #include "viennacl/linalg/norm_2.hpp" #include "viennacl/io/matrix_market.hpp" #include "viennacl/linalg/ilu.hpp" #include <iostream> #include <vector> #include "benchmark-utils.hpp" #include "io.hpp" #define BENCHMARK_RUNS 10 template<typename ScalarType> int run_benchmark() { Timer timer; double exec_time; //ScalarType std_result = 0; ScalarType std_factor1 = ScalarType(3.1415); ScalarType std_factor2 = ScalarType(42.0); viennacl::scalar<ScalarType> vcl_factor1(std_factor1); viennacl::scalar<ScalarType> vcl_factor2(std_factor2); boost::numeric::ublas::vector<ScalarType> ublas_vec1; boost::numeric::ublas::vector<ScalarType> ublas_vec2; if (!readVectorFromFile<ScalarType>("../examples/testdata/result65025.txt", ublas_vec1)) { std::cout << "Error reading RHS file" << std::endl; return 0; } std::cout << "done reading rhs" << std::endl; ublas_vec2 = ublas_vec1; viennacl::compressed_matrix<ScalarType, 1> vcl_compressed_matrix_1; viennacl::compressed_matrix<ScalarType, 4> vcl_compressed_matrix_4; viennacl::compressed_matrix<ScalarType, 8> vcl_compressed_matrix_8; viennacl::coordinate_matrix<ScalarType> vcl_coordinate_matrix_128; viennacl::ell_matrix<ScalarType, 1> vcl_ell_matrix_1; viennacl::hyb_matrix<ScalarType, 1> vcl_hyb_matrix_1; boost::numeric::ublas::compressed_matrix<ScalarType> ublas_matrix; if (!viennacl::io::read_matrix_market_file(ublas_matrix, "../examples/testdata/mat65k.mtx")) { std::cout << "Error reading Matrix file" << std::endl; return 0; } //unsigned int cg_mat_size = cg_mat.size(); std::cout << "done reading matrix" << std::endl; viennacl::vector<ScalarType> vcl_vec1(ublas_vec1.size()); viennacl::vector<ScalarType> vcl_vec2(ublas_vec1.size()); viennacl::vector<ScalarType> vcl_vec3(ublas_vec1.size()); //cpu to gpu: viennacl::copy(ublas_matrix, vcl_compressed_matrix_1); #ifndef VIENNACL_EXPERIMENTAL_DOUBLE_PRECISION_WITH_STREAM_SDK_ON_GPU viennacl::copy(ublas_matrix, vcl_compressed_matrix_4); viennacl::copy(ublas_matrix, vcl_compressed_matrix_8); #endif viennacl::copy(ublas_matrix, vcl_coordinate_matrix_128); viennacl::copy(ublas_matrix, vcl_ell_matrix_1); viennacl::copy(ublas_matrix, vcl_hyb_matrix_1); viennacl::copy(ublas_vec1, vcl_vec1); viennacl::copy(ublas_vec2, vcl_vec2); ///////////// Matrix operations ///////////////// std::cout << "------- Matrix-Vector product on CPU ----------" << std::endl; timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) { //ublas_vec1 = boost::numeric::ublas::prod(ublas_matrix, ublas_vec2); boost::numeric::ublas::axpy_prod(ublas_matrix, ublas_vec2, ublas_vec1, true); } exec_time = timer.get(); std::cout << "CPU time: " << exec_time << std::endl; std::cout << "CPU "; printOps(static_cast<double>(ublas_matrix.nnz()), static_cast<double>(exec_time) / static_cast<double>(BENCHMARK_RUNS)); std::cout << ublas_vec1[0] << std::endl; std::cout << "------- Matrix-Vector product with 
compressed_matrix ----------" << std::endl; vcl_vec1 = viennacl::linalg::prod(vcl_compressed_matrix_1, vcl_vec2); //startup calculation vcl_vec1 = viennacl::linalg::prod(vcl_compressed_matrix_4, vcl_vec2); //startup calculation vcl_vec1 = viennacl::linalg::prod(vcl_compressed_matrix_8, vcl_vec2); //startup calculation //std_result = 0.0; viennacl::backend::finish(); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) { vcl_vec1 = viennacl::linalg::prod(vcl_compressed_matrix_1, vcl_vec2); } viennacl::backend::finish(); exec_time = timer.get(); std::cout << "GPU time align1: " << exec_time << std::endl; std::cout << "GPU align1 "; printOps(static_cast<double>(ublas_matrix.nnz()), static_cast<double>(exec_time) / static_cast<double>(BENCHMARK_RUNS)); std::cout << vcl_vec1[0] << std::endl; std::cout << "Testing triangular solves: compressed_matrix" << std::endl; viennacl::copy(ublas_vec1, vcl_vec1); viennacl::linalg::inplace_solve(trans(vcl_compressed_matrix_1), vcl_vec1, viennacl::linalg::unit_lower_tag()); viennacl::copy(ublas_vec1, vcl_vec1); std::cout << "ublas..." << std::endl; timer.start(); boost::numeric::ublas::inplace_solve(trans(ublas_matrix), ublas_vec1, boost::numeric::ublas::unit_lower_tag()); std::cout << "Time elapsed: " << timer.get() << std::endl; std::cout << "ViennaCL..." << std::endl; viennacl::backend::finish(); timer.start(); viennacl::linalg::inplace_solve(trans(vcl_compressed_matrix_1), vcl_vec1, viennacl::linalg::unit_lower_tag()); viennacl::backend::finish(); std::cout << "Time elapsed: " << timer.get() << std::endl; ublas_vec1 = boost::numeric::ublas::prod(ublas_matrix, ublas_vec2); viennacl::backend::finish(); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) { vcl_vec1 = viennacl::linalg::prod(vcl_compressed_matrix_4, vcl_vec2); } viennacl::backend::finish(); exec_time = timer.get(); std::cout << "GPU time align4: " << exec_time << std::endl; std::cout << "GPU align4 "; printOps(static_cast<double>(ublas_matrix.nnz()), static_cast<double>(exec_time) / static_cast<double>(BENCHMARK_RUNS)); std::cout << vcl_vec1[0] << std::endl; viennacl::backend::finish(); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) { vcl_vec1 = viennacl::linalg::prod(vcl_compressed_matrix_8, vcl_vec2); } viennacl::backend::finish(); exec_time = timer.get(); std::cout << "GPU time align8: " << exec_time << std::endl; std::cout << "GPU align8 "; printOps(static_cast<double>(ublas_matrix.nnz()), static_cast<double>(exec_time) / static_cast<double>(BENCHMARK_RUNS)); std::cout << vcl_vec1[0] << std::endl; std::cout << "------- Matrix-Vector product with coordinate_matrix ----------" << std::endl; vcl_vec1 = viennacl::linalg::prod(vcl_coordinate_matrix_128, vcl_vec2); //startup calculation viennacl::backend::finish(); viennacl::copy(vcl_vec1, ublas_vec2); long err_cnt = 0; for (size_t i=0; i<ublas_vec1.size(); ++i) { if ( fabs(ublas_vec1[i] - ublas_vec2[i]) / std::max(fabs(ublas_vec1[i]), fabs(ublas_vec2[i])) > 1e-2) { std::cout << "Error at index " << i << ": Should: " << ublas_vec1[i] << ", Is: " << ublas_vec2[i] << std::endl; ++err_cnt; if (err_cnt > 5) break; } } viennacl::backend::finish(); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) { vcl_vec1 = viennacl::linalg::prod(vcl_coordinate_matrix_128, vcl_vec2); } viennacl::backend::finish(); exec_time = timer.get(); std::cout << "GPU time: " << exec_time << std::endl; std::cout << "GPU "; printOps(static_cast<double>(ublas_matrix.nnz()), static_cast<double>(exec_time) / 
static_cast<double>(BENCHMARK_RUNS)); std::cout << vcl_vec1[0] << std::endl; std::cout << "------- Matrix-Vector product with ell_matrix ----------" << std::endl; vcl_vec1 = viennacl::linalg::prod(vcl_ell_matrix_1, vcl_vec2); //startup calculation viennacl::backend::finish(); viennacl::copy(vcl_vec1, ublas_vec2); err_cnt = 0; for (size_t i=0; i<ublas_vec1.size(); ++i) { if ( fabs(ublas_vec1[i] - ublas_vec2[i]) / std::max(fabs(ublas_vec1[i]), fabs(ublas_vec2[i])) > 1e-2) { std::cout << "Error at index " << i << ": Should: " << ublas_vec1[i] << ", Is: " << ublas_vec2[i] << std::endl; ++err_cnt; if (err_cnt > 5) break; } } viennacl::backend::finish(); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) { vcl_vec1 = viennacl::linalg::prod(vcl_ell_matrix_1, vcl_vec2); } viennacl::backend::finish(); exec_time = timer.get(); std::cout << "GPU time: " << exec_time << std::endl; std::cout << "GPU "; printOps(static_cast<double>(ublas_matrix.nnz()), static_cast<double>(exec_time) / static_cast<double>(BENCHMARK_RUNS)); std::cout << vcl_vec1[0] << std::endl; std::cout << "------- Matrix-Vector product with hyb_matrix ----------" << std::endl; vcl_vec1 = viennacl::linalg::prod(vcl_hyb_matrix_1, vcl_vec2); //startup calculation viennacl::backend::finish(); viennacl::copy(vcl_vec1, ublas_vec2); err_cnt = 0; for (size_t i=0; i<ublas_vec1.size(); ++i) { if ( fabs(ublas_vec1[i] - ublas_vec2[i]) / std::max(fabs(ublas_vec1[i]), fabs(ublas_vec2[i])) > 1e-2) { std::cout << "Error at index " << i << ": Should: " << ublas_vec1[i] << ", Is: " << ublas_vec2[i] << std::endl; ++err_cnt; if (err_cnt > 5) break; } } viennacl::backend::finish(); timer.start(); for (int runs=0; runs<BENCHMARK_RUNS; ++runs) { vcl_vec1 = viennacl::linalg::prod(vcl_hyb_matrix_1, vcl_vec2); } viennacl::backend::finish(); exec_time = timer.get(); std::cout << "GPU time: " << exec_time << std::endl; std::cout << "GPU "; printOps(static_cast<double>(ublas_matrix.nnz()), static_cast<double>(exec_time) / static_cast<double>(BENCHMARK_RUNS)); std::cout << vcl_vec1[0] << std::endl; return EXIT_SUCCESS; } int main() { std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << " Device Info" << std::endl; std::cout << "----------------------------------------------" << std::endl; #ifdef VIENNACL_WITH_OPENCL std::cout << viennacl::ocl::current_device().info() << std::endl; #endif std::cout << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << "## Benchmark :: Sparse" << std::endl; std::cout << "----------------------------------------------" << std::endl; std::cout << std::endl; std::cout << " -------------------------------" << std::endl; std::cout << " # benchmarking single-precision" << std::endl; std::cout << " -------------------------------" << std::endl; run_benchmark<float>(); #ifdef VIENNACL_WITH_OPENCL if( viennacl::ocl::current_device().double_support() ) #endif { std::cout << std::endl; std::cout << " -------------------------------" << std::endl; std::cout << " # benchmarking double-precision" << std::endl; std::cout << " -------------------------------" << std::endl; run_benchmark<double>(); } return 0; }
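Throughput in this benchmark is reported through printOps(nnz, time_per_run) from benchmark-utils.hpp. The sketch below only documents the usual convention for sparse matrix-vector products, one multiply and one add per stored nonzero; the helper name and the exact formula used inside benchmark-utils.hpp are assumptions.

// Hedged sketch of an SpMV throughput report: ~2 flops per stored nonzero.
#include <iostream>
static void printOps_sketch(double num_nonzeros, double seconds_per_run) {
  const double gflops = 2.0 * num_nonzeros / seconds_per_run / 1.0e9;
  std::cout << gflops << " GFLOP/s" << std::endl;
}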
#include <ATen/ATen.h> #include <THC/THCAtomics.cuh> #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int max_block_num = 65000; return min(optimal_block_num, max_block_num); } template <typename scalar_t> __device__ scalar_t bilinear_interpolate( const scalar_t* bottom_data, const int height, const int width, scalar_t y, scalar_t x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty return 0; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } int y_low = static_cast<int>(y); int x_low = static_cast<int>(x); int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (scalar_t)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (scalar_t)x_low; } else { x_high = x_low + 1; } scalar_t ly = y - y_low; scalar_t lx = x - x_low; scalar_t hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation scalar_t v1 = bottom_data[y_low * width + x_low]; scalar_t v2 = bottom_data[y_low * width + x_high]; scalar_t v3 = bottom_data[y_high * width + x_low]; scalar_t v4 = bottom_data[y_high * width + x_high]; scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __global__ void PSROIALIGNRotatedForward( const int count, const scalar_t* bottom_data, const scalar_t* bottom_rois, const scalar_t spatial_scale, const int sampling_ratio, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, const int group_size, scalar_t* top_data) { CUDA_1D_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const scalar_t* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; // scalar_t roi_start_w = (offset_bottom_rois[1]) * spatial_scale; // scalar_t roi_start_h = (offset_bottom_rois[2]) * spatial_scale; // scalar_t roi_end_w = (offset_bottom_rois[3]) * spatial_scale; // scalar_t roi_end_h = (offset_bottom_rois[4]) * spatial_scale; // Do not using rounding; this implementation detail is critical scalar_t roi_center_w = offset_bottom_rois[1] * spatial_scale; scalar_t roi_center_h = offset_bottom_rois[2] * spatial_scale; scalar_t roi_width = offset_bottom_rois[3] * spatial_scale; scalar_t roi_height = offset_bottom_rois[4] * spatial_scale; // T theta = offset_bottom_rois[5] * M_PI / 180.0; scalar_t theta = offset_bottom_rois[5]; // // Force too small ROIs to be 1x1 // scalar_t roi_width = max(roi_end_w - roi_start_w, (scalar_t)1.); // avoid 0 // scalar_t roi_height = max(roi_end_h - roi_start_h, (scalar_t)1.); // Force malformed ROIs to be 1x1 roi_width = max(roi_width, (scalar_t)1.); roi_height = max(roi_height, (scalar_t)1.); // Compute w and h at bottom scalar_t bin_size_h = static_cast<scalar_t>(roi_height) / static_cast<scalar_t>(pooled_height); scalar_t bin_size_w = static_cast<scalar_t>(roi_width) / static_cast<scalar_t>(pooled_width); int gw = 
floor(static_cast<scalar_t>(pw)* group_size / pooled_width); int gh = floor(static_cast<scalar_t>(ph)* group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); int c = (ctop*group_size + gh)*group_size + gw; const scalar_t* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). // Appropriate translation needs to be applied after. scalar_t roi_start_h = -roi_height / 2.0; scalar_t roi_start_w = -roi_width / 2.0; scalar_t cosTheta = cos(theta); scalar_t sinTheta = sin(theta); const scalar_t sample_count = roi_bin_grid_h * roi_bin_grid_w; // e.g., iy = 0, 1 scalar_t output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const scalar_t yy = roi_start_h + ph * bin_size_h + static_cast<scalar_t>(iy + .5f) * bin_size_h / static_cast<scalar_t>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const scalar_t xx = roi_start_w + pw * bin_size_w + static_cast<scalar_t>(ix + .5f) * bin_size_w / static_cast<scalar_t>(roi_bin_grid_w); // Rotate by theta around the center and translate // T x = xx * cosTheta + yy * sinTheta + roi_center_w; // T y = yy * cosTheta - xx * sinTheta + roi_center_h; scalar_t x = xx * cosTheta - yy * sinTheta + roi_center_w; scalar_t y = xx * sinTheta + yy * cosTheta + roi_center_h; scalar_t val = bilinear_interpolate<scalar_t>( offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= sample_count; top_data[index] = output_val; // scalar_t out_sum = 0; // for (int h = hstart; h < hend; ++h) { // for (int w = wstart; w < wend; ++w) { // int bottom_index = h*width + w; // out_sum += offset_bottom_data[bottom_index]; // } // } // scalar_t bin_area = (hend - hstart)*(wend - wstart); // top_data[index] = is_empty? (scalar_t)0. 
: out_sum/bin_area; } } int PSROIAlignRotatedForwardLaucher(const at::Tensor features, const at::Tensor rois, const float spatial_scale, const int sample_num, const int channels, const int height, const int width, const int num_rois, const int pooled_height, const int pooled_width, const int output_dim, const int group_size, at::Tensor output) { // const int output_size = num_rois * pooled_height * pooled_width * channels; const int output_size = num_rois * pooled_height * pooled_width * output_dim; AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.type(), "PSROIAlignRotatedLaucherForward", ([&] { const scalar_t *bottom_data = features.data<scalar_t>(); const scalar_t *rois_data = rois.data<scalar_t>(); scalar_t *top_data = output.data<scalar_t>(); PSROIALIGNRotatedForward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>> ( output_size, bottom_data, rois_data, scalar_t(spatial_scale), sample_num, channels, height, width, pooled_height, pooled_width, output_dim, group_size, top_data); })); THCudaCheck(cudaGetLastError()); return 1; } template <typename scalar_t> __device__ void bilinear_interpolate_gradient( const int height, const int width, scalar_t y, scalar_t x, scalar_t* w1, scalar_t* w2, scalar_t* w3, scalar_t* w4, int* x_low, int* x_high, int* y_low, int* y_high, const int /*index*/ /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { // empty *w1 = *w2 = *w3 = *w4 = 0.; *x_low = *x_high = *y_low = *y_high = -1; return; } if (y <= 0) { y = 0; } if (x <= 0) { x = 0; } *y_low = static_cast<int>(y); *x_low = static_cast<int>(x); if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = (scalar_t)*y_low; } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = (scalar_t)*x_low; } else { *x_high = *x_low + 1; } scalar_t ly = y - *y_low; scalar_t lx = x - *x_low; scalar_t hy = 1. - ly, hx = 1. 
- lx; *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <typename scalar_t> __global__ void PSROIALIGNRotatedBackward( const int count, const scalar_t* top_diff, const scalar_t* bottom_rois, // const int num_rois, const scalar_t spatial_scale, const int sampling_ratio, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int output_dim, const int group_size, scalar_t* bottom_diff ) { CUDA_1D_KERNEL_LOOP(index, count) { // The output is in order (n, ctop, ph, pw) int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int ctop = (index / pooled_width / pooled_height) % output_dim; int n = index / pooled_width / pooled_height / output_dim; // [start, end) interval for spatial sampling const scalar_t* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; // Do not round scalar_t roi_center_w = offset_bottom_rois[1] * spatial_scale; scalar_t roi_center_h = offset_bottom_rois[2] * spatial_scale; scalar_t roi_width = offset_bottom_rois[3] * spatial_scale; scalar_t roi_height = offset_bottom_rois[4] * spatial_scale; // T theta = offset_bottom_rois[5] * M_PI / 180.0; scalar_t theta = offset_bottom_rois[5]; // scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale; // scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale; // scalar_t roi_end_w = offset_bottom_rois[3] * spatial_scale; // scalar_t roi_end_h = offset_bottom_rois[4] * spatial_scale; // Force too small ROIs to be 1x1 // scalar_t roi_width = max(roi_end_w - roi_start_w, (scalar_t)1.); // avoid 0 // scalar_t roi_height = max(roi_end_h - roi_start_h, (scalar_t)1.); roi_width = max(roi_width, (scalar_t)1.); roi_height = max(roi_height, (scalar_t)1.); // Compute w and h at bottom scalar_t bin_size_h = static_cast<scalar_t>(roi_height) / static_cast<scalar_t>(pooled_height); scalar_t bin_size_w = static_cast<scalar_t>(roi_width) / static_cast<scalar_t>(pooled_width); // int hstart = floor(static_cast<scalar_t>(ph)* bin_size_h // + roi_start_h); // int wstart = floor(static_cast<scalar_t>(pw)* bin_size_w // + roi_start_w); // int hend = ceil(static_cast<scalar_t>(ph + 1) * bin_size_h // + roi_start_h); // int wend = ceil(static_cast<scalar_t>(pw + 1) * bin_size_w // + roi_start_w); // // Add roi offsets and clip to input boundaries // hstart = min(max(hstart, 0), height); // hend = min(max(hend, 0), height); // wstart = min(max(wstart, 0), width); // wend = min(max(wend, 0), width); // bool is_empty = (hend <= hstart) || (wend <= wstart); // Compute c at bottom int gw = floor(static_cast<scalar_t>(pw)* group_size / pooled_width); int gh = floor(static_cast<scalar_t>(ph)* group_size / pooled_height); gw = min(max(gw, 0), group_size - 1); gh = min(max(gh, 0), group_size - 1); int c = (ctop*group_size + gh)*group_size + gw; scalar_t* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; // scalar_t bin_area = (hend - hstart)*(wend - wstart); // scalar_t diff_val = is_empty ? (scalar_t)0. 
: top_diff[index] / bin_area; // for (int h = hstart; h < hend; ++h) { // for (int w = wstart; w < wend; ++w) { // int bottom_index = h*width + w; // atomicAdd(offset_bottom_diff + bottom_index, diff_val); // } // } // int top_offset = (n * channels + ctop) * pooled_height * pooled_width; // const scalar_t* offset_top_diff = top_diff + top_offset; // const scalar_t top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; const scalar_t top_diff_this_bin = top_diff[index]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). // Appropriate translation needs to be applied after. scalar_t roi_start_h = -roi_height / 2.0; scalar_t roi_start_w = -roi_width / 2.0; scalar_t cosTheta = cos(theta); scalar_t sinTheta = sin(theta); // We do average (integral) pooling inside a bin const scalar_t sample_count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy++) { // e.g., iy = 0, 1 const scalar_t yy = roi_start_h + ph * bin_size_h + static_cast<scalar_t>(iy + .5f) * bin_size_h / static_cast<scalar_t>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const scalar_t xx = roi_start_w + pw * bin_size_w + static_cast<scalar_t>(ix + .5f) * bin_size_w / static_cast<scalar_t>(roi_bin_grid_w); // Rotate by theta around the center and translate // T x = xx * cosTheta + yy * sinTheta + roi_center_w; // T y = yy * cosTheta - xx * sinTheta + roi_center_h; scalar_t x = xx * cosTheta - yy * sinTheta + roi_center_w; scalar_t y = xx * sinTheta + yy * cosTheta + roi_center_h; scalar_t w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient<scalar_t>( height, width, y, x, &w1, &w2, &w3, &w4, &x_low, &x_high, &y_low, &y_high, index); // TODO: choose the index scalar_t g1 = top_diff_this_bin * w1 / sample_count; scalar_t g2 = top_diff_this_bin * w2 / sample_count; scalar_t g3 = top_diff_this_bin * w3 / sample_count; scalar_t g4 = top_diff_this_bin * w4 / sample_count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd( offset_bottom_diff + y_low * width + x_low, static_cast<scalar_t>(g1)); atomicAdd( offset_bottom_diff + y_low * width + x_high, static_cast<scalar_t>(g2)); atomicAdd( offset_bottom_diff + y_high * width + x_low, static_cast<scalar_t>(g3)); atomicAdd( offset_bottom_diff + y_high * width + x_high, static_cast<scalar_t>(g4)); } // if } // ix } // iy } } int PSROIAlignRotatedBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois, const float spatial_scale, const int sample_num, const int channels, const int height, const int width, const int num_rois, const int pooled_height, const int pooled_width, const int output_dim, const int group_size, at::Tensor bottom_grad) { const int output_size = num_rois * pooled_height * pooled_width * output_dim; AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.type(), "PSROIAlignLaucherBackward", ([&] { const scalar_t *top_diff = top_grad.data<scalar_t>(); const scalar_t *rois_data = rois.data<scalar_t>(); scalar_t *bottom_diff = bottom_grad.data<scalar_t>(); if (sizeof(scalar_t) == sizeof(double)) { fprintf(stderr, "double is not supported"); exit(-1); } PSROIALIGNRotatedBackward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, top_diff, rois_data, 
spatial_scale, sample_num, channels, height, width, pooled_height, pooled_width, output_dim, group_size, bottom_diff); })); THCudaCheck(cudaGetLastError()); return 1; }
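/* ---------------------------------------------------------------------------
 * Illustrative host-side sketch (an assumption, not part of the extension
 * above): both PSROIALIGNRotatedForward and PSROIALIGNRotatedBackward map a
 * sample point (xx, yy), expressed relative to the RoI centre, into feature
 * map coordinates by rotating it by theta and translating by the RoI centre.
 * The helper below mirrors that arithmetic for a single point so the
 * transform can be checked on the CPU; all names here are hypothetical.
 * ------------------------------------------------------------------------- */
#include <cmath>

struct Point2D { double x; double y; };

inline Point2D rotate_roi_sample(double xx, double yy, double theta,
                                 double roi_center_w, double roi_center_h)
{
  // Same rotation used in the kernels:
  //   x = xx*cos(theta) - yy*sin(theta) + roi_center_w
  //   y = xx*sin(theta) + yy*cos(theta) + roi_center_h
  Point2D p;
  p.x = xx * std::cos(theta) - yy * std::sin(theta) + roi_center_w;
  p.y = xx * std::sin(theta) + yy * std::cos(theta) + roi_center_h;
  return p;
}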
#include <array> // std::array #include <cuda_runtime.h> // cudaError_t #include <cusolverDn.h> // Dn = dense (matrices) #include <cuComplex.h> // cuComplex, cuDoubleComplex #include "gputimer.h" // GpuTimer /* ****************************************************************** */ /* ****** "BOILERPLATE" routines for creating arbitrary initialization * values, print out for human reaidng of results ********************* /* ****************************************************************** */ void printMatrix(int m, int n, const cuDoubleComplex *A, int lda, const char* name) { std::cout << name << std::endl; for (int row =0; row <m; row++) { for (int col =0 ; col <n ; col++) { cuDoubleComplex Areg = A[row + col*lda]; std::cout << std::setprecision(5) << Areg.x << "+i" << Areg.y << " " ; } std::cout << std::endl; } } template <typename TT> void print1darr(const int N, const TT *A, int lda, const char* name) { std::cout << name << std::endl; for (int row =0; row < N; row++) { TT Areg = A[row *lda]; std::cout << Areg.x << "+i"<< Areg.y << " " ; // std::cout << Areg << " "; } std::cout << std::endl; } /* ****************************************************************** */ /* ****** END of boilerplate ******************************************/ /* ****************************************************************** */ /* ****************************************************************** */ /* ****** MAIN routine ************************************************/ /* ****************************************************************** */ int main(int argc, char* argv[]) { constexpr const int L = 14; // number of sites constexpr const int d = 2; // dim. of state space int lda = 1<<(L-1); cuDoubleComplex *d_Psi = nullptr; double *d_S = nullptr; cuDoubleComplex *d_SCC = nullptr; // cuDoubleComplex version of S, 1-dim. array of singular values cuDoubleComplex *d_U = nullptr; cuDoubleComplex *d_VT = nullptr; int *devInfo = NULL; cuDoubleComplex *d_work = nullptr; double *d_rwork = NULL; // cuBLAS matrix multiplication step cuDoubleComplex *d_US = nullptr; // calculate new Psi cuDoubleComplex *d_Psi_new = nullptr; int lwork =0; /* ************************************************************** */ /* ************************************************************** */ /* ****** BOILERPLATE initialization, values ******************** */ /* ************************************************************** */ // create_fixed_CC_mat(d,L,Psi); // printMatrix(lda,d, &Psi.data() ,lda,"Psi"); // cuDoubleComplex Psi[lda*d]; // cuDoubleComplex* Psi = new cuDoubleComplex[lda*d]; // boilerplate, initialization; matrix assumed to be column-major ordering std::array<cuDoubleComplex, (1<<(L-1))*d> Psi; { int M = 1<<(L-1); // d^{(L-1)}, where d is dim. 
of state space, L is number of sites for (int i =0; i< M; i++) { // i is the "row" of a matrix, it's an index double f = ((double) i*(0.9/M)+0.1); double theta_f = 2.* acos( -1.)*f; cuDoubleComplex Ad0 = { f*cos(theta_f) , f*sin(theta_f) } ; cuDoubleComplex Ad1 = { (1.-f)*sin(theta_f) , (1.-f)*cos(theta_f) } ; Psi[i + M*0] = Ad0 ; Psi[i + M*1] = Ad1 ; std::cout << std::setprecision(5) << Ad0.x << "+i" << Ad0.y << " " << Ad1.x << "+i" << Ad1.y << std::endl; } } GpuTimer timer; /* ************************************************************** */ /* ****** END of BOILERPLATE initialization, values ************* */ /* ************************************************************** */ cusolverDnHandle_t cusolverH = nullptr; cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS; // step 1: create cusolverDn handle cusolver_status = cusolverDnCreate(&cusolverH); cublasHandle_t cublasH = nullptr; cublasStatus_t cublas_status = CUBLAS_STATUS_SUCCESS; // step 1b: create cublas handle cublasCreate(&cublasH); // step 2: copy A and B to device cudaMalloc((void**)&d_Psi , sizeof(cuDoubleComplex)*lda*d); cudaMalloc((void**)&d_S , sizeof(double)*d); cudaMalloc((void**)&d_SCC , sizeof(cuDoubleComplex)*d); cudaMalloc((void**)&d_U , sizeof(cuDoubleComplex)*lda*lda); cudaMalloc((void**)&d_VT , sizeof(cuDoubleComplex)*d*d); cudaMalloc((void**)&devInfo, sizeof(int)); cudaMalloc((void**)&d_US , sizeof(cuDoubleComplex)*lda*lda); cudaMalloc((void**)&d_Psi_new , sizeof(cuDoubleComplex)*lda*d); cudaMemcpy(d_Psi, Psi.data(), sizeof(cuDoubleComplex)*lda*d, cudaMemcpyHostToDevice); // step 3: query working space of SVD timer.Start(); // timer "boilerplate" cusolver_status = cusolverDnZgesvd_bufferSize(cusolverH, // cusolver Handle lda,d, // matrix size dimensions of Psi &lwork ); cudaMalloc((void**)&d_work , sizeof(cuDoubleComplex)*lwork); // step 4: compute SVD cusolver_status = cusolverDnZgesvd(cusolverH,'A','A', lda,d, d_Psi,lda, d_S, d_U,lda, // ldu d_VT,d, // ldvt, d_work,lwork,d_rwork,devInfo); // change type of 1-dim. array of singular values S from double to cuDoubleComplex cudaMemcpy2D(d_SCC, // dst - Destination memory address sizeof(cuDoubleComplex), // dpitch - Pitch of destination memory (1 cuDoubleComplex, so skip over 2 double values) d_S, // src 1*sizeof(double), // spitch sizeof(double), // width of matrix transfer (columns in bytes) d, // height of matrix transfer (rows) cudaMemcpyDeviceToDevice); // Matrix Multiply U*S to obtain US, or new Psi, "flattened". cublas_status = cublasZdgmm(cublasH, CUBLAS_SIDE_RIGHT, lda,lda, d_U,lda, d_SCC,1, d_US,lda); // obtain new Psi, 1st step - "reduce" matrix size dim. 
to the Schmidt rank cudaMemcpy(d_Psi_new, d_US, sizeof(cuDoubleComplex)*lda*d, cudaMemcpyDeviceToDevice); // sanity check // cuDoubleComplex US[lda*lda]; // d^{L-1)-by-d unitary matrix cuDoubleComplex VT[d*d]; // d-by-d unitary matrix cudaMemcpy(VT,d_VT, sizeof(cuDoubleComplex)*d*d,cudaMemcpyDeviceToHost); // cudaMemcpy(US,d_US, sizeof(cuDoubleComplex)*lda*lda,cudaMemcpyDeviceToHost); // printMatrix(lda, lda, US, lda, "US"); // 1 should observe many 0 entries which is expected printMatrix(d, d, VT, d, "VT"); // cuDoubleComplex Psi_new[lda*d]; // d^{L-1)-by-d unitary matrix // cudaMemcpy(Psi_new,d_Psi_new, sizeof(cuDoubleComplex)*lda*d,cudaMemcpyDeviceToHost); // printMatrix(lda, d, Psi_new, lda, "Psi_new"); // free resources if (d_Psi ) cudaFree(d_Psi); if (d_S ) cudaFree(d_S); if (d_SCC ) cudaFree(d_SCC); if (d_U ) cudaFree(d_U); if (d_US ) cudaFree(d_US); if (d_work ) cudaFree(d_work); /* ****************************************************************** */ /* ****** 2nd iteration ********************************************* */ /* ****************************************************************** */ double *d_S_l2 = nullptr; cuDoubleComplex *d_SCC_l2 = nullptr; // cuDoubleComplex version of S, 1-dim. array of singular values cuDoubleComplex *d_U_l2 = nullptr; cuDoubleComplex *d_VT_l2 = nullptr; cuDoubleComplex *d_work_l2 = nullptr; // cuBLAS matrix multiplication step cuDoubleComplex *d_US_l2 = nullptr; // calculate new Psi cuDoubleComplex *d_Psi_new_l2 = nullptr; // step 2: device memory (GPU) allocation lda = (1<<(L-2)); const int dr = d*d; // sanity check // cuDoubleComplex Psi_new[lda*dr]; // cudaMemcpy(Psi_new,d_Psi_new, sizeof(cuDoubleComplex)*lda*lda,cudaMemcpyDeviceToHost); // printMatrix(lda, lda, Psi_new, lda, "Psi_new"); cudaMalloc((void**)&d_S_l2 , sizeof(double)*dr*dr); cudaMalloc((void**)&d_SCC_l2 , sizeof(cuDoubleComplex)*dr*dr); cudaMalloc((void**)&d_U_l2 , sizeof(cuDoubleComplex)*lda*lda); cudaMalloc((void**)&d_VT_l2 , sizeof(cuDoubleComplex)*dr*dr); cudaMalloc((void**)&d_US_l2 , sizeof(cuDoubleComplex)*lda*lda); cudaMalloc((void**)&d_Psi_new_l2 , sizeof(cuDoubleComplex)*lda*dr); // step 3: query working space of SVD cusolver_status = cusolverDnZgesvd_bufferSize(cusolverH, // cusolver Handle lda,dr, // matrix size dimensions of Psi &lwork ); cudaMalloc((void**)&d_work_l2 , sizeof(cuDoubleComplex)*lwork); // step 4: compute SVD cusolver_status = cusolverDnZgesvd(cusolverH,'A','A', lda,dr, d_Psi_new,lda, d_S_l2, d_U_l2,lda, // ldu d_VT_l2,dr, // ldvt, d_work_l2,lwork,NULL,devInfo); // change type of 1-dim. array of singular values S from double to cuDoubleComplex cudaMemcpy2D(d_SCC_l2, // dst - Destination memory address sizeof(cuDoubleComplex), // dpitch - Pitch of destination memory (1 cuDoubleComplex, so skip over 2 double values) d_S_l2, // src 1*sizeof(double), // spitch sizeof(double), // width of matrix transfer (columns in bytes) dr, // height of matrix transfer (rows) cudaMemcpyDeviceToDevice); // sanity check // cuDoubleComplex S_l2[dr]; // 1-dim. array // cuDoubleComplex U_l2[lda*lda]; // cudaMemcpy(S_l2,d_SCC_l2, sizeof(cuDoubleComplex)*dr,cudaMemcpyDeviceToHost); // cudaMemcpy(U_l2,d_U_l2, sizeof(cuDoubleComplex)*lda*lda,cudaMemcpyDeviceToHost); // print1darr<cuDoubleComplex>(dr,S_l2,1," S_l2 "); // printMatrix(lda, lda, U_l2, lda, "U_l2"); // Matrix Multiply U*S to obtain US, or new Psi, "flattened". cublas_status = cublasZdgmm(cublasH, CUBLAS_SIDE_RIGHT, lda,lda, d_U_l2,lda, d_SCC_l2,1, d_US_l2,lda); // obtain new Psi, 1st step - "reduce" matrix size dim. 
to the Schmidt rank cudaMemcpy(d_Psi_new_l2, d_US_l2, sizeof(cuDoubleComplex)*lda*dr, cudaMemcpyDeviceToDevice); // timer "boilerplate" timer.Stop(); cudaDeviceSynchronize(); // sanity check // cuDoubleComplex US_l2[lda*lda]; // d^{L-2)-by-dr unitary matrix // cudaMemcpy(US_l2,d_US_l2, sizeof(cuDoubleComplex)*lda*lda,cudaMemcpyDeviceToHost); // printMatrix(lda, lda, US_l2, lda, "US_l2 (2nd iteration)"); // cuDoubleComplex Psi_new_l2[lda*dr]; // d^{L-1)-by-d unitary matrix std::array<cuDoubleComplex, (1<<(L-2))*dr> Psi_new_l2; cudaMemcpy(Psi_new_l2.data(),d_Psi_new_l2, sizeof(cuDoubleComplex)*lda*dr,cudaMemcpyDeviceToHost); { for (int row =0; row <lda; row++) { for (int col =0 ; col <dr ; col++) { cuDoubleComplex Areg = Psi_new_l2[row + col*lda]; std::cout << std::setprecision(5) << Areg.x << "+i" << Areg.y << " " ; } std::cout << std::endl; } } // cudaDeviceSynchronize(); // printMatrix(lda, dr, Psi_new_l2, lda, "Psi_new_l2 (2nd iteration)"); // std::cout << " lda for 2nd iteration : " << lda << std::endl; // sanity check // std::cout << " dr for 2nd iteration : " << dr << std::endl; // sanity check for matrix size dim. cuDoubleComplex VT_l2[dr*dr]; // d^{L-1)-by-d unitary matrix cudaMemcpy(VT_l2,d_VT_l2, sizeof(cuDoubleComplex)*dr*dr,cudaMemcpyDeviceToHost); std::cout << "\n VT = (matlab base-1), 2nd. iteration " << std::endl; printMatrix(dr, dr, VT_l2, dr, "VT 2nd iteration"); std::cout << "===== " << std::endl; // std::cout << " Calculation of 2 iterations of SVD and matrix multiplication : " << std::setprecision(7) << timer.Elapsed() << " ms " << std::cout << " Calculation of 2 iterations of SVD and matrix multiplication : " << timer.Elapsed() << " ms " << " for " << (1<<L) << " states (of the system " << std::endl; // free all resources if (d_VT ) cudaFree(d_VT); if (d_VT_l2 ) cudaFree(d_VT_l2); if (d_U_l2 ) cudaFree(d_U_l2); if (d_work_l2 ) cudaFree(d_work_l2); if (devInfo ) cudaFree(devInfo); if (d_rwork ) cudaFree(d_rwork); if (d_US_l2 ) cudaFree(d_US_l2); if (d_Psi_new ) cudaFree(d_Psi_new); if (d_Psi_new_l2 ) cudaFree(d_Psi_new_l2); if (cusolverH) cusolverDnDestroy(cusolverH); if (cublasH ) cublasDestroy(cublasH); cudaDeviceReset(); return 0; }
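/* ---------------------------------------------------------------------------
 * Illustrative sketch (an assumption, not in the program above): devInfo is
 * allocated and passed to cusolverDnZgesvd, but its value is never inspected.
 * cuSOLVER documents devInfo == 0 as success, devInfo < 0 as an illegal
 * parameter, and devInfo > 0 as the number of unconverged superdiagonals.
 * A minimal check could copy it back and test it, as in this hypothetical
 * helper.
 * ------------------------------------------------------------------------- */
#include <cstdio>
#include <cuda_runtime.h>

inline bool gesvd_converged(const int* devInfo_d)
{
  int info = 0;
  cudaMemcpy(&info, devInfo_d, sizeof(int), cudaMemcpyDeviceToHost);
  if (info < 0) {
    std::printf("gesvd: parameter %d had an illegal value\n", -info);
  } else if (info > 0) {
    std::printf("gesvd: %d superdiagonals did not converge\n", info);
  }
  return info == 0;
}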
* Device code. */ #ifndef _PARTICLES_KERNEL_H_ #define _PARTICLES_KERNEL_H_ #include <stdio.h> #include <math.h> #include "cutil_math.h" #include "math_constants.h" #include "particles_kernel.cuh" #if USE_TEX // textures for particle position and velocity texture<float4, 1, cudaReadModeElementType> oldPosTex; texture<float4, 1, cudaReadModeElementType> oldVelTex; texture<uint2, 1, cudaReadModeElementType> particleHashTex; texture<uint, 1, cudaReadModeElementType> cellStartTex; texture<uint, 1, cudaReadModeElementType> gridCountersTex; texture<uint, 1, cudaReadModeElementType> gridCellsTex; #endif __constant__ SimParams params; // integrate particle attributes __global__ void integrate(float4* newPos, float4* newVel, float4* oldPos, float4* oldVel, float deltaTime) { int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; float4 pos4 = oldPos[index]; float4 vel4 = oldVel[index]; float3 pos = make_float3(pos4); float3 vel = make_float3(vel4); vel += params.gravity * deltaTime; vel *= params.globalDamping; // new position = old position + velocity * deltaTime pos += vel * deltaTime; // bounce off cube sides if (pos.x > 1.0f - params.particleRadius) { pos.x = 1.0f - params.particleRadius; vel.x *= params.boundaryDamping; } if (pos.x < -1.0f + params.particleRadius) { pos.x = -1.0f + params.particleRadius; vel.x *= params.boundaryDamping;} if (pos.y > 1.0f - params.particleRadius) { pos.y = 1.0f - params.particleRadius; vel.y *= params.boundaryDamping; } if (pos.y < -1.0f + params.particleRadius) { pos.y = -1.0f + params.particleRadius; vel.y *= params.boundaryDamping;} if (pos.z > 1.0f - params.particleRadius) { pos.z = 1.0f - params.particleRadius; vel.z *= params.boundaryDamping; } if (pos.z < -1.0f + params.particleRadius) { pos.z = -1.0f + params.particleRadius; vel.z *= params.boundaryDamping;} // store new position and velocity newPos[index] = make_float4(pos, pos4.w); newVel[index] = make_float4(vel, vel4.w); } // calculate position in uniform grid __device__ int3 calcGridPos(float4 p) { int3 gridPos; gridPos.x = floor((p.x - params.worldOrigin.x) / params.cellSize.x); gridPos.y = floor((p.y - params.worldOrigin.y) / params.cellSize.y); gridPos.z = floor((p.z - params.worldOrigin.z) / params.cellSize.z); return gridPos; } // calculate address in grid from position (clamping to edges) __device__ uint calcGridHash(int3 gridPos) { gridPos.x = max(0, min(gridPos.x, params.gridSize.x-1)); gridPos.y = max(0, min(gridPos.y, params.gridSize.y-1)); gridPos.z = max(0, min(gridPos.z, params.gridSize.z-1)); return __mul24(__mul24(gridPos.z, params.gridSize.y), params.gridSize.x) + __mul24(gridPos.y, params.gridSize.x) + gridPos.x; } // add particle to cell using atomics __device__ void addParticleToCell(int3 gridPos, uint index, uint* gridCounters, uint* gridCells) { // calculate grid hash uint gridHash = calcGridHash(gridPos); // increment cell counter using atomics #if defined CUDA_NO_SM_11_ATOMIC_INTRINSICS int counter = 0; #else int counter = atomicAdd(&gridCounters[gridHash], 1); // returns previous value counter = min(counter, params.maxParticlesPerCell-1); #endif // write particle index into this cell (very uncoalesced!) 
gridCells[gridHash*params.maxParticlesPerCell + counter] = index; } // update uniform grid __global__ void updateGridD(float4* pos, uint* gridCounters, uint* gridCells) { int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; float4 p = pos[index]; // get address in grid int3 gridPos = calcGridPos(p); addParticleToCell(gridPos, index, gridCounters, gridCells); } // calculate grid hash value for each particle __global__ void calcHashD(float4* pos, uint2* particleHash) { int index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; float4 p = pos[index]; // get address in grid int3 gridPos = calcGridPos(p); uint gridHash = calcGridHash(gridPos); // store grid hash and particle index particleHash[index] = make_uint2(gridHash, index); } // rearrange particle data into sorted order, and find the start of each cell in the // sorted hash array __global__ void reorderDataAndFindCellStartD(uint2* particleHash, // particle id sorted by hash float4* oldPos, float4* oldVel, float4* sortedPos, float4* sortedVel, uint* cellStart) { int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; uint2 sortedData = particleHash[index]; // Load hash data into shared memory so that we can look // at neighboring particle's hash value without loading // two hash values per thread __shared__ uint sharedHash[257]; sharedHash[threadIdx.x+1] = sortedData.x; if (index > 0 && threadIdx.x == 0) { // first thread in block must load neighbor particle hash volatile uint2 prevData = particleHash[index-1]; sharedHash[0] = prevData.x; } __syncthreads(); if (index == 0 || sortedData.x != sharedHash[threadIdx.x]) { cellStart[sortedData.x] = index; } // Now use the sorted index to reorder the pos and vel data float4 pos = FETCH(oldPos, sortedData.y); // macro does either global read or texture fetch float4 vel = FETCH(oldVel, sortedData.y); // see particles_kernel.cuh __syncthreads(); sortedPos[index] = pos; sortedVel[index] = vel; } // collide two spheres using DEM method __device__ float3 collideSpheres(float4 posA, float4 posB, float4 velA, float4 velB, float radiusA, float radiusB, float attraction) { // calculate relative position float3 relPos; relPos.x = posB.x - posA.x; relPos.y = posB.y - posA.y; relPos.z = posB.z - posA.z; float dist = length(relPos); float collideDist = radiusA + radiusB; float3 force = make_float3(0.0f); if (dist < collideDist) { float3 norm = relPos / dist; // relative velocity float3 relVel; relVel.x = velB.x - velA.x; relVel.y = velB.y - velA.y; relVel.z = velB.z - velA.z; // relative tangential velocity float3 tanVel = relVel - (dot(relVel, norm) * norm); // spring force force = -params.spring*(collideDist - dist) * norm; // dashpot (damping) force force += params.damping*relVel; // tangential shear force force += params.shear*tanVel; // attraction force += attraction*relPos; } return force; } // collide particle with all particles in a given cell // version using grid built with atomics __device__ float3 collideCell(int3 gridPos, uint index, float4 pos, float4 vel, float4* oldPos, float4* oldVel, uint* gridCounters, uint* gridCells) { float3 force = make_float3(0.0f); if ((gridPos.x < 0) || (gridPos.x > params.gridSize.x-1) || (gridPos.y < 0) || (gridPos.y > params.gridSize.y-1) || (gridPos.z < 0) || (gridPos.z > params.gridSize.z-1)) { return force; } uint gridHash = calcGridHash(gridPos); // iterate over particles in this cell uint particlesInCell = FETCH(gridCounters, gridHash); particlesInCell = min(particlesInCell, params.maxParticlesPerCell-1); for(uint i=0; i<particlesInCell; i++) { uint 
index2 = FETCH(gridCells, gridHash*params.maxParticlesPerCell + i); if (index2 != index) { // check not colliding with self float4 pos2 = FETCH(oldPos, index2); float4 vel2 = FETCH(oldVel, index2); // collide two spheres float3 projVec = collideSpheres(pos, pos2, vel, vel2, params.particleRadius, params.particleRadius, params.attraction); force += projVec; } } return force; } // version using sorted grid __device__ float3 collideCell2(int3 gridPos, uint index, float4 pos, float4 vel, float4* oldPos, float4* oldVel, uint2* particleHash, uint* cellStart) { float3 force = make_float3(0.0f); if ((gridPos.x < 0) || (gridPos.x > params.gridSize.x-1) || (gridPos.y < 0) || (gridPos.y > params.gridSize.y-1) || (gridPos.z < 0) || (gridPos.z > params.gridSize.z-1)) { return force; } uint gridHash = calcGridHash(gridPos); // get start of bucket for this cell uint bucketStart = FETCH(cellStart, gridHash); if (bucketStart == 0xffffffff) return force; // cell empty // iterate over particles in this cell for(uint i=0; i<params.maxParticlesPerCell; i++) { uint index2 = bucketStart + i; uint2 cellData = FETCH(particleHash, index2); if (cellData.x != gridHash) break; // no longer in same bucket if (index2 != index) { // check not colliding with self float4 pos2 = FETCH(oldPos, index2); float4 vel2 = FETCH(oldVel, index2); // collide two spheres float3 projVec = collideSpheres(pos, pos2, vel, vel2, params.particleRadius, params.particleRadius, params.attraction); force += projVec; } } return force; } __global__ void collideD(float4* newPos, float4* newVel, float4* oldPos, float4* oldVel, #if USE_SORT uint2* particleHash, uint* cellStart #else uint* gridCounters, uint* gridCells #endif ) { int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x; // read particle data from sorted arrays float4 pos = FETCH(oldPos, index); float4 vel = FETCH(oldVel, index); // get address in grid int3 gridPos = calcGridPos(pos); float3 force = make_float3(0.0f); // examine only neighbouring cells for(int z=-1; z<=1; z++) { for(int y=-1; y<=1; y++) { for(int x=-1; x<=1; x++) { #if USE_SORT force += collideCell2(gridPos + make_int3(x, y, z), index, pos, vel, oldPos, oldVel, particleHash, cellStart); #else force += collideCell(gridPos + make_int3(x, y, z), index, pos, vel, oldPos, oldVel, gridCounters, gridCells); #endif } } } float3 projVec = collideSpheres(pos, params.colliderPos, vel, make_float4(0.0f, 0.0f, 0.0f, 0.0f), params.particleRadius, params.colliderRadius, 0.0f); force += projVec; #if USE_SORT // write new velocity back to original unsorted location volatile uint2 sortedData = particleHash[index]; newVel[sortedData.y] = vel + make_float4(force, 0.0f); #else newVel[index] = vel + make_float4(force, 0.0f); #endif } #endif
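/* ---------------------------------------------------------------------------
 * Illustrative host-side sketch (an assumption, not part of the kernel file
 * above): the kernels read SimParams from __constant__ memory, so a host
 * driver would typically upload the parameters with cudaMemcpyToSymbol and
 * then launch, e.g., integrate(). The block size and the assumption that
 * numParticles is a multiple of it are hypothetical; the kernels above do no
 * bounds checking on the particle index.
 * ------------------------------------------------------------------------- */
void integrateSystemSketch(float4* newPos, float4* newVel,
                           float4* oldPos, float4* oldVel,
                           float deltaTime, int numParticles,
                           const SimParams& hostParams)
{
    // Upload simulation parameters into the __constant__ SimParams params.
    cudaMemcpyToSymbol(params, &hostParams, sizeof(SimParams));

    int blockSize = 256;                      // assumed to divide numParticles
    int numBlocks = numParticles / blockSize;
    integrate<<<numBlocks, blockSize>>>(newPos, newVel, oldPos, oldVel, deltaTime);
}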
* Bottom-level digit-reduction/counting kernel ******************************************************************************/ #pragma once #include "radixsort_kernel_common.cu" namespace b40c { /****************************************************************************** * Cycle-processing Routines ******************************************************************************/ template <int BYTE> __device__ __forceinline__ int DecodeInt(int encoded){ int retval; ExtractKeyBits<int, BYTE * 8, 8>::Extract(retval, encoded); return retval; } //----------------------------------------------------------------------------- template <int PARTIAL> __device__ __forceinline__ void ReduceLanePartial( int local_counts[4], int *scan_lanes, int lane_offset) { unsigned char* encoded = (unsigned char *) &scan_lanes[lane_offset + (PARTIAL * B40C_WARP_THREADS)]; local_counts[0] += encoded[0]; local_counts[1] += encoded[1]; local_counts[2] += encoded[2]; local_counts[3] += encoded[3]; } template <int LANE, int REDUCTION_LANES, int REDUCTION_LANES_PER_WARP, int REDUCTION_PARTIALS_PER_LANE, int LANE_PARTIALS_PER_THREAD> __device__ __forceinline__ void ReduceLanePartials( int local_counts[REDUCTION_LANES_PER_WARP][4], int *scan_lanes, int lane_offset) { lane_offset += (LANE * REDUCTION_PARTIALS_PER_LANE * B40C_RADIXSORT_WARPS); if ((B40C_RADIXSORT_WARPS < REDUCTION_LANES) || (lane_offset < REDUCTION_LANES * REDUCTION_PARTIALS_PER_LANE)) { if (LANE_PARTIALS_PER_THREAD > 0) ReduceLanePartial<0>(local_counts[LANE], scan_lanes, lane_offset); if (LANE_PARTIALS_PER_THREAD > 1) ReduceLanePartial<1>(local_counts[LANE], scan_lanes, lane_offset); if (LANE_PARTIALS_PER_THREAD > 2) ReduceLanePartial<2>(local_counts[LANE], scan_lanes, lane_offset); if (LANE_PARTIALS_PER_THREAD > 3) ReduceLanePartial<3>(local_counts[LANE], scan_lanes, lane_offset); if (LANE_PARTIALS_PER_THREAD > 4) ReduceLanePartial<4>(local_counts[LANE], scan_lanes, lane_offset); if (LANE_PARTIALS_PER_THREAD > 5) ReduceLanePartial<5>(local_counts[LANE], scan_lanes, lane_offset); if (LANE_PARTIALS_PER_THREAD > 6) ReduceLanePartial<6>(local_counts[LANE], scan_lanes, lane_offset); if (LANE_PARTIALS_PER_THREAD > 7) ReduceLanePartial<7>(local_counts[LANE], scan_lanes, lane_offset); } } template < int REDUCTION_LANES, int REDUCTION_LANES_PER_WARP, int LOG_REDUCTION_PARTIALS_PER_LANE, int REDUCTION_PARTIALS_PER_LANE> __device__ __forceinline__ void ReduceEncodedCounts( int local_counts[REDUCTION_LANES_PER_WARP][4], int *scan_lanes, int warp_id, int warp_idx) { const int LANE_PARTIALS_PER_THREAD = REDUCTION_PARTIALS_PER_LANE / B40C_WARP_THREADS; SuppressUnusedConstantWarning(LANE_PARTIALS_PER_THREAD); int lane_offset = (warp_id << LOG_REDUCTION_PARTIALS_PER_LANE) + warp_idx; // my warp's (first-lane) reduction offset if (REDUCTION_LANES_PER_WARP > 0) ReduceLanePartials<0, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, REDUCTION_PARTIALS_PER_LANE, LANE_PARTIALS_PER_THREAD>(local_counts, scan_lanes, lane_offset); if (REDUCTION_LANES_PER_WARP > 1) ReduceLanePartials<1, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, REDUCTION_PARTIALS_PER_LANE, LANE_PARTIALS_PER_THREAD>(local_counts, scan_lanes, lane_offset); if (REDUCTION_LANES_PER_WARP > 2) ReduceLanePartials<2, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, REDUCTION_PARTIALS_PER_LANE, LANE_PARTIALS_PER_THREAD>(local_counts, scan_lanes, lane_offset); if (REDUCTION_LANES_PER_WARP > 3) ReduceLanePartials<3, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, REDUCTION_PARTIALS_PER_LANE, LANE_PARTIALS_PER_THREAD>(local_counts, 
scan_lanes, lane_offset); } template <typename K, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor> __device__ __forceinline__ void Bucket( K key, int *encoded_reduction_col, PreprocessFunctor preprocess = PreprocessFunctor()) { preprocess(key); int lane; ExtractKeyBits<K, BIT + 2, RADIX_BITS - 2>::Extract(lane, key); if (B40C_FERMI(__CUDA_ARCH__)) { // GF100+ has special bit-extraction instructions (instead of shift+mask) int quad_byte; if (RADIX_BITS < 2) { ExtractKeyBits<K, BIT, 1>::Extract(quad_byte, key); } else { ExtractKeyBits<K, BIT, 2>::Extract(quad_byte, key); } unsigned char *encoded_col = (unsigned char *) &encoded_reduction_col[FastMul(lane, REDUCTION_PARTIALS_PER_LANE)]; encoded_col[quad_byte]++; } else { // GT200 can save an instruction because it can source an operand // directly from smem const int BYTE_ENCODE_SHIFT = 0x3; const K QUAD_MASK = (RADIX_BITS < 2) ? 0x1 : 0x3; int quad_shift = MagnitudeShift<K, BYTE_ENCODE_SHIFT - BIT>(key & (QUAD_MASK << BIT)); encoded_reduction_col[FastMul(lane, REDUCTION_PARTIALS_PER_LANE)] += (1 << quad_shift); } } template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor, int CYCLES> struct LoadOp; template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor> struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 1> { static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col) { K key; GlobalLoad<K, CACHE_MODIFIER >::Ld(key, d_in_keys, block_offset); Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(key, encoded_reduction_col); } }; template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor> struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 2> { static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 1>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col); LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 1>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 1), encoded_reduction_col); } }; template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor> struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 4> { static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 2>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col); LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 2>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 2), encoded_reduction_col); } }; template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor> struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 8> { static __device__ 
__forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col) { K keys[8]; GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[0], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0)); GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[1], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 1)); GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[2], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 2)); GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[3], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 3)); if (B40C_FERMI(__CUDA_ARCH__)) __syncthreads(); GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[4], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 4)); GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[5], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 5)); GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[6], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 6)); GlobalLoad<K, CACHE_MODIFIER >::Ld(keys[7], d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 7)); Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[0], encoded_reduction_col); Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[1], encoded_reduction_col); Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[2], encoded_reduction_col); Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[3], encoded_reduction_col); Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[4], encoded_reduction_col); Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[5], encoded_reduction_col); Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[6], encoded_reduction_col); Bucket<K, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor>(keys[7], encoded_reduction_col); } }; template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor> struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 16> { static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 8>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col); LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 8>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 8), encoded_reduction_col); } }; template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor> struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32> { static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 16>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col); LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 16>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 16), encoded_reduction_col); } }; template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor> struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 64> { 
static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col); LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 32), encoded_reduction_col); } }; template <typename K, CacheModifier CACHE_MODIFIER, int RADIX_BITS, int REDUCTION_PARTIALS_PER_LANE, int BIT, typename PreprocessFunctor> struct LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 128> { static __device__ __forceinline__ void BlockOfLoads(K *d_in_keys, int block_offset, int *encoded_reduction_col) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 64>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 0), encoded_reduction_col); LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 64>::BlockOfLoads(d_in_keys, block_offset + (B40C_RADIXSORT_THREADS * 64), encoded_reduction_col); } }; template <int REDUCTION_LANES> __device__ __forceinline__ void ResetEncodedCarry( int *encoded_reduction_col) { #pragma unroll for (int SCAN_LANE = 0; SCAN_LANE < (int) REDUCTION_LANES; SCAN_LANE++) { encoded_reduction_col[SCAN_LANE * B40C_RADIXSORT_THREADS] = 0; } } template <bool UNROLL, typename K, CacheModifier CACHE_MODIFIER, int BIT, int RADIX_BITS, int REDUCTION_LANES, int REDUCTION_LANES_PER_WARP, int LOG_REDUCTION_PARTIALS_PER_LANE, int REDUCTION_PARTIALS_PER_LANE, typename PreprocessFunctor> struct UnrolledLoads; // Minimal unrolling template <typename K, CacheModifier CACHE_MODIFIER, int BIT, int RADIX_BITS, int REDUCTION_LANES, int REDUCTION_LANES_PER_WARP, int LOG_REDUCTION_PARTIALS_PER_LANE, int REDUCTION_PARTIALS_PER_LANE, typename PreprocessFunctor> struct UnrolledLoads <false, K, CACHE_MODIFIER, BIT, RADIX_BITS, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE, PreprocessFunctor> { __device__ __forceinline__ static void Unroll( K* d_in_keys, int &block_offset, int* encoded_reduction_col, int* scan_lanes, const int& out_of_bounds, int local_counts[REDUCTION_LANES_PER_WARP][4], int warp_id, int warp_idx) { // Unroll batches of loads with occasional reduction to avoid overflow while (block_offset + (B40C_RADIXSORT_THREADS * 32) < out_of_bounds) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col); block_offset += B40C_RADIXSORT_THREADS * 32; __syncthreads(); // Aggregate back into local_count registers to prevent overflow ReduceEncodedCounts<REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE>( local_counts, scan_lanes, warp_id, warp_idx); __syncthreads(); // Reset encoded counters ResetEncodedCarry<REDUCTION_LANES>(encoded_reduction_col); } } }; // Unrolled template <typename K, CacheModifier CACHE_MODIFIER, int BIT, int RADIX_BITS, int REDUCTION_LANES, int REDUCTION_LANES_PER_WARP, int LOG_REDUCTION_PARTIALS_PER_LANE, int REDUCTION_PARTIALS_PER_LANE, typename PreprocessFunctor> struct UnrolledLoads <true, K, CACHE_MODIFIER, BIT, RADIX_BITS, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE, 
PreprocessFunctor> { __device__ __forceinline__ static void Unroll( K* d_in_keys, int &block_offset, int* encoded_reduction_col, int* scan_lanes, const int& out_of_bounds, int local_counts[REDUCTION_LANES_PER_WARP][4], int warp_id, int warp_idx) { // Unroll batches of loads with occasional reduction to avoid overflow while (block_offset + (B40C_RADIXSORT_THREADS * 128) < out_of_bounds) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 128>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col); block_offset += B40C_RADIXSORT_THREADS * 128; __syncthreads(); // Aggregate back into local_count registers to prevent overflow ReduceEncodedCounts<REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE>( local_counts, scan_lanes, warp_id, warp_idx); __syncthreads(); // Reset encoded counters ResetEncodedCarry<REDUCTION_LANES>(encoded_reduction_col); } if (block_offset + (B40C_RADIXSORT_THREADS * 64) < out_of_bounds) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 64>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col); block_offset += B40C_RADIXSORT_THREADS * 64; } if (block_offset + (B40C_RADIXSORT_THREADS * 32) < out_of_bounds) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 32>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col); block_offset += B40C_RADIXSORT_THREADS * 32; } if (block_offset + (B40C_RADIXSORT_THREADS * 16) < out_of_bounds) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 16>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col); block_offset += B40C_RADIXSORT_THREADS * 16; } if (block_offset + (B40C_RADIXSORT_THREADS * 8) < out_of_bounds) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 8>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col); block_offset += B40C_RADIXSORT_THREADS * 8; } if (block_offset + (B40C_RADIXSORT_THREADS * 4) < out_of_bounds) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 4>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col); block_offset += B40C_RADIXSORT_THREADS * 4; } if (block_offset + (B40C_RADIXSORT_THREADS * 2) < out_of_bounds) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 2>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col); block_offset += B40C_RADIXSORT_THREADS * 2; } } }; template < typename K, CacheModifier CACHE_MODIFIER, int BIT, int RADIX_BITS, int RADIX_DIGITS, int REDUCTION_LANES, int LOG_REDUCTION_PARTIALS_PER_LANE, int REDUCTION_PARTIALS_PER_LANE, typename PreprocessFunctor, bool UNROLL> __device__ __forceinline__ void ReductionPass( K* d_in_keys, int* d_spine, int block_offset, int* encoded_reduction_col, int* scan_lanes, const int& out_of_bounds) { const int REDUCTION_LANES_PER_WARP = (REDUCTION_LANES > B40C_RADIXSORT_WARPS) ? 
REDUCTION_LANES / B40C_RADIXSORT_WARPS : 1; // Always at least one fours group per warp const int PARTIALS_PER_ROW = B40C_WARP_THREADS; const int PADDED_PARTIALS_PER_ROW = PARTIALS_PER_ROW + 1; int warp_id = threadIdx.x >> B40C_LOG_WARP_THREADS; int warp_idx = threadIdx.x & (B40C_WARP_THREADS - 1); block_offset += threadIdx.x; // Each thread is responsible for aggregating an unencoded segment of a fours-group int local_counts[REDUCTION_LANES_PER_WARP][4]; // Initialize local counts #pragma unroll for (int LANE = 0; LANE < (int) REDUCTION_LANES_PER_WARP; LANE++) { local_counts[LANE][0] = 0; local_counts[LANE][1] = 0; local_counts[LANE][2] = 0; local_counts[LANE][3] = 0; } // Reset encoded counters ResetEncodedCarry<REDUCTION_LANES>(encoded_reduction_col); // Process loads in bulk (if applicable) UnrolledLoads<UNROLL, K, CACHE_MODIFIER, BIT, RADIX_BITS, REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE, PreprocessFunctor>::Unroll( d_in_keys, block_offset, encoded_reduction_col, scan_lanes, out_of_bounds + threadIdx.x, local_counts, warp_id, warp_idx); // Process (potentially-partial) loads singly while (block_offset < out_of_bounds) { LoadOp<K, CACHE_MODIFIER, RADIX_BITS, REDUCTION_PARTIALS_PER_LANE, BIT, PreprocessFunctor, 1>::BlockOfLoads(d_in_keys, block_offset, encoded_reduction_col); block_offset += B40C_RADIXSORT_THREADS; } __syncthreads(); // Aggregate back into local_count registers ReduceEncodedCounts<REDUCTION_LANES, REDUCTION_LANES_PER_WARP, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE>( local_counts, scan_lanes, warp_id, warp_idx); __syncthreads(); // // Reduce the local_counts within each reduction lane within each warp // // Place into smem int lane_base = FastMul(warp_id, PADDED_PARTIALS_PER_ROW * B40C_RADIXSORT_WARPS); // my warp's (first) reduction lane #pragma unroll for (int i = 0; i < (int) REDUCTION_LANES_PER_WARP; i++) { scan_lanes[lane_base + warp_idx + (PADDED_PARTIALS_PER_ROW * 0)] = local_counts[i][0]; scan_lanes[lane_base + warp_idx + (PADDED_PARTIALS_PER_ROW * 1)] = local_counts[i][1]; scan_lanes[lane_base + warp_idx + (PADDED_PARTIALS_PER_ROW * 2)] = local_counts[i][2]; scan_lanes[lane_base + warp_idx + (PADDED_PARTIALS_PER_ROW * 3)] = local_counts[i][3]; lane_base += PADDED_PARTIALS_PER_ROW * B40C_RADIXSORT_WARPS; } __syncthreads(); // Rake-reduce and write out the digit_count reductions if (threadIdx.x < RADIX_DIGITS) { int lane_base = FastMul(threadIdx.x, PADDED_PARTIALS_PER_ROW); int digit_count = SerialReduce<PARTIALS_PER_ROW>(scan_lanes + lane_base); int spine_digit_offset = FastMul(gridDim.x, threadIdx.x) + blockIdx.x; d_spine[spine_digit_offset] = digit_count; } } template <typename K, typename V, int PASS, int RADIX_BITS, int BIT, typename PreprocessFunctor> __launch_bounds__ (B40C_RADIXSORT_THREADS, B40C_RADIXSORT_REDUCE_CTA_OCCUPANCY(__CUDA_ARCH__)) __global__ void LsbRakingReductionKernel( int *d_selectors, int *d_spine, K *d_in_keys, K *d_out_keys, CtaDecomposition work_decomposition) { const int RADIX_DIGITS = 1 << RADIX_BITS; const int TILE_ELEMENTS = B40C_RADIXSORT_TILE_ELEMENTS(__CUDA_ARCH__, K, V); const int LOG_REDUCTION_PARTIALS_PER_LANE = B40C_RADIXSORT_LOG_THREADS; const int REDUCTION_PARTIALS_PER_LANE = 1 << LOG_REDUCTION_PARTIALS_PER_LANE; const int LOG_REDUCTION_LANES = (RADIX_BITS >= 2) ? 
RADIX_BITS - 2 : 0; // Always at least one fours group const int REDUCTION_LANES = 1 << LOG_REDUCTION_LANES; SuppressUnusedConstantWarning(RADIX_DIGITS); // Each thread gets its own column of fours-groups (for conflict-free updates) __shared__ int scan_lanes[REDUCTION_LANES * REDUCTION_PARTIALS_PER_LANE]; int *encoded_reduction_col = &scan_lanes[threadIdx.x]; // first element of column // Determine where to read our input int selector = (PASS == 0) ? 0 : d_selectors[PASS & 0x1]; if (selector) d_in_keys = d_out_keys; // Calculate our threadblock's range int block_offset, block_elements; if (blockIdx.x < work_decomposition.num_big_blocks) { block_offset = work_decomposition.big_block_elements * blockIdx.x; block_elements = work_decomposition.big_block_elements; } else { block_offset = (work_decomposition.normal_block_elements * blockIdx.x) + (work_decomposition.num_big_blocks * TILE_ELEMENTS); block_elements = work_decomposition.normal_block_elements; } int out_of_bounds = block_offset + block_elements; if (blockIdx.x == gridDim.x - 1) { if (work_decomposition.extra_elements_last_block > 0) { out_of_bounds -= TILE_ELEMENTS; } out_of_bounds += work_decomposition.extra_elements_last_block; } // Perform reduction pass ReductionPass<K, NONE, BIT, RADIX_BITS, RADIX_DIGITS, REDUCTION_LANES, LOG_REDUCTION_PARTIALS_PER_LANE, REDUCTION_PARTIALS_PER_LANE, PreprocessFunctor, true>( d_in_keys, d_spine, block_offset, encoded_reduction_col, scan_lanes, out_of_bounds); } } // namespace b40c
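/* ---------------------------------------------------------------------------
 * Illustrative sketch (an assumption, not taken from the kernel above): the
 * reduction keeps four 8-bit digit counters packed into one 32-bit int per
 * lane, which is why ReduceLanePartial() reads each word back through an
 * unsigned char pointer and why the counters are periodically drained into
 * the local_counts registers before any single byte can overflow. The
 * standalone helpers below demonstrate the same packing scheme on the host;
 * the names are hypothetical.
 * ------------------------------------------------------------------------- */
inline void bump_packed_counter(int& encoded, int which /* 0..3 */)
{
    // Add 1 to the selected byte lane. As in the kernel's scheme, there is no
    // overflow protection: the caller must drain the counters often enough.
    encoded += 1 << (which * 8);
}

inline int read_packed_counter(int encoded, int which /* 0..3 */)
{
    return (encoded >> (which * 8)) & 0xff;
}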
#include "core/context_cuda.h" #include "contrib/rcnn/bbox_utils.h" namespace dragon { namespace rcnn { /******************** BBox ********************/ template <typename T> __device__ int _BBoxTransform(const T dx, const T dy, const T d_log_w, const T d_log_h, const T im_w, const T im_h, const T min_box_w, const T min_box_h, T* bbox) { const T w = bbox[2] - bbox[0] + (T)1; const T h = bbox[3] - bbox[1] + (T)1; const T ctr_x = bbox[0] + (T)0.5 * w; const T ctr_y = bbox[1] + (T)0.5 * h; const T pred_ctr_x = dx * w + ctr_x; const T pred_ctr_y = dy * h + ctr_y; const T pred_w = exp(d_log_w) * w; const T pred_h = exp(d_log_h) * h; bbox[0] = pred_ctr_x - (T)0.5 * pred_w; bbox[1] = pred_ctr_y - (T)0.5 * pred_h; bbox[2] = pred_ctr_x + (T)0.5 * pred_w; bbox[3] = pred_ctr_y + (T)0.5 * pred_h; bbox[0] = max((T)0, min(bbox[0], im_w - (T)1)); bbox[1] = max((T)0, min(bbox[1], im_h - (T)1)); bbox[2] = max((T)0, min(bbox[2], im_w - (T)1)); bbox[3] = max((T)0, min(bbox[3], im_h - (T)1)); const T box_w = bbox[2] - bbox[0] + (T)1; const T box_h = bbox[3] - bbox[1] + (T)1; return (box_w >= min_box_w) * (box_h >= min_box_h); } /******************** Proposal ********************/ template <typename T> __global__ void _GenerateProposals(const int nthreads, const int A, const int feat_h, const int feat_w, const int stride, const float im_h, const float im_w, const float min_box_h, const float min_box_w, const T* scores, const T* bbox_deltas, const T* anchors, T* proposals) { CUDA_KERNEL_LOOP(idx, nthreads) { const int h = idx / A / feat_w; const int w = (idx / A) % feat_w; const int a = idx % A; const T x = w * stride; const T y = h * stride; const T* bbox_delta = bbox_deltas + h * feat_w + w; const T* score = scores + h * feat_w + w; const int K = feat_h * feat_w; const T dx = bbox_delta[(a * 4 + 0) * K]; const T dy = bbox_delta[(a * 4 + 1) * K]; const T d_log_w = bbox_delta[(a * 4 + 2) * K]; const T d_log_h = bbox_delta[(a * 4 + 3) * K]; T* proposal = proposals + idx * 5; proposal[0] = x + anchors[a * 4 + 0]; proposal[1] = y + anchors[a * 4 + 1]; proposal[2] = x + anchors[a * 4 + 2]; proposal[3] = y + anchors[a * 4 + 3]; proposal[4] = _BBoxTransform(dx, dy, d_log_w, d_log_h, im_w, im_h, min_box_w, min_box_h, proposal) * score[a * K]; } } template <> void GenerateProposals<float, CUDAContext>(const int A, const int feat_h, const int feat_w, const int stride, const float im_h, const float im_w, const float min_box_h, const float min_box_w, const float* scores, const float* bbox_deltas, const float* anchors, float* proposals) { const int num_proposals = A * feat_h * feat_w; _GenerateProposals<float> << <GET_BLOCKS(num_proposals), CUDA_NUM_THREADS >> >(num_proposals, A, feat_h, feat_w, stride, im_h, im_w, min_box_h, min_box_w, scores, bbox_deltas, anchors, proposals); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _GenerateProposals_v2(const int nthreads, const float im_h, const float im_w, const float min_box_h, const float min_box_w, const T* scores, const T* bbox_deltas, T* proposals) { CUDA_KERNEL_LOOP(idx, nthreads) { const float dx = bbox_deltas[idx]; const float dy = bbox_deltas[nthreads + idx]; const float d_log_w = bbox_deltas[2 * nthreads + idx]; const float d_log_h = bbox_deltas[3 * nthreads + idx]; T* proposal = proposals + idx * 5; proposal[4] = _BBoxTransform(dx, dy, d_log_w, d_log_h, im_w, im_h, min_box_w, min_box_h, proposal) * scores[idx]; } } template <> void GenerateProposals_v2<float, CUDAContext>(const int total_anchors, const float im_h, const float im_w, const float 
min_box_h, const float min_box_w, const float* scores, const float* bbox_deltas, float* proposals) { _GenerateProposals_v2<float> << <GET_BLOCKS(total_anchors), CUDA_NUM_THREADS >> >(total_anchors, im_h, im_w, min_box_h, min_box_w, scores, bbox_deltas, proposals); CUDA_POST_KERNEL_CHECK; } /******************** NMS ********************/ #define DIV_THEN_CEIL(x, y) (((x) + (y) - 1) / (y)) #define NMS_BLOCK_SIZE 64 template <typename T> __device__ T iou(const T* A, const T* B) { const T x1 = max(A[0], B[0]); const T y1 = max(A[1], B[1]); const T x2 = min(A[2], B[2]); const T y2 = min(A[3], B[3]); const T width = max((T)0, x2 - x1 + (T)1); const T height = max((T)0, y2 - y1 + (T)1); const T area = width * height; const T A_area = (A[2] - A[0] + (T)1) * (A[3] - A[1] + (T)1); const T B_area = (B[2] - B[0] + (T)1) * (B[3] - B[1] + (T)1); return area / (A_area + B_area - area); } template <typename T> __global__ static void nms_mask(const T boxes[], unsigned long long mask[], const int num_boxes, const T nms_thresh) { const int i_start = blockIdx.x * NMS_BLOCK_SIZE; const int di_end = min(num_boxes - i_start, NMS_BLOCK_SIZE); const int j_start = blockIdx.y * NMS_BLOCK_SIZE; const int dj_end = min(num_boxes - j_start, NMS_BLOCK_SIZE); __shared__ T boxes_i[NMS_BLOCK_SIZE * 4]; { const int di = threadIdx.x; if (di < di_end) { boxes_i[di * 4 + 0] = boxes[(i_start + di) * 5 + 0]; boxes_i[di * 4 + 1] = boxes[(i_start + di) * 5 + 1]; boxes_i[di * 4 + 2] = boxes[(i_start + di) * 5 + 2]; boxes_i[di * 4 + 3] = boxes[(i_start + di) * 5 + 3]; } } __syncthreads(); { const int dj = threadIdx.x; if (dj < dj_end) { const T* const box_j = boxes + (j_start + dj) * 5; unsigned long long mask_j = 0; const int di_start = (i_start == j_start) ? (dj + 1) : 0; for (int di = di_start; di < di_end; ++di) { const T* const box_i = boxes_i + di * 4; if (iou(box_j, box_i) > nms_thresh) { mask_j |= 1ULL << di; } } { const int num_blocks = DIV_THEN_CEIL(num_boxes, NMS_BLOCK_SIZE); const int bi = blockIdx.x; mask[(j_start + dj) * num_blocks + bi] = mask_j; } } } } template <typename T> void _NMS(const int num_boxes, const int max_keeps, const float thresh, const float* proposals, int* roi_indices, int& num_rois, Tensor* mask) { const int num_blocks = DIV_THEN_CEIL(num_boxes, NMS_BLOCK_SIZE); { const dim3 blocks(num_blocks, num_blocks); vector<TIndex> mask_shape(2); mask_shape[0] = num_boxes; mask_shape[1] = num_blocks * sizeof(unsigned long long) / sizeof(int); mask->Reshape(mask_shape); nms_mask << <blocks, NMS_BLOCK_SIZE >> >( proposals, (unsigned long long*)mask->template mutable_data<int, CUDAContext>(), num_boxes, thresh); CUDA_POST_KERNEL_CHECK; } // discard i-th box if it is significantly overlapped with // one or more previous (= scored higher) boxes { const unsigned long long* p_mask_cpu = (unsigned long long*)mask->mutable_data<int, CPUContext>(); int num_selected = 0; vector<unsigned long long> dead_bit(num_blocks); for (int i = 0; i < num_blocks; ++i) { dead_bit[i] = 0; } for (int i = 0; i < num_boxes; ++i) { const int nblock = i / NMS_BLOCK_SIZE; const int inblock = i % NMS_BLOCK_SIZE; if (!(dead_bit[nblock] & (1ULL << inblock))) { roi_indices[num_selected++] = i; const unsigned long long* const mask_i = p_mask_cpu + i * num_blocks; for (int j = nblock; j < num_blocks; ++j) { dead_bit[j] |= mask_i[j]; } if (num_selected == max_keeps) { break; } } } num_rois = num_selected; } } template <> void NMS<float, CUDAContext>(const int num_boxes, const int max_keeps, const float thresh, const float* proposals, int* 
roi_indices, int& num_rois, Tensor* mask) { _NMS<float>(num_boxes, max_keeps, thresh, proposals, roi_indices, num_rois, mask); } } // namespace rcnn } // namespace dragon #endif // WITH_CUDA
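// ---------------------------------------------------------------------------
// Hypothetical CPU sketch (not part of this file): nms_mask() packs, for every
// box j, one 64-bit word per block of boxes marking which higher-scored boxes
// i overlap j with IoU > nms_thresh, and _NMS() then greedily keeps a box only
// if no previously kept box has suppressed it. The functions below express the
// same greedy rule without the bitmask packing, assuming proposals are sorted
// by descending score and stored as 5 floats per box (x1, y1, x2, y2, score)
// like the kernels above.
#include <algorithm>
#include <vector>

static float IoURef(const float *A, const float *B) {
  const float x1 = std::max(A[0], B[0]), y1 = std::max(A[1], B[1]);
  const float x2 = std::min(A[2], B[2]), y2 = std::min(A[3], B[3]);
  const float w = std::max(0.0f, x2 - x1 + 1.0f);
  const float h = std::max(0.0f, y2 - y1 + 1.0f);
  const float inter = w * h;
  const float area_a = (A[2] - A[0] + 1.0f) * (A[3] - A[1] + 1.0f);
  const float area_b = (B[2] - B[0] + 1.0f) * (B[3] - B[1] + 1.0f);
  return inter / (area_a + area_b - inter);
}

static std::vector<int> NmsRef(const std::vector<float> &proposals,
                               float thresh, int max_keeps) {
  std::vector<int> keep;
  const int n = static_cast<int>(proposals.size() / 5);
  for (int i = 0; i < n && static_cast<int>(keep.size()) < max_keeps; ++i) {
    bool suppressed = false;
    for (int k : keep) {
      // Discard box i if it overlaps any already-kept (higher-scored) box.
      if (IoURef(&proposals[i * 5], &proposals[k * 5]) > thresh) {
        suppressed = true;
        break;
      }
    }
    if (!suppressed) keep.push_back(i);
  }
  return keep;
}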
#include <ATen/cuda/CUDAApplyUtils.cuh> #define CHECK_CUDA(x) \ TORCH_CHECK(x.device().is_cuda(), #x " must be a CUDA tensor") #define CHECK_CONTIGUOUS(x) \ TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") #define CHECK_INPUT(x) \ CHECK_CUDA(x); \ CHECK_CONTIGUOUS(x) namespace { int const threadsPerBlock = sizeof(unsigned long long) * 8; } #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T, typename T_int> __global__ void dynamic_voxelize_kernel( const T* points, T_int* coors, const float voxel_x, const float voxel_y, const float voxel_z, const float coors_x_min, const float coors_y_min, const float coors_z_min, const float coors_x_max, const float coors_y_max, const float coors_z_max, const int grid_x, const int grid_y, const int grid_z, const int num_points, const int num_features, const int NDim) { // const int index = blockIdx.x * threadsPerBlock + threadIdx.x; CUDA_1D_KERNEL_LOOP(index, num_points) { // To save some computation auto points_offset = points + index * num_features; auto coors_offset = coors + index * NDim; int c_x = floor((points_offset[0] - coors_x_min) / voxel_x); if (c_x < 0 || c_x >= grid_x) { coors_offset[0] = -1; return; } int c_y = floor((points_offset[1] - coors_y_min) / voxel_y); if (c_y < 0 || c_y >= grid_y) { coors_offset[0] = -1; coors_offset[1] = -1; return; } int c_z = floor((points_offset[2] - coors_z_min) / voxel_z); if (c_z < 0 || c_z >= grid_z) { coors_offset[0] = -1; coors_offset[1] = -1; coors_offset[2] = -1; } else { coors_offset[0] = c_z; coors_offset[1] = c_y; coors_offset[2] = c_x; } } } template <typename T, typename T_int> __global__ void assign_point_to_voxel(const int nthreads, const T* points, T_int* point_to_voxelidx, T_int* coor_to_voxelidx, T* voxels, const int max_points, const int num_features, const int num_points, const int NDim) { CUDA_1D_KERNEL_LOOP(thread_idx, nthreads) { // const int index = blockIdx.x * threadsPerBlock + threadIdx.x; int index = thread_idx / num_features; int num = point_to_voxelidx[index]; int voxelidx = coor_to_voxelidx[index]; if (num > -1 && voxelidx > -1) { auto voxels_offset = voxels + voxelidx * max_points * num_features + num * num_features; int k = thread_idx % num_features; voxels_offset[k] = points[thread_idx]; } } } template <typename T, typename T_int> __global__ void assign_voxel_coors(const int nthreads, T_int* coor, T_int* point_to_voxelidx, T_int* coor_to_voxelidx, T_int* voxel_coors, const int num_points, const int NDim) { CUDA_1D_KERNEL_LOOP(thread_idx, nthreads) { // const int index = blockIdx.x * threadsPerBlock + threadIdx.x; // if (index >= num_points) return; int index = thread_idx / NDim; int num = point_to_voxelidx[index]; int voxelidx = coor_to_voxelidx[index]; if (num == 0 && voxelidx > -1) { auto coors_offset = voxel_coors + voxelidx * NDim; int k = thread_idx % NDim; coors_offset[k] = coor[thread_idx]; } } } template <typename T_int> __global__ void point_to_voxelidx_kernel(const T_int* coor, T_int* point_to_voxelidx, T_int* point_to_pointidx, const int max_points, const int max_voxels, const int num_points, const int NDim) { CUDA_1D_KERNEL_LOOP(index, num_points) { auto coor_offset = coor + index * NDim; // skip invalid points if ((index >= num_points) || (coor_offset[0] == -1)) return; int num = 0; int coor_x = coor_offset[0]; int coor_y = coor_offset[1]; int coor_z = coor_offset[2]; // only calculate the coors before this coor[index] for (int i = 0; i < index; ++i) { auto 
prev_coor = coor + i * NDim; if (prev_coor[0] == -1) continue; // Find all previous points that have the same coors // if find the same coor, record it if ((prev_coor[0] == coor_x) && (prev_coor[1] == coor_y) && (prev_coor[2] == coor_z)) { num++; if (num == 1) { // point to the same coor that first show up point_to_pointidx[index] = i; } else if (num >= max_points) { // out of boundary return; } } } if (num == 0) { point_to_pointidx[index] = index; } if (num < max_points) { point_to_voxelidx[index] = num; } } } template <typename T_int> __global__ void determin_voxel_num( // const T_int* coor, T_int* num_points_per_voxel, T_int* point_to_voxelidx, T_int* point_to_pointidx, T_int* coor_to_voxelidx, T_int* voxel_num, const int max_points, const int max_voxels, const int num_points) { // only calculate the coors before this coor[index] for (int i = 0; i < num_points; ++i) { // if (coor[i][0] == -1) // continue; int point_pos_in_voxel = point_to_voxelidx[i]; // record voxel if (point_pos_in_voxel == -1) { // out of max_points or invalid point continue; } else if (point_pos_in_voxel == 0) { // record new voxel int voxelidx = voxel_num[0]; if (voxel_num[0] >= max_voxels) continue; voxel_num[0] += 1; coor_to_voxelidx[i] = voxelidx; num_points_per_voxel[voxelidx] = 1; } else { int point_idx = point_to_pointidx[i]; int voxelidx = coor_to_voxelidx[point_idx]; if (voxelidx != -1) { coor_to_voxelidx[i] = voxelidx; num_points_per_voxel[voxelidx] += 1; } } } } __global__ void nondisterministic_get_assign_pos( const int nthreads, const int32_t *coors_map, int32_t *pts_id, int32_t *coors_count, int32_t *reduce_count, int32_t *coors_order) { CUDA_1D_KERNEL_LOOP(thread_idx, nthreads) { int coors_idx = coors_map[thread_idx]; if (coors_idx > -1) { int32_t coors_pts_pos = atomicAdd(&reduce_count[coors_idx], 1); pts_id[thread_idx] = coors_pts_pos; if (coors_pts_pos == 0) { coors_order[coors_idx] = atomicAdd(coors_count, 1); } } } } template<typename T> __global__ void nondisterministic_assign_point_voxel( const int nthreads, const T *points, const int32_t *coors_map, const int32_t *pts_id, const int32_t *coors_in, const int32_t *reduce_count, const int32_t *coors_order, T *voxels, int32_t *coors, int32_t *pts_count, const int max_voxels, const int max_points, const int num_features, const int NDim) { CUDA_1D_KERNEL_LOOP(thread_idx, nthreads) { int coors_idx = coors_map[thread_idx]; int coors_pts_pos = pts_id[thread_idx]; if (coors_idx > -1) { int coors_pos = coors_order[coors_idx]; if (coors_pos < max_voxels && coors_pts_pos < max_points) { auto voxels_offset = voxels + (coors_pos * max_points + coors_pts_pos) * num_features; auto points_offset = points + thread_idx * num_features; for (int k = 0; k < num_features; k++) { voxels_offset[k] = points_offset[k]; } if (coors_pts_pos == 0) { pts_count[coors_pos] = min(reduce_count[coors_idx], max_points); auto coors_offset = coors + coors_pos * NDim; auto coors_in_offset = coors_in + coors_idx * NDim; for (int k = 0; k < NDim; k++) { coors_offset[k] = coors_in_offset[k]; } } } } } } namespace voxelization { int hard_voxelize_gpu(const at::Tensor& points, at::Tensor& voxels, at::Tensor& coors, at::Tensor& num_points_per_voxel, const std::vector<float> voxel_size, const std::vector<float> coors_range, const int max_points, const int max_voxels, const int NDim = 3) { // current version tooks about 0.04s for one frame on cpu // check device CHECK_INPUT(points); at::cuda::CUDAGuard device_guard(points.device()); const int num_points = points.size(0); const int 
num_features = points.size(1); const float voxel_x = voxel_size[0]; const float voxel_y = voxel_size[1]; const float voxel_z = voxel_size[2]; const float coors_x_min = coors_range[0]; const float coors_y_min = coors_range[1]; const float coors_z_min = coors_range[2]; const float coors_x_max = coors_range[3]; const float coors_y_max = coors_range[4]; const float coors_z_max = coors_range[5]; const int grid_x = round((coors_x_max - coors_x_min) / voxel_x); const int grid_y = round((coors_y_max - coors_y_min) / voxel_y); const int grid_z = round((coors_z_max - coors_z_min) / voxel_z); // map points to voxel coors at::Tensor temp_coors = at::zeros({num_points, NDim}, points.options().dtype(at::kInt)); dim3 grid(std::min(at::cuda::ATenCeilDiv(num_points, 512), 4096)); dim3 block(512); // 1. link point to corresponding voxel coors AT_DISPATCH_ALL_TYPES( points.scalar_type(), "hard_voxelize_kernel", ([&] { dynamic_voxelize_kernel<scalar_t, int> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( points.contiguous().data_ptr<scalar_t>(), temp_coors.contiguous().data_ptr<int>(), voxel_x, voxel_y, voxel_z, coors_x_min, coors_y_min, coors_z_min, coors_x_max, coors_y_max, coors_z_max, grid_x, grid_y, grid_z, num_points, num_features, NDim); })); cudaDeviceSynchronize(); AT_CUDA_CHECK(cudaGetLastError()); // 2. map point to the idx of the corresponding voxel, find duplicate coor // create some temporary variables auto point_to_pointidx = -at::ones( { num_points, }, points.options().dtype(at::kInt)); auto point_to_voxelidx = -at::ones( { num_points, }, points.options().dtype(at::kInt)); dim3 map_grid(std::min(at::cuda::ATenCeilDiv(num_points, 512), 4096)); dim3 map_block(512); AT_DISPATCH_ALL_TYPES( temp_coors.scalar_type(), "determin_duplicate", ([&] { point_to_voxelidx_kernel<int> <<<map_grid, map_block, 0, at::cuda::getCurrentCUDAStream()>>>( temp_coors.contiguous().data_ptr<int>(), point_to_voxelidx.contiguous().data_ptr<int>(), point_to_pointidx.contiguous().data_ptr<int>(), max_points, max_voxels, num_points, NDim); })); cudaDeviceSynchronize(); AT_CUDA_CHECK(cudaGetLastError()); // 3. determin voxel num and voxel's coor index // make the logic in the CUDA device could accelerate about 10 times auto coor_to_voxelidx = -at::ones( { num_points, }, points.options().dtype(at::kInt)); auto voxel_num = at::zeros( { 1, }, points.options().dtype(at::kInt)); // must be zero from the begining AT_DISPATCH_ALL_TYPES( temp_coors.scalar_type(), "determin_duplicate", ([&] { determin_voxel_num<int><<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>( num_points_per_voxel.contiguous().data_ptr<int>(), point_to_voxelidx.contiguous().data_ptr<int>(), point_to_pointidx.contiguous().data_ptr<int>(), coor_to_voxelidx.contiguous().data_ptr<int>(), voxel_num.contiguous().data_ptr<int>(), max_points, max_voxels, num_points); })); cudaDeviceSynchronize(); AT_CUDA_CHECK(cudaGetLastError()); // 4. 
copy point features to voxels // Step 4 & 5 could be parallel auto pts_output_size = num_points * num_features; dim3 cp_grid(std::min(at::cuda::ATenCeilDiv(pts_output_size, 512), 4096)); dim3 cp_block(512); AT_DISPATCH_ALL_TYPES( points.scalar_type(), "assign_point_to_voxel", ([&] { assign_point_to_voxel<float, int> <<<cp_grid, cp_block, 0, at::cuda::getCurrentCUDAStream()>>>( pts_output_size, points.contiguous().data_ptr<float>(), point_to_voxelidx.contiguous().data_ptr<int>(), coor_to_voxelidx.contiguous().data_ptr<int>(), voxels.contiguous().data_ptr<float>(), max_points, num_features, num_points, NDim); })); // cudaDeviceSynchronize(); // AT_CUDA_CHECK(cudaGetLastError()); // 5. copy coors of each voxels auto coors_output_size = num_points * NDim; dim3 coors_cp_grid( std::min(at::cuda::ATenCeilDiv(coors_output_size, 512), 4096)); dim3 coors_cp_block(512); AT_DISPATCH_ALL_TYPES( points.scalar_type(), "assign_point_to_voxel", ([&] { assign_voxel_coors<float, int><<<coors_cp_grid, coors_cp_block, 0, at::cuda::getCurrentCUDAStream()>>>( coors_output_size, temp_coors.contiguous().data_ptr<int>(), point_to_voxelidx.contiguous().data_ptr<int>(), coor_to_voxelidx.contiguous().data_ptr<int>(), coors.contiguous().data_ptr<int>(), num_points, NDim); })); cudaDeviceSynchronize(); AT_CUDA_CHECK(cudaGetLastError()); auto voxel_num_cpu = voxel_num.to(at::kCPU); int voxel_num_int = voxel_num_cpu.data_ptr<int>()[0]; return voxel_num_int; } int nondisterministic_hard_voxelize_gpu( const at::Tensor &points, at::Tensor &voxels, at::Tensor &coors, at::Tensor &num_points_per_voxel, const std::vector<float> voxel_size, const std::vector<float> coors_range, const int max_points, const int max_voxels, const int NDim = 3) { CHECK_INPUT(points); at::cuda::CUDAGuard device_guard(points.device()); const int num_points = points.size(0); const int num_features = points.size(1); if (num_points == 0) return 0; const float voxel_x = voxel_size[0]; const float voxel_y = voxel_size[1]; const float voxel_z = voxel_size[2]; const float coors_x_min = coors_range[0]; const float coors_y_min = coors_range[1]; const float coors_z_min = coors_range[2]; const float coors_x_max = coors_range[3]; const float coors_y_max = coors_range[4]; const float coors_z_max = coors_range[5]; const int grid_x = round((coors_x_max - coors_x_min) / voxel_x); const int grid_y = round((coors_y_max - coors_y_min) / voxel_y); const int grid_z = round((coors_z_max - coors_z_min) / voxel_z); // map points to voxel coors at::Tensor temp_coors = at::zeros({num_points, NDim}, points.options().dtype(torch::kInt32)); dim3 grid(std::min(at::cuda::ATenCeilDiv(num_points, 512), 4096)); dim3 block(512); // 1. 
link point to corresponding voxel coors AT_DISPATCH_ALL_TYPES( points.scalar_type(), "hard_voxelize_kernel", ([&] { dynamic_voxelize_kernel<scalar_t, int> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( points.contiguous().data_ptr<scalar_t>(), temp_coors.contiguous().data_ptr<int>(), voxel_x, voxel_y, voxel_z, coors_x_min, coors_y_min, coors_z_min, coors_x_max, coors_y_max, coors_z_max, grid_x, grid_y, grid_z, num_points, num_features, NDim); })); at::Tensor coors_map; at::Tensor coors_count; at::Tensor coors_order; at::Tensor reduce_count; at::Tensor pts_id; auto coors_clean = temp_coors.masked_fill(temp_coors.lt(0).any(-1, true), -1); std::tie(temp_coors, coors_map, reduce_count) = at::unique_dim(coors_clean, 0, true, true, false); if (temp_coors.index({0, 0}).lt(0).item<bool>()) { // the first element of temp_coors is (-1,-1,-1) and should be removed temp_coors = temp_coors.slice(0, 1); coors_map = coors_map - 1; } int num_coors = temp_coors.size(0); temp_coors = temp_coors.to(torch::kInt32); coors_map = coors_map.to(torch::kInt32); coors_count = coors_map.new_zeros(1); coors_order = coors_map.new_empty(num_coors); reduce_count = coors_map.new_zeros(num_coors); pts_id = coors_map.new_zeros(num_points); dim3 cp_grid(std::min(at::cuda::ATenCeilDiv(num_points, 512), 4096)); dim3 cp_block(512); AT_DISPATCH_ALL_TYPES(points.scalar_type(), "get_assign_pos", ([&] { nondisterministic_get_assign_pos<<<cp_grid, cp_block, 0, at::cuda::getCurrentCUDAStream()>>>( num_points, coors_map.contiguous().data_ptr<int32_t>(), pts_id.contiguous().data_ptr<int32_t>(), coors_count.contiguous().data_ptr<int32_t>(), reduce_count.contiguous().data_ptr<int32_t>(), coors_order.contiguous().data_ptr<int32_t>()); })); AT_DISPATCH_ALL_TYPES( points.scalar_type(), "assign_point_to_voxel", ([&] { nondisterministic_assign_point_voxel<scalar_t> <<<cp_grid, cp_block, 0, at::cuda::getCurrentCUDAStream()>>>( num_points, points.contiguous().data_ptr<scalar_t>(), coors_map.contiguous().data_ptr<int32_t>(), pts_id.contiguous().data_ptr<int32_t>(), temp_coors.contiguous().data_ptr<int32_t>(), reduce_count.contiguous().data_ptr<int32_t>(), coors_order.contiguous().data_ptr<int32_t>(), voxels.contiguous().data_ptr<scalar_t>(), coors.contiguous().data_ptr<int32_t>(), num_points_per_voxel.contiguous().data_ptr<int32_t>(), max_voxels, max_points, num_features, NDim); })); AT_CUDA_CHECK(cudaGetLastError()); return max_voxels < num_coors ? 
max_voxels : num_coors; } void dynamic_voxelize_gpu(const at::Tensor& points, at::Tensor& coors, const std::vector<float> voxel_size, const std::vector<float> coors_range, const int NDim = 3) { // current version tooks about 0.04s for one frame on cpu // check device CHECK_INPUT(points); at::cuda::CUDAGuard device_guard(points.device()); const int num_points = points.size(0); const int num_features = points.size(1); const float voxel_x = voxel_size[0]; const float voxel_y = voxel_size[1]; const float voxel_z = voxel_size[2]; const float coors_x_min = coors_range[0]; const float coors_y_min = coors_range[1]; const float coors_z_min = coors_range[2]; const float coors_x_max = coors_range[3]; const float coors_y_max = coors_range[4]; const float coors_z_max = coors_range[5]; const int grid_x = round((coors_x_max - coors_x_min) / voxel_x); const int grid_y = round((coors_y_max - coors_y_min) / voxel_y); const int grid_z = round((coors_z_max - coors_z_min) / voxel_z); const int col_blocks = at::cuda::ATenCeilDiv(num_points, threadsPerBlock); dim3 blocks(col_blocks); dim3 threads(threadsPerBlock); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_ALL_TYPES(points.scalar_type(), "dynamic_voxelize_kernel", [&] { dynamic_voxelize_kernel<scalar_t, int><<<blocks, threads, 0, stream>>>( points.contiguous().data_ptr<scalar_t>(), coors.contiguous().data_ptr<int>(), voxel_x, voxel_y, voxel_z, coors_x_min, coors_y_min, coors_z_min, coors_x_max, coors_y_max, coors_z_max, grid_x, grid_y, grid_z, num_points, num_features, NDim); }); cudaDeviceSynchronize(); AT_CUDA_CHECK(cudaGetLastError()); return; } } // namespace voxelization
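// ---------------------------------------------------------------------------
// Hypothetical CPU reference sketch (not part of the library): hard_voxelize_gpu()
// above proceeds in five steps: (1) map every point to an integer voxel
// coordinate, (2) find each point's slot inside its voxel (points beyond
// max_points are dropped), (3) number the voxels sequentially in
// first-appearance order up to max_voxels, (4) scatter point features into the
// voxel buffer, and (5) copy voxel coordinates. The function below mirrors
// those semantics with a map; it assumes `voxels` is pre-sized to
// max_voxels * max_points * num_features.
#include <algorithm>
#include <cmath>
#include <map>
#include <tuple>
#include <vector>

int HardVoxelizeRef(const std::vector<float> &points, int num_features,
                    const std::vector<float> &voxel_size,   // {vx, vy, vz}
                    const std::vector<float> &coors_range,  // {x0,y0,z0,x1,y1,z1}
                    int max_points, int max_voxels,
                    std::vector<float> *voxels,            // flattened [V][P][F]
                    std::vector<int> *coors,               // (z, y, x) per voxel
                    std::vector<int> *num_points_per_voxel) {
  std::map<std::tuple<int, int, int>, int> voxel_id;
  const int num_points = static_cast<int>(points.size()) / num_features;
  for (int i = 0; i < num_points; ++i) {
    const float *p = &points[i * num_features];
    int c[3];
    bool in_range = true;
    for (int d = 0; d < 3 && in_range; ++d) {
      c[d] = static_cast<int>(std::floor((p[d] - coors_range[d]) / voxel_size[d]));
      const int grid = static_cast<int>(
          std::round((coors_range[d + 3] - coors_range[d]) / voxel_size[d]));
      in_range = (c[d] >= 0 && c[d] < grid);
    }
    if (!in_range) continue;  // same effect as the -1 coordinates above
    const auto key = std::make_tuple(c[2], c[1], c[0]);  // stored as (z, y, x)
    auto it = voxel_id.find(key);
    int v;
    if (it == voxel_id.end()) {
      if (static_cast<int>(voxel_id.size()) >= max_voxels) continue;
      v = static_cast<int>(voxel_id.size());
      voxel_id.emplace(key, v);
      coors->insert(coors->end(), {c[2], c[1], c[0]});
      num_points_per_voxel->push_back(0);
    } else {
      v = it->second;
    }
    int &count = (*num_points_per_voxel)[v];
    if (count >= max_points) continue;  // voxel already full, drop the point
    std::copy(p, p + num_features,
              voxels->begin() + (v * max_points + count) * num_features);
    ++count;
  }
  return static_cast<int>(voxel_id.size());  // number of voxels produced
}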
#include <cassert> // CUDA #include "../include/cuda_basic.h" #include "../include/helper_math.h" #include <thrust/copy.h> #include <thrust/execution_policy.h> #include <thrust/reduce.h> #include <thrust/scan.h> namespace pbf { namespace impl_ { // ParticleSystemGpu // // CellGridGpu (equivalent to SpatialHash on CPU) // - Need to config the grid size and cell size before usage. Once // configured, they are not allowed to be modified. Not adaptive. // - active cell: a cell that contains at least one particle. // // Example: a particle system of 8 particles, a cell grid of 5 cells. // We will illustrate the necessary arrays for updating the cell grid. // // cell index // | 0 | 1 | 2 | 3 | 4 | // // ptc_to_cell // - size: #particles // - a map between each particle index to its cell // (NOT the active cell) index // // cell_num_ptcs // 3 0 1 4 0 // - size: #cells // - number of particles in each cell, including inactive(empty) ones // - sum of this array is the total number of particles // // cell_is_active_flags // 1 0 1 1 0 // - size: #cells // - sum of this array is the number of active cells // // cell_to_active_cell_indices // 0 1 1 2 3 // - size: #cells // - a prefix scan of |cell_is_active_flags| // // active_cell_num_ptcs // 3 1 4 // - size: #active cells // - a compact of |cell_num_ptcs| accoording to |cell_to_active_cell_indices| // - sum of this array is the total number of particles // // ptc_begins_in_active_cell // 0 3 4 // - size: #active cells // - beginning index of the particle in each (active) cell in // |cell_ptc_indices| // - a prefix scan of |active_cell_num_ptcs| // // cell_ptc_indices // - size: #ptcs // - each slot stores a particle index in the particle system. these particle // indices are arranged in a way that particles within the same grid cell // are continuously stored inside |cell_ptc_indices|. // // ptc_offsets_within_cell // - size: #particles // - for any given particle index, |p_i|, we can get its position, |pos_i|, // and its cell, |cell_i|. 
Then: // // the active cell index of |cell_i| in which |ptc_i| lives // ac_idx = cell_to_active_cell_indices[cell_i]; // // the beginning index of the particles within |cell_i| // // in |cell_ptc_indices| // ptc_begin_idx = ptc_begins_in_active_cell[ac_idx]; // p_i' = cell_ptc_indices[ptc_begin_idx + ptc_offset_within_cell[p_i]]; // assert(p_i == p_i'); // // Find neighbors for each particle: // ptc_num_neighbors // - size: #particles // - stores the number of neighbor particles for each particle // // ptc_neighbor_begins // - size: #particles // - ~[p_i] stores the beginning index of particle |p_i|'s neighbors // in |ptc_neighbor_indices| // // ptc_neighbor_indices // - size: sum of |ptc_num_neighbors| int ComputeNumBlocks(int num) { return ((num + kNumThreadPerBlock - 1) / kNumThreadPerBlock); } __device__ int3 GetCell(float3 pos, float cell_sz) { const float cell_sz_recpr = 1.0f / cell_sz; int cx = (int)(pos.x * cell_sz_recpr); int cy = (int)(pos.y * cell_sz_recpr); int cz = (int)(pos.z * cell_sz_recpr); return make_int3(cx, cy, cz); } __device__ int GetCellIndex(int3 cell, int3 num_cells_dim) { int result = cell.y * num_cells_dim.z; result = (result + cell.z) * num_cells_dim.x; result += cell.x; return result; } __device__ bool IsCellInRange(int3 cell, int3 num_cells_dim) { return ((0 <= cell.x && cell.x < num_cells_dim.x) && (0 <= cell.y && cell.y < num_cells_dim.y) && (0 <= cell.z && cell.z < num_cells_dim.z)); } __device__ float DistanceSquare(float3 a, float3 b) { float x = a.x - b.x; float y = a.y - b.y; float z = a.z - b.z; float result = x * x + y * y + z * z; return result; } __device__ bool IsInside(const float3 &pt, const float3 &min, const float3 &max) { bool cond = (min.x <= pt.x) && (pt.x <= max.x) && (min.y <= pt.y) && (pt.y <= max.y) && (min.z <= pt.z) && (pt.z <= max.z); return cond; } ///// // CellGrid ///// // - compute |ptc_to_cell| // - count |cell_num_ptcs| // - set the offset of each partilce in |ptc_offset_within_cell|. __global__ void CellGridEntryPointKernel(const float3 *positions, const int num_ptcs, const float cell_sz, const int3 num_cells_dim, int *ptc_to_cell, int *cell_num_ptcs, int *ptc_offsets_within_cell) { const int ptc_i = (blockIdx.x * blockDim.x) + threadIdx.x; if (ptc_i >= num_ptcs) return; int3 ptc_cell = GetCell(positions[ptc_i], cell_sz); int cell_index = GetCellIndex(ptc_cell, num_cells_dim); ptc_to_cell[ptc_i] = cell_index; // Count the number of particles in |ptc_cell|. The returned // value is also used as this particle's unique offset. 
int offs = atomicAdd(&cell_num_ptcs[cell_index], 1); (void)offs; ptc_offsets_within_cell[ptc_i] = offs; } // set |cell_is_active_flags| __global__ void SetCellIsActiveFlagsKernel(const int *cell_num_ptcs, const int num_cells, int *cell_is_active_flags) { const int cell_i = (blockIdx.x * blockDim.x) + threadIdx.x; if (cell_i >= num_cells) return; cell_is_active_flags[cell_i] = (cell_num_ptcs[cell_i] > 0); } // compute |cell_to_active_cell_indices| void ComputeCellToActiveCellIndices( const d_vector<int> &cell_is_active_flags, d_vector<int> *cell_to_active_cell_indices) { assert(cell_is_active_flags.size() == cell_to_active_cell_indices->size()); thrust::exclusive_scan(thrust::device, cell_is_active_flags.begin(), cell_is_active_flags.end(), cell_to_active_cell_indices->begin(), 0); } __global__ void CompactKernel(const int *input, const int *flag, const int *compact_indices, const int size, int *output) { const int idx = (blockIdx.x * blockDim.x) + threadIdx.x; if (idx >= size) return; if (flag[idx] != 0) { const int compact_idx = compact_indices[idx]; output[compact_idx] = input[idx]; } } // compact |cell_num_ptcs| to get |active_cell_num_ptcs| void ComputeActiveCellNumPtcs(const d_vector<int> &cell_num_ptcs, const d_vector<int> &cell_is_active_flags, const d_vector<int> &cell_to_active_cell_indices, d_vector<int> *active_cell_num_ptcs) { const int size = cell_is_active_flags.size(); const int num_blocks = ComputeNumBlocks(size); const int *input = thrust::raw_pointer_cast(cell_num_ptcs.data()); const int *flags = thrust::raw_pointer_cast(cell_is_active_flags.data()); const int *compact_indices = thrust::raw_pointer_cast(cell_to_active_cell_indices.data()); int *output = thrust::raw_pointer_cast(active_cell_num_ptcs->data()); CompactKernel<<<num_blocks, kNumThreadPerBlock>>>( input, flags, compact_indices, size, output); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } // compute |ptc_begins_in_active_cell| void ComputePtcBeginsInActiveCell(const d_vector<int> &active_cell_num_ptcs, d_vector<int> *ptc_begins_in_active_cell) { assert(active_cell_num_ptcs.size() == ptc_begins_in_active_cell->size()); thrust::exclusive_scan(thrust::device, active_cell_num_ptcs.begin(), active_cell_num_ptcs.end(), ptc_begins_in_active_cell->begin(), 0); } // compute |cell_ptc_indices| __global__ void ComputeCellPtcIndicesKernel( const int *ptc_to_cell, const int *cell_to_active_cell_indices, const int *ptc_begins_in_active_cell, const int *ptc_offsets_within_cell, const int num_ptcs, int *cell_ptc_indices) { const int ptc_i = (blockIdx.x * blockDim.x) + threadIdx.x; if (ptc_i >= num_ptcs) return; const int cell_i = ptc_to_cell[ptc_i]; // active cell index const int ac_idx = cell_to_active_cell_indices[cell_i]; const int ptc_begin_index = ptc_begins_in_active_cell[ac_idx]; const int i = ptc_begin_index + ptc_offsets_within_cell[ptc_i]; cell_ptc_indices[i] = ptc_i; } ///// // Find Neighbor Particles ///// // Count |ptc_num_neighbors| // - |radius|: searching radius __global__ void CountPtcNumNeighborsKernel( const float3 *positions, const int *cell_is_active_flags, const int *cell_to_active_cell_indices, const int *cell_ptc_indices, const int *ptc_begins_in_active_cell, const int *active_cell_num_ptcs, const int num_ptcs, const float cell_sz, const int3 num_cells_dim, const float radius, int *ptc_num_neighbors) { const int ptc_i = (blockIdx.x * blockDim.x) + threadIdx.x; if (ptc_i >= num_ptcs) return; int3 ptc_cell = GetCell(positions[ptc_i], cell_sz); int num_neighbors = 0; const float radius_sqr 
= radius * radius; const float3 pos_i = positions[ptc_i]; // We are only checking the 8 adjacent cells plus the cell itself, // this implies that our cell size must be greater than |radius|. for (int cz = -1; cz <= 1; ++cz) { for (int cy = -1; cy <= 1; ++cy) { for (int cx = -1; cx <= 1; ++cx) { int3 nb_cell = ptc_cell + make_int3(cx, cy, cz); if (!IsCellInRange(nb_cell, num_cells_dim)) continue; int nb_cell_idx = GetCellIndex(nb_cell, num_cells_dim); if (!cell_is_active_flags[nb_cell_idx]) continue; const int nb_ac_idx = cell_to_active_cell_indices[nb_cell_idx]; const int ac_num_ptcs = active_cell_num_ptcs[nb_ac_idx]; const int nb_ptc_begin = ptc_begins_in_active_cell[nb_ac_idx]; for (int offs = 0; offs < ac_num_ptcs; ++offs) { const int ptc_j = cell_ptc_indices[nb_ptc_begin + offs]; if (ptc_i == ptc_j) continue; float dist_sqr = DistanceSquare(pos_i, positions[ptc_j]); if (dist_sqr < radius_sqr) { ++num_neighbors; } } } } } ptc_num_neighbors[ptc_i] = num_neighbors; } // compute |ptc_neighbor_begins| void ComputePtcNeighborBegins(const d_vector<int> &ptc_num_neighbors, d_vector<int> *ptc_neighbor_begins) { assert(ptc_num_neighbors.size() == ptc_neighbor_begins->size()); thrust::exclusive_scan(thrust::device, ptc_num_neighbors.begin(), ptc_num_neighbors.end(), ptc_neighbor_begins->begin(), 0); } // Find neighbor particles and store them in |ptc_neighbor_indices| // - |radius|: searching radius __global__ void FindPtcNeighborIndicesKernel( const float3 *positions, const int *cell_is_active_flags, const int *cell_to_active_cell_indices, const int *cell_ptc_indices, const int *ptc_begins_in_active_cell, const int *active_cell_num_ptcs, const int num_ptcs, const float cell_sz, const int3 num_cells_dim, const float radius, const int *ptc_neighbor_begins, int *ptc_neighbor_indices, const int *ptc_num_neighbors /*debug purpose, rm once correct*/) { const int ptc_i = (blockIdx.x * blockDim.x) + threadIdx.x; if (ptc_i >= num_ptcs) return; int3 ptc_cell = GetCell(positions[ptc_i], cell_sz); int cur = ptc_neighbor_begins[ptc_i]; const int cur_copy = cur; const float radius_sqr = radius * radius; const float3 pos_i = positions[ptc_i]; // We are only checking the 8 adjacent cells plus the cell itself, // this implies that our cell size must be greater than |radius|. for (int cz = -1; cz <= 1; ++cz) { for (int cy = -1; cy <= 1; ++cy) { for (int cx = -1; cx <= 1; ++cx) { int3 nb_cell = ptc_cell + make_int3(cx, cy, cz); if (!IsCellInRange(nb_cell, num_cells_dim)) continue; int nb_cell_idx = GetCellIndex(nb_cell, num_cells_dim); if (!cell_is_active_flags[nb_cell_idx]) continue; const int nb_ac_idx = cell_to_active_cell_indices[nb_cell_idx]; const int ac_num_ptcs = active_cell_num_ptcs[nb_ac_idx]; const int nb_ptc_begin = ptc_begins_in_active_cell[nb_ac_idx]; for (int offs = 0; offs < ac_num_ptcs; ++offs) { const int ptc_j = cell_ptc_indices[nb_ptc_begin + offs]; if (ptc_i == ptc_j) continue; float dist_sqr = DistanceSquare(pos_i, positions[ptc_j]); if (dist_sqr < radius_sqr) { ptc_neighbor_indices[cur] = ptc_j; ++cur; } } } } } // Use GPU assert! 
// assert((cur - cur_copy) == ptc_num_neighbors[ptc_i]); } __global__ static void QueryCountKernel( const int num_cells, const float3 range_min, const float3 range_max, const float3 *positions, const int *cell_is_active_flags, const int *cell_to_active_cell_indices, const int *ptc_begins_in_active_cell, const int *active_cell_num_ptcs, const int *cell_ptc_indices, int *cell_num_ptcs_inside) { int cell_i = (blockDim.x * blockIdx.x) + threadIdx.x; if (cell_i >= num_cells) return; bool is_active = cell_is_active_flags[cell_i]; if (!is_active) return; const int ac_idx = cell_to_active_cell_indices[cell_i]; const int ptc_begin = ptc_begins_in_active_cell[ac_idx]; const int ac_num_ptcs = active_cell_num_ptcs[ac_idx]; int num_inside = 0; for (int offs = 0; offs < ac_num_ptcs; ++offs) { int ptc_i = cell_ptc_indices[ptc_begin + offs]; if (IsInside(positions[ptc_i], range_min, range_max)) { ++num_inside; } } cell_num_ptcs_inside[cell_i] = num_inside; } } // namespace impl_ CellGridGpu::CellGridGpu(float3 world_sz, float cell_sz) : world_sz_per_dim_(world_sz), cell_sz_(cell_sz) { num_cells_per_dim_.x = (int)(world_sz_per_dim_.x / cell_sz_) + 1; num_cells_per_dim_.y = (int)(world_sz_per_dim_.y / cell_sz_) + 1; num_cells_per_dim_.z = (int)(world_sz_per_dim_.z / cell_sz_) + 1; total_num_cells_ = num_cells_per_dim_.x * num_cells_per_dim_.y * num_cells_per_dim_.z; } void UpdateCellGrid(const d_vector<float3> &positions, CellGridGpu *cell_grid) { using thrust::raw_pointer_cast; using namespace impl_; // extract necessary params const int num_ptcs = positions.size(); const int num_cells = cell_grid->total_num_cells(); // extract necessary pointers const float3 *positions_ptr = raw_pointer_cast(positions.data()); d_vector<int> &ptc_to_cell = cell_grid->ptc_to_cell; ptc_to_cell.clear(); ptc_to_cell.resize(num_ptcs, 0); int *ptc_to_cell_ptr = raw_pointer_cast(ptc_to_cell.data()); d_vector<int> cell_num_ptcs(num_cells, 0); int *cell_num_ptcs_ptr = raw_pointer_cast(cell_num_ptcs.data()); // d_vector<int> ptc_offsets_within_cell(num_ptcs, 0); d_vector<int> &ptc_offsets_within_cell = cell_grid->ptc_offsets_within_cell; ptc_offsets_within_cell.clear(); ptc_offsets_within_cell.resize(num_ptcs, 0); int *ptc_offsets_within_cell_ptr = raw_pointer_cast(ptc_offsets_within_cell.data()); const int num_blocks_ptc = ComputeNumBlocks(num_ptcs); CellGridEntryPointKernel<<<num_blocks_ptc, kNumThreadPerBlock>>>( positions_ptr, num_ptcs, cell_grid->cell_size(), cell_grid->num_cells_per_dim(), ptc_to_cell_ptr, cell_num_ptcs_ptr, ptc_offsets_within_cell_ptr); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); d_vector<int> &cell_is_active_flags = cell_grid->cell_is_active_flags; cell_is_active_flags.clear(); cell_is_active_flags.resize(num_cells, 0); int *cell_is_active_flags_ptr = raw_pointer_cast(cell_is_active_flags.data()); const int num_blocks_cell = ComputeNumBlocks(num_cells); SetCellIsActiveFlagsKernel<<<num_blocks_cell, kNumThreadPerBlock>>>( cell_num_ptcs_ptr, num_cells, cell_is_active_flags_ptr); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); d_vector<int> &cell_to_active_cell_indices = cell_grid->cell_to_active_cell_indices; cell_to_active_cell_indices.clear(); cell_to_active_cell_indices.resize(num_cells, 0); ComputeCellToActiveCellIndices(cell_is_active_flags, &cell_to_active_cell_indices); d_vector<int> &active_cell_num_ptcs = cell_grid->active_cell_num_ptcs; active_cell_num_ptcs.clear(); active_cell_num_ptcs.resize(num_cells, 0); ComputeActiveCellNumPtcs(cell_num_ptcs, cell_is_active_flags, 
cell_to_active_cell_indices, &active_cell_num_ptcs); d_vector<int> &ptc_begins_in_active_cell = cell_grid->ptc_begins_in_active_cell; ptc_begins_in_active_cell.clear(); ptc_begins_in_active_cell.resize(num_cells, 0); ComputePtcBeginsInActiveCell(active_cell_num_ptcs, &ptc_begins_in_active_cell); const int *cell_to_active_cell_indices_ptr = raw_pointer_cast(cell_to_active_cell_indices.data()); const int *ptc_begins_in_active_cell_ptr = raw_pointer_cast(ptc_begins_in_active_cell.data()); d_vector<int> &cell_ptc_indices = cell_grid->cell_ptc_indices; cell_ptc_indices.clear(); cell_ptc_indices.resize(num_ptcs, 0); int *cell_ptc_indices_ptr = raw_pointer_cast(cell_ptc_indices.data()); ComputeCellPtcIndicesKernel<<<num_blocks_ptc, kNumThreadPerBlock>>>( ptc_to_cell_ptr, cell_to_active_cell_indices_ptr, ptc_begins_in_active_cell_ptr, ptc_offsets_within_cell_ptr, num_ptcs, cell_ptc_indices_ptr); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void FindParticleNeighbors(const d_vector<float3> &positions, const CellGridGpu &cell_grid, const float h, GpuParticleNeighbors *pn) { using namespace impl_; using thrust::raw_pointer_cast; // extract necessary params const float cell_sz = cell_grid.cell_size(); const int3 num_cells_dim = cell_grid.num_cells_per_dim(); const int num_ptcs = positions.size(); // extract necessary pointers const float3 *positions_ptr = raw_pointer_cast(positions.data()); const int *cell_is_active_flags_ptr = raw_pointer_cast(cell_grid.cell_is_active_flags.data()); const int *cell_to_active_cell_indices_ptr = raw_pointer_cast(cell_grid.cell_to_active_cell_indices.data()); const int *cell_ptc_indices_ptr = raw_pointer_cast(cell_grid.cell_ptc_indices.data()); const int *ptc_begins_in_active_cell_ptr = raw_pointer_cast(cell_grid.ptc_begins_in_active_cell.data()); const int *active_cell_num_ptcs_ptr = raw_pointer_cast(cell_grid.active_cell_num_ptcs.data()); d_vector<int> &ptc_num_neighbors = pn->ptc_num_neighbors; // make sure we allocate memory first ptc_num_neighbors.clear(); ptc_num_neighbors.resize(num_ptcs, 0); int *ptc_num_neighbors_ptr = raw_pointer_cast(ptc_num_neighbors.data()); const int num_blocks_ptc = ComputeNumBlocks(num_ptcs); // First step, count how many neighbors each particle has CountPtcNumNeighborsKernel<<<num_blocks_ptc, kNumThreadPerBlock>>>( positions_ptr, cell_is_active_flags_ptr, cell_to_active_cell_indices_ptr, cell_ptc_indices_ptr, ptc_begins_in_active_cell_ptr, active_cell_num_ptcs_ptr, num_ptcs, cell_sz, num_cells_dim, h, ptc_num_neighbors_ptr); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // make sure we allocate memory first d_vector<int> &ptc_neighbor_begins = pn->ptc_neighbor_begins; ptc_neighbor_begins.clear(); ptc_neighbor_begins.resize(num_ptcs, 0); ComputePtcNeighborBegins(ptc_num_neighbors, &ptc_neighbor_begins); const int *ptc_neighbor_begins_ptr = raw_pointer_cast(ptc_neighbor_begins.data()); // make sure we allocate memory first d_vector<int> &ptc_neighbor_indices = pn->ptc_neighbor_indices; { int vec_sz = thrust::reduce(ptc_num_neighbors.begin(), ptc_num_neighbors.end(), 0); ptc_neighbor_indices.clear(); ptc_neighbor_indices.resize(vec_sz, 0); } int *ptc_neighbor_indices_ptr = raw_pointer_cast(ptc_neighbor_indices.data()); // Second step, record the neighbor indices for each particle FindPtcNeighborIndicesKernel<<<num_blocks_ptc, kNumThreadPerBlock>>>( positions_ptr, cell_is_active_flags_ptr, cell_to_active_cell_indices_ptr, cell_ptc_indices_ptr, ptc_begins_in_active_cell_ptr, active_cell_num_ptcs_ptr, 
num_ptcs, cell_sz, num_cells_dim, h, ptc_neighbor_begins_ptr, ptc_neighbor_indices_ptr, ptc_num_neighbors_ptr /*debug purpose, rm once correct*/); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void Query(const d_vector<float3> &positions, const CellGridGpu &cell_grid, const AABB &range, d_vector<int> *cell_num_ptcs_inside) { using namespace impl_; using thrust::raw_pointer_cast; const int num_cells = cell_grid.total_num_cells(); const float3 range_min = Convert(range.min()); const float3 range_max = Convert(range.max()); const float3 *positions_ptr = raw_pointer_cast(positions.data()); const int *cell_is_active_flags_ptr = raw_pointer_cast(cell_grid.cell_is_active_flags.data()); const int *cell_to_active_cell_indices_ptr = raw_pointer_cast(cell_grid.cell_to_active_cell_indices.data()); const int *ptc_begins_in_active_cell_ptr = raw_pointer_cast(cell_grid.ptc_begins_in_active_cell.data()); const int *active_cell_num_ptcs_ptr = raw_pointer_cast(cell_grid.active_cell_num_ptcs.data()); const int *cell_ptc_indices_ptr = raw_pointer_cast(cell_grid.cell_ptc_indices.data()); cell_num_ptcs_inside->clear(); cell_num_ptcs_inside->resize(num_cells, 0); int *cell_num_ptcs_inside_ptr = raw_pointer_cast(cell_num_ptcs_inside->data()); const int num_blocks_cell = ComputeNumBlocks(num_cells); QueryCountKernel<<<num_blocks_cell, kNumThreadPerBlock>>>( num_cells, range_min, range_max, positions_ptr, cell_is_active_flags_ptr, cell_to_active_cell_indices_ptr, ptc_begins_in_active_cell_ptr, active_cell_num_ptcs_ptr, cell_ptc_indices_ptr, cell_num_ptcs_inside_ptr); } #define GRAVITY_Y -9.8f __global__ static void ApplyGravityKernel(const int num_ptcs, const float dt, float3 *positions, float3 *velocities) { const int ptc_i = (blockDim.x * blockIdx.x) + threadIdx.x; if (ptc_i >= num_ptcs) return; float3 pos_i = positions[ptc_i]; float3 vel_i = velocities[ptc_i]; vel_i.y += GRAVITY_Y * dt; pos_i += vel_i * dt; positions[ptc_i] = pos_i; velocities[ptc_i] = vel_i; } #undef GRAVITY_Y __global__ static void ComputeLambdaKernel(const float3 *positions, const int *ptc_num_neighbors, const int *ptc_neighbor_begins, const int *ptc_neighbor_indices, const int num_ptcs, const float h, const float mass, const float rho_0_recpr, const float epsilon, float *lambdas) { const int ptc_i = (blockDim.x * blockIdx.x) + threadIdx.x; if (ptc_i >= num_ptcs) return; const int num_nbs = ptc_num_neighbors[ptc_i]; const int nb_begin = ptc_neighbor_begins[ptc_i]; const float3 pos_i = positions[ptc_i]; float3 gradient_i = make_float3(0.0f); float sum_gradient = 0.0f; float density_constraint = 0.0f; for (int offs = 0; offs < num_nbs; ++offs) { const int ptc_j = ptc_neighbor_indices[nb_begin + offs]; const float3 pos_ji = pos_i - positions[ptc_j]; const float3 gradient_j = SpikyGradient(pos_ji, h); sum_gradient += dot(gradient_j, gradient_j); gradient_i += gradient_j; density_constraint += mass * Poly6Value(pos_ji, h); } sum_gradient += dot(gradient_i, gradient_i); density_constraint = (density_constraint * rho_0_recpr) - 1.0f; const float lambda_i = (-density_constraint) / (sum_gradient + epsilon); lambdas[ptc_i] = lambda_i; } __device__ float ComputeScorr(const float3 pos_ji, const float h, const float corr_delta_q_coeff, const float corr_k, const float corr_n) { // Eq (13) float x = Poly6Value(pos_ji, h) / Poly6Value(corr_delta_q_coeff * h, h); float result = (-corr_k) * pow(x, corr_n); return result; } __global__ static void ComputeDeltaPositionsKernel( const float3 *positions, const int *ptc_num_neighbors, const int 
*ptc_neighbor_begins, const int *ptc_neighbor_indices, const float *lambdas, const int num_ptcs, const float h, const float rho_0_recpr, const float corr_delta_q_coeff, const float corr_k, const float corr_n, float3 *delta_positions) { const int ptc_i = (blockDim.x * blockIdx.x) + threadIdx.x; if (ptc_i >= num_ptcs) return; const int num_nbs = ptc_num_neighbors[ptc_i]; const int nb_begin = ptc_neighbor_begins[ptc_i]; const float3 pos_i = positions[ptc_i]; const float lambda_i = lambdas[ptc_i]; float3 delta_pos_i = make_float3(0.0f); for (int offs = 0; offs < num_nbs; ++offs) { const int ptc_j = ptc_neighbor_indices[nb_begin + offs]; const float lambda_j = lambdas[ptc_j]; const float3 pos_ji = pos_i - positions[ptc_j]; const float scorr_ij = ComputeScorr(pos_ji, h, corr_delta_q_coeff, corr_k, corr_n); delta_pos_i += (lambda_i + lambda_j + scorr_ij) * SpikyGradient(pos_ji, h); } delta_pos_i *= rho_0_recpr; delta_positions[ptc_i] = delta_pos_i; } __global__ static void ApplyDeltaPositionsKernel(const float3 *delta_positions, const int num_ptcs, float3 *positions) { const int ptc_i = (blockDim.x * blockIdx.x) + threadIdx.x; if (ptc_i >= num_ptcs) return; float3 pos_i = positions[ptc_i]; pos_i += delta_positions[ptc_i]; positions[ptc_i] = pos_i; } __global__ static void UpdateVelocitiesKernel(const float3 *old_positions, const float3 *new_positions, const int num_ptcs, const float dt, float3 *velocities) { const int ptc_i = (blockDim.x * blockIdx.x) + threadIdx.x; if (ptc_i >= num_ptcs) return; const float3 old_pos_i = old_positions[ptc_i]; const float3 new_pos_i = new_positions[ptc_i]; const float3 new_vel_i = (new_pos_i - old_pos_i) / dt; velocities[ptc_i] = new_vel_i; } __global__ static void ComputeVorticitiesKernel(const float3 *positions, const float3 *velocities, const int *ptc_num_neighbors, const int *ptc_neighbor_begins, const int *ptc_neighbor_indices, const int num_ptcs, const float h, float3 *vorticities) { const int ptc_i = (blockDim.x * blockIdx.x) + threadIdx.x; if (ptc_i >= num_ptcs) return; const float3 pos_i = positions[ptc_i]; const float3 vel_i = velocities[ptc_i]; const int num_nbs = ptc_num_neighbors[ptc_i]; const int nb_begin = ptc_neighbor_begins[ptc_i]; float3 vorticity = make_float3(0.0f); for (int offs = 0; offs < num_nbs; ++offs) { const int ptc_j = ptc_neighbor_indices[nb_begin + offs]; // vel_diff_ij = vel_j - vel_i; const float3 vel_ij = velocities[ptc_j] - vel_i; // gradient = kernel_.Gradient(pos_i - pos_j); const float3 pos_ji = pos_i - positions[ptc_j]; const float3 gradient = SpikyGradient(pos_ji, h); // result += glm::cross(vel_diff_ij, gradient); vorticity += cross(vel_ij, gradient); } vorticities[ptc_i] = vorticity; } __global__ static void ComputeVorticityCorrForcesKernel( const float3 *positions, const int *ptc_num_neighbors, const int *ptc_neighbor_begins, const int *ptc_neighbor_indices, const float3 *vorticities, const int num_ptcs, const float h, const float vorticity_epsilon, float3 *vorticity_corr_forces) { const int ptc_i = (blockDim.x * blockIdx.x) + threadIdx.x; if (ptc_i >= num_ptcs) return; const float3 pos_i = positions[ptc_i]; // Compute Eta const int num_nbs = ptc_num_neighbors[ptc_i]; const int nb_begin = ptc_neighbor_begins[ptc_i]; float3 eta = make_float3(0.0f); for (int offs = 0; offs < num_nbs; ++offs) { const int ptc_j = ptc_neighbor_indices[nb_begin + offs]; const float3 pos_ji = pos_i - positions[ptc_j]; const float omega_j_len = length(vorticities[ptc_j]); const float3 gradient = SpikyGradient(pos_ji, h); eta += (omega_j_len * 
gradient); } // Compute Vorticity Corr Force const float eta_len = length(eta); float3 vort_corr_force = make_float3(0.0f); if (eta_len > 1e-6) { eta = normalize(eta); const float3 omega_i = vorticities[ptc_i]; vort_corr_force = vorticity_epsilon * cross(eta, omega_i); } vorticity_corr_forces[ptc_i] = vort_corr_force; } __global__ static void ComputeXsphsKernel(const float3 *positions, const float3 *velocities, const int *ptc_num_neighbors, const int *ptc_neighbor_begins, const int *ptc_neighbor_indices, const int num_ptcs, const float h, const float xsph_c, float3 *xsphs) { const int ptc_i = (blockDim.x * blockIdx.x) + threadIdx.x; if (ptc_i >= num_ptcs) return; const float3 pos_i = positions[ptc_i]; const float3 vel_i = velocities[ptc_i]; const int num_nbs = ptc_num_neighbors[ptc_i]; const int nb_begin = ptc_neighbor_begins[ptc_i]; float3 xsph = make_float3(0.0f); for (int offs = 0; offs < num_nbs; ++offs) { const int ptc_j = ptc_neighbor_indices[nb_begin + offs]; const float3 vel_ij = velocities[ptc_j] - vel_i; const float w = Poly6Value(pos_i - positions[ptc_j], h); xsph += (w * vel_ij); } xsph *= xsph_c; xsphs[ptc_i] = xsph; } __global__ static void ApplyVelocityCorrectionsKernel(const float3 *vorticity_corr_forces, const float3 *xsphs, const int num_ptcs, const float dt, float3 *velocities) { const int ptc_i = (blockDim.x * blockIdx.x) + threadIdx.x; if (ptc_i >= num_ptcs) return; float3 vel_i = velocities[ptc_i]; vel_i += vorticity_corr_forces[ptc_i] * dt; vel_i += xsphs[ptc_i]; velocities[ptc_i] = vel_i; } void PbfSolverGpu::CustomConfigure_(const PbfSolverConfig &config) { cell_grid_size_ = config.spatial_hash_cell_size; } void PbfSolverGpu::CustomInitPs_() { // cache num of particles num_ptcs_ = ps_->NumParticles(); // copy the positions/velocities to the device memory. // // This is only needed once, later on after every update, // we only need to copy the most up-to-date data from the // device memory back to each particle. This is because // PbfSolverGpu is the only one who modifies the particles' // data (position/velocity). 
ps_adaptor_ = std::make_shared<ParticleSystemGpuAdaptor>(); ps_adaptor_->SetPs(ps_); // d_positions_.reserve(num_ptcs_); // d_velocities_.reserve(num_ptcs_); // for (size_t p_i = 0; p_i < num_ptcs_; ++p_i) { // auto ptc_i = ps_->Get(p_i); // d_positions_.push_back(Convert(ptc_i.position())); // d_velocities_.push_back(Convert(ptc_i.velocity())); // } // init particle records const float3 zeros = make_float3(0.0f); old_positions_.resize(num_ptcs_, zeros); lambdas_.resize(num_ptcs_, 0.0f); delta_positions_.resize(num_ptcs_, zeros); vorticities_.resize(num_ptcs_, zeros); vorticity_corr_forces_.resize(num_ptcs_, zeros); xsphs_.resize(num_ptcs_, zeros); } void PbfSolverGpu::SetBoundaryConstraint(BoundaryConstraintGpu *bc) { boundary_constraint_ = bc; assert(ps_adaptor_ != nullptr); boundary_constraint_->SetPsAdaptor(ps_adaptor_); } void PbfSolverGpu::Update(float dt) { ResetParticleRecords_(); RecordOldPositions_(); ApplyGravity_(dt); ImposeBoundaryConstraint_(); FindNeighbors_(); for (unsigned itr = 0; itr < num_iters_; ++itr) { ComputeLambdas_(); ComputeDeltaPositions_(); ApplyDeltaPositions_(); } ImposeBoundaryConstraint_(); UpdateVelocities_(dt); ComputeVorticities_(); ComputeVorticityCorrForces_(); ComputeXsphs_(); ApplyVelocityCorrections_(dt); UpdatePs_(); } void PbfSolverGpu::ResetParticleRecords_() { using thrust::device; using thrust::fill; const float3 zeros = make_float3(0.0f); fill(device, old_positions_.begin(), old_positions_.end(), zeros); fill(device, lambdas_.begin(), lambdas_.end(), 0.0f); fill(device, delta_positions_.begin(), delta_positions_.end(), zeros); fill(device, vorticities_.begin(), vorticities_.end(), zeros); fill(device, vorticity_corr_forces_.begin(), vorticity_corr_forces_.end(), zeros); fill(device, xsphs_.begin(), xsphs_.end(), zeros); } void PbfSolverGpu::RecordOldPositions_() { auto *d_positions = ps_adaptor_->PositionsVec(); thrust::copy(thrust::device, d_positions->begin(), d_positions->end(), old_positions_.begin()); } void PbfSolverGpu::ApplyGravity_(const float dt) { const int num_blocks_ptc = impl_::ComputeNumBlocks(num_ptcs_); ApplyGravityKernel<<<num_blocks_ptc, kNumThreadPerBlock>>>( num_ptcs_, dt, PositionsPtr_(), VelocitiesPtr_()); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void PbfSolverGpu::ImposeBoundaryConstraint_() { boundary_constraint_->ApplyBoundaryConstraint(); } void PbfSolverGpu::FindNeighbors_() { const float3 world_sz_dim = make_float3(world_size_x_, world_size_y_, world_size_z_); CellGridGpu cell_grid{world_sz_dim, cell_grid_size_}; auto &d_positions = *(ps_adaptor_->PositionsVec()); UpdateCellGrid(d_positions, &cell_grid); FindParticleNeighbors(d_positions, cell_grid, h_, &ptc_nb_recs_); } void PbfSolverGpu::ComputeLambdas_() { float *lambdas_ptr = thrust::raw_pointer_cast(lambdas_.data()); const int num_blocks_ptc = impl_::ComputeNumBlocks(num_ptcs_); ComputeLambdaKernel<<<num_blocks_ptc, kNumThreadPerBlock>>>( PositionsPtr_(), ptc_nb_recs_.ptc_num_neighbors_ptr(), ptc_nb_recs_.ptc_neighbor_begins_ptr(), ptc_nb_recs_.ptc_neighbor_indices_ptr(), num_ptcs_, h_, mass_, rho_0_recpr_, epsilon_, lambdas_ptr); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void PbfSolverGpu::ComputeDeltaPositions_() { const float *lambdas_ptr = thrust::raw_pointer_cast(lambdas_.data()); float3 *delta_positions_ptr = thrust::raw_pointer_cast(delta_positions_.data()); const int num_blocks_ptc = impl_::ComputeNumBlocks(num_ptcs_); ComputeDeltaPositionsKernel<<<num_blocks_ptc, kNumThreadPerBlock>>>( PositionsPtr_(), 
ptc_nb_recs_.ptc_num_neighbors_ptr(), ptc_nb_recs_.ptc_neighbor_begins_ptr(), ptc_nb_recs_.ptc_neighbor_indices_ptr(), lambdas_ptr, num_ptcs_, h_, rho_0_recpr_, corr_delta_q_coeff_, corr_k_, corr_n_, delta_positions_ptr); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void PbfSolverGpu::ApplyDeltaPositions_() { const float3 *delta_positions_ptr = thrust::raw_pointer_cast(delta_positions_.data()); const int num_blocks_ptc = impl_::ComputeNumBlocks(num_ptcs_); ApplyDeltaPositionsKernel<<<num_blocks_ptc, kNumThreadPerBlock>>>( delta_positions_ptr, num_ptcs_, PositionsPtr_()); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void PbfSolverGpu::UpdateVelocities_(const float dt) { const float3 *old_positions_ptr = thrust::raw_pointer_cast(old_positions_.data()); const int num_blocks_ptc = impl_::ComputeNumBlocks(num_ptcs_); UpdateVelocitiesKernel<<<num_blocks_ptc, kNumThreadPerBlock>>>( old_positions_ptr, PositionsPtr_(), num_ptcs_, dt, VelocitiesPtr_()); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void PbfSolverGpu::ComputeVorticities_() { float3 *vorticities_ptr = thrust::raw_pointer_cast(vorticities_.data()); const int num_blocks_ptc = impl_::ComputeNumBlocks(num_ptcs_); ComputeVorticitiesKernel<<<num_blocks_ptc, kNumThreadPerBlock>>>( PositionsPtr_(), VelocitiesPtr_(), ptc_nb_recs_.ptc_num_neighbors_ptr(), ptc_nb_recs_.ptc_neighbor_begins_ptr(), ptc_nb_recs_.ptc_neighbor_indices_ptr(), num_ptcs_, h_, vorticities_ptr); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void PbfSolverGpu::ComputeVorticityCorrForces_() { const float3 *vorticities_ptr = thrust::raw_pointer_cast(vorticities_.data()); float3 *vort_corr_forces_ptr = thrust::raw_pointer_cast(vorticity_corr_forces_.data()); const int num_blocks_ptc = impl_::ComputeNumBlocks(num_ptcs_); ComputeVorticityCorrForcesKernel<<<num_blocks_ptc, kNumThreadPerBlock>>>( PositionsPtr_(), ptc_nb_recs_.ptc_num_neighbors_ptr(), ptc_nb_recs_.ptc_neighbor_begins_ptr(), ptc_nb_recs_.ptc_neighbor_indices_ptr(), vorticities_ptr, num_ptcs_, h_, vorticity_epsilon_, vort_corr_forces_ptr); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void PbfSolverGpu::ComputeXsphs_() { float3 *xsphs_ptr = thrust::raw_pointer_cast(xsphs_.data()); const int num_blocks_ptc = impl_::ComputeNumBlocks(num_ptcs_); ComputeXsphsKernel<<<num_blocks_ptc, kNumThreadPerBlock>>>( PositionsPtr_(), VelocitiesPtr_(), ptc_nb_recs_.ptc_num_neighbors_ptr(), ptc_nb_recs_.ptc_neighbor_begins_ptr(), ptc_nb_recs_.ptc_neighbor_indices_ptr(), num_ptcs_, h_, xsph_c_, xsphs_ptr); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void PbfSolverGpu::ApplyVelocityCorrections_(const float dt) { const float3 *vort_corr_forces_ptr = thrust::raw_pointer_cast(vorticity_corr_forces_.data()); const float3 *xsphs_ptr = thrust::raw_pointer_cast(xsphs_.data()); const int num_blocks_ptc = impl_::ComputeNumBlocks(num_ptcs_); ApplyVelocityCorrectionsKernel<<<num_blocks_ptc, kNumThreadPerBlock>>>( vort_corr_forces_ptr, xsphs_ptr, num_ptcs_, dt, VelocitiesPtr_()); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void PbfSolverGpu::UpdatePs_() { ps_adaptor_->UpdatePs(); // using thrust::host_vector; // host_vector<float3> h_positions{ d_positions_ }; // host_vector<float3> h_velocities{ d_velocities_ }; // for (size_t p_i = 0; p_i < num_ptcs_; ++p_i) { // auto ptc_i = ps_->Get(p_i); // ptc_i.set_position(Convert(h_positions[p_i])); // ptc_i.set_velocity(Convert(h_velocities[p_i])); // } } } // namespace pbf
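// ---------------------------------------------------------------------------
// Hypothetical sketch (not part of this solver): the density, pressure
// correction, vorticity, and XSPH terms above all go through Poly6Value() and
// SpikyGradient() from the included helpers, which are not shown in this file.
// Below is a common formulation of those smoothing kernels (Muller et al.
// 2003) as an assumption of what the helpers compute; the project's actual
// constants, clamping, and the scalar Poly6Value overload used by
// ComputeScorr() may differ. The functions rely on the float3 operators from
// helper_math.h.
__device__ inline float Poly6ValueSketch(const float r_len, const float h) {
  const float h2 = h * h;
  const float r2 = r_len * r_len;
  if (r2 >= h2) return 0.0f;
  const float d = h2 - r2;
  // W_poly6(r, h) = 315 / (64 * pi * h^9) * (h^2 - r^2)^3, for 0 <= r <= h
  return 315.0f / (64.0f * 3.14159265f * powf(h, 9.0f)) * d * d * d;
}

__device__ inline float Poly6ValueSketch(const float3 r, const float h) {
  return Poly6ValueSketch(length(r), h);
}

__device__ inline float3 SpikyGradientSketch(const float3 r, const float h) {
  const float r_len = length(r);
  if (r_len <= 1e-6f || r_len >= h) return make_float3(0.0f);
  const float d = h - r_len;
  // grad W_spiky(r, h) = -45 / (pi * h^6) * (h - r)^2 * r_hat, for 0 < r <= h
  return (-45.0f / (3.14159265f * powf(h, 6.0f)) * d * d / r_len) * r;
}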
using namespace at;

/* ------------------------------begin of the forward--------------------------- */
#define CUDA_1D_KERNEL_LOOP(i, n)                            \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
       i += blockDim.x * gridDim.x)

#define maxn 51
const double eps = 1E-8;

__device__ inline int sig(const float &d) { return (d > eps) - (d < -eps); }

__device__ inline int point_eq(const float2 &a, const float2 &b) {
  return sig(a.x - b.x) == 0 && sig(a.y - b.y) == 0;
}

__device__ inline void point_swap(float2 *a, float2 *b) {
  float2 temp = *a;
  *a = *b;
  *b = temp;
}

__device__ inline void point_reverse(float2 *first, float2 *last) {
  while ((first != last) && (first != --last)) {
    point_swap(first, last);
    ++first;
  }
}

// Cross product of (a - o) and (b - o).
__device__ inline float cross(const float2 &o, const float2 &a, const float2 &b) {
  return (a.x - o.x) * (b.y - o.y) - (b.x - o.x) * (a.y - o.y);
}

__device__ inline float area(float2 *ps, const int &n) {
  ps[n] = ps[0];
  float res = 0;
  for (int i = 0; i < n; i++) {
    res += ps[i].x * ps[i + 1].y - ps[i].y * ps[i + 1].x;
  }
  return res / 2.0;
}

__device__ inline int lineCross(const float2 &a, const float2 &b, const float2 &c,
                                const float2 &d, float2 &p) {
  float s1, s2;
  s1 = cross(a, b, c);
  s2 = cross(a, b, d);
  if (sig(s1) == 0 && sig(s2) == 0) return 2;
  if (sig(s2 - s1) == 0) return 0;
  p.x = (c.x * s2 - d.x * s1) / (s2 - s1);
  p.y = (c.y * s2 - d.y * s1) / (s2 - s1);
  return 1;
}

__device__ inline void polygon_cut(float2 *p, int &n, const float2 &a,
                                   const float2 &b, float2 *pp) {
  int m = 0;
  p[n] = p[0];
  for (int i = 0; i < n; i++) {
    if (sig(cross(a, b, p[i])) > 0) pp[m++] = p[i];
    if (sig(cross(a, b, p[i])) != sig(cross(a, b, p[i + 1])))
      lineCross(a, b, p[i], p[i + 1], pp[m++]);
  }
  n = 0;
  for (int i = 0; i < m; i++)
    if (!i || !(point_eq(pp[i], pp[i - 1]))) p[n++] = pp[i];
  // while(n>1&&p[n-1]==p[0])n--;
  while (n > 1 && point_eq(p[n - 1], p[0])) n--;
}

//--------------------- separator ---------------------//
// Returns the signed intersection area of triangles oab and ocd, where o is the origin.
__device__ inline float intersectArea(float2 a, float2 b, float2 c, float2 d) {
  float2 o = make_float2(0, 0);
  int s1 = sig(cross(o, a, b));
  int s2 = sig(cross(o, c, d));
  if (s1 == 0 || s2 == 0) return 0.0;  // degenerate triangle, area is 0
  // if (s1 == -1)
  //   point_swap(&a, &b);
  if (s2 == -1) point_swap(&c, &d);
  float2 p[10] = {o, a, b};
  if (s1 == -1) {
    p[1] = b;
    p[2] = a;
  }
  int n = 3;
  float2 pp[maxn];
  polygon_cut(p, n, o, c, pp);
  polygon_cut(p, n, c, d, pp);
  polygon_cut(p, n, d, o, pp);
  float res = fabs(area(p, n));
  if (s1 * s2 == -1) res = -res;
  return res;
}

// Intersection area of two polygons.
__device__ inline float intersectArea(float2 *ps1, const int &n1, float2 *ps2,
                                      const int &n2) {
  if (area(ps1, n1) < 0) point_reverse(ps1, ps1 + n1);
  if (area(ps2, n2) < 0) point_reverse(ps2, ps2 + n2);
  ps1[n1] = ps1[0];
  ps2[n2] = ps2[0];
  float res = 0;
  for (int i = 0; i < n1; i++) {
    for (int j = 0; j < n2; j++) {
      res += intersectArea(ps1[i], ps1[i + 1], ps2[j], ps2[j + 1]);
    }
  }
  return res;  // assume res is positive!
} __device__ inline float devPolyIoU(float const *const p, float const *const q) { float2 ps1[maxn], ps2[maxn]; int n1 = 4; int n2 = 4; for (int i = 0; i < 4; i++) { ps1[i].x = p[i * 2]; ps1[i].y = p[i * 2 + 1]; ps2[i].x = q[i * 2]; ps2[i].y = q[i * 2 + 1]; } float inter_area = intersectArea(ps1, n1, ps2, n2); float union_area = fabs(area(ps1, n1)) + fabs(area(ps2, n2)) - inter_area; return inter_area / (union_area + 1e-9); } __device__ void get_pixel_area(const float &cx, const float &cy, const float &w, const float &h, const float &angle, const float &x, const float &y, const int &k, float &pixel_weights, float *grad_loc=NULL) { const float dx = cx - x; const float dy = cy - y; const float xx = dx * dx; const float yy = dy * dy; const float dis = sqrt(xx + yy) + 1e-9; const float rate = dx / dis; const float cos_v = acos(rate); const float a1 = angle + cos_v; const float a2 = angle - cos_v; float a = (cy > y) ? a1 : a2; const float cos_a = cos(a); const float sin_a = sin(a); const float dis_w = dis * fabs(cos_a); const float dis_h = dis * fabs(sin_a); float factor_h = -k * (dis_h - h / 2.); factor_h = (factor_h > 50) ? 50 : (factor_h < -50) ? -50 : factor_h; float factor_w = -k * (dis_w - w / 2.); factor_w = (factor_w > 50) ? 50 : (factor_w < -50) ? -50 : factor_w; const float kerner_h = 1.0 / (exp(factor_h) + 1.0); const float kerner_w = 1.0 / (exp(factor_w) + 1.0); const float pixel_area = (1 - kerner_h) * (1 - kerner_w); pixel_weights = pixel_area; if (grad_loc == NULL) return; if (pixel_area < 1e-9) return; if (kerner_w < 1e-9 && kerner_h < 1e-9) return; const float dx_sin_a = dx * sin_a; const float dy_sin_a = dy * sin_a; const float dx_cos_a = dx * cos_a; const float dy_cos_a = dy * cos_a; float dis_w_dcx = (dy_sin_a + dx_cos_a) / dis; float dis_w_dcy = (dy_cos_a - dx_sin_a) / dis; float dis_w_da = -dis * sin_a; float dis_h_dcx = (dx_sin_a - dy_cos_a) / dis; float dis_h_dcy = (dy_sin_a + dx_cos_a) / dis; float dis_h_da = dis * cos_a; if (cos_a < 0.0) { dis_w_dcx = -dis_w_dcx; dis_w_dcy = -dis_w_dcy; dis_w_da = -dis_w_da; } if (sin_a < 0.0) { dis_h_dcx = -dis_h_dcx; dis_h_dcy = -dis_h_dcy; dis_h_da = -dis_h_da; } const float fw_dcx = - k * dis_w_dcx; const float fw_dcy = - k * dis_w_dcy; const float fw_da = - k * dis_w_da; const float fw_dw = k / 2.; const float fh_dcx = - k * dis_h_dcx; const float fh_dcy = - k * dis_h_dcy; const float fh_da = - k * dis_h_da; const float fh_dh = k / 2.; const float kwp = kerner_w * pixel_area; const float khp = kerner_h * pixel_area; const float p_dcx = kwp * fw_dcx + khp * fh_dcx; const float p_dcy = kwp * fw_dcy + khp * fh_dcy; const float p_dw = kwp * fw_dw; const float p_dh = khp * fh_dh; const float p_da = kwp * fw_da + khp * fh_da; grad_loc[0] = p_dcx; grad_loc[1] = p_dcy; grad_loc[2] = p_dw; grad_loc[3] = p_dh; grad_loc[4] = p_da; } __device__ inline float get_pixel_area_fast(const float &cx, const float &cy, const float &w, const float &h, const float &angle, const float &x, const float &y) { const float dx = cx - x; const float dy = cy - y; const float xx = dx * dx; const float yy = dy * dy; const float dis = sqrt(xx + yy) + 1e-9; const float rate = dx / dis; const float cos_v = acos(rate); const float a1 = angle + cos_v; const float a2 = angle - cos_v; float a = (cy > y) ? a1 : a2; const float cos_a = cos(a); const float sin_a = sin(a); const float dis_w = dis * fabs(cos_a); const float dis_h = dis * fabs(sin_a); if ((dis_h < h / 2.) 
&& (dis_w < w / 2.)) return 1.0; else return 0.0; } __device__ void rbox2corners_y(const float &cx, const float &cy, const float &w, const float &h, const float &angle, float *grid_data) { const float w_sin = 0.5 * w * sin(angle); const float h_cos = 0.5 * h * cos(angle); const float y0 = cy - w_sin + h_cos; const float y1 = cy + w_sin + h_cos; const float y2 = cy + w_sin - h_cos; const float y3 = cy - w_sin - h_cos; const float ymin = min(y0, min(y1, min(y2, y3))); const float ymax = max(y0, max(y1, max(y2, y3))); grid_data[0] = ymin; grid_data[1] = ymax; } // loc_p_data: Nx5 // loc_t_data: Nx5 __global__ void get_grid_forward_kernel( const int nthreads, const float *loc_p_data, const float *loc_t_data, float *grid_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { loc_p_data += index * 5; loc_t_data += index * 5; grid_data += index * 2; float grid_y_p[2] = {0}; float grid_y_t[2] = {0}; rbox2corners_y(loc_p_data[0], loc_p_data[1], loc_p_data[2], loc_p_data[3], loc_p_data[4], grid_y_p); rbox2corners_y(loc_t_data[0], loc_t_data[1], loc_t_data[2], loc_t_data[3], loc_t_data[4], grid_y_t); grid_data[0] = (float)min(grid_y_p[0], grid_y_t[0]); grid_data[1] = (float)max(grid_y_p[1], grid_y_t[1]); } } // loc_p_data: Nx3 // loc_t_data: Nx5 __global__ void get_grid_share_center_forward_kernel( const int nthreads, const float *loc_p_data, const float *loc_t_data, float *grid_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { loc_p_data += index * 3; loc_t_data += index * 5; grid_data += index * 2; float grid_y_p[2] = {0}; float grid_y_t[2] = {0}; rbox2corners_y(loc_t_data[0], loc_t_data[1], loc_p_data[0], loc_p_data[1], loc_p_data[2], grid_y_p); rbox2corners_y(loc_t_data[0], loc_t_data[1], loc_t_data[2], loc_t_data[3], loc_t_data[4], grid_y_t); grid_data[0] = (float)min(grid_y_p[0], grid_y_t[0]); grid_data[1] = (float)max(grid_y_p[1], grid_y_t[1]); } } // loc_p_data: N x 5 // loc_t_data: N x 5 // grid_x_data: N x 1 // grid_y_data: N x 2 __global__ void pixel_weights_forward_kernel( const int nthreads, const float *loc_p_data, const float *loc_t_data, const float *grid_x_data, const float *grid_y_data, const int k, const int num, const int dim, float *inter_union_data, float *grad_pixel_weights_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // locate batch const int n = index / dim; const int d = index % dim; loc_p_data += 5 * n; loc_t_data += 5 * n; grid_y_data += 2 * n; inter_union_data += 2 * (n * dim + d); grad_pixel_weights_data += 10 * (n * dim + d); int ymin = grid_y_data[0] - 5; int ymax = grid_y_data[1] + 5; ymin = (ymin < -100) ? -100 : ymin; ymax = (ymax > 1000) ? 
1000 : ymax; float grad_cx_1 = 0.0; float grad_cy_1 = 0.0; float grad_w_1 = 0.0; float grad_h_1 = 0.0; float grad_angle_1 = 0.0; float grad_cx_2 = 0.0; float grad_cy_2 = 0.0; float grad_w_2 = 0.0; float grad_h_2 = 0.0; float grad_angle_2 = 0.0; // sum grad // sum area float grad_loc[5] = {0}; for (int i = ymin; i <= ymax; i++) { const float y = i + 0.5; float pixel_weight_p = 0.0; float pixel_weight_t = 0.0; get_pixel_area(loc_p_data[0], loc_p_data[1], loc_p_data[2], loc_p_data[3], loc_p_data[4], grid_x_data[d], y, k, pixel_weight_p, grad_loc); get_pixel_area(loc_t_data[0], loc_t_data[1], loc_t_data[2], loc_t_data[3], loc_t_data[4], grid_x_data[d], y, k, pixel_weight_t); if (pixel_weight_p < 1e-9 && pixel_weight_t < 1e-9) continue; const float inter_pixel_area = pixel_weight_p * pixel_weight_t; const float union_pixel_area = pixel_weight_p + pixel_weight_t - inter_pixel_area; grad_cx_1 += pixel_weight_t * grad_loc[0]; grad_cy_1 += pixel_weight_t * grad_loc[1]; grad_w_1 += pixel_weight_t * grad_loc[2]; grad_h_1 += pixel_weight_t * grad_loc[3]; grad_angle_1 += pixel_weight_t * grad_loc[4]; grad_cx_2 += grad_loc[0]; grad_cy_2 += grad_loc[1]; grad_w_2 += grad_loc[2]; grad_h_2 += grad_loc[3]; grad_angle_2 += grad_loc[4]; inter_union_data[0] += inter_pixel_area; inter_union_data[1] += union_pixel_area; } grad_pixel_weights_data[0] = grad_cx_1; grad_pixel_weights_data[1] = grad_cy_1; grad_pixel_weights_data[2] = grad_w_1; grad_pixel_weights_data[3] = grad_h_1; grad_pixel_weights_data[4] = grad_angle_1; grad_pixel_weights_data[5] = grad_cx_2; grad_pixel_weights_data[6] = grad_cy_2; grad_pixel_weights_data[7] = grad_w_2; grad_pixel_weights_data[8] = grad_h_2; grad_pixel_weights_data[9] = grad_angle_2; } } // loc_p_data: N x 3 // loc_t_data: N x 5 // grid_x_data: N x 1 // grid_y_data: N x 2 __global__ void pixel_weights_share_center_forward_kernel( const int nthreads, const float *loc_p_data, const float *loc_t_data, const float *grid_x_data, const float *grid_y_data, const int k, const int num, const int dim, float *inter_union_data, float *grad_pixel_weights_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // locate batch const int n = index / dim; const int d = index % dim; loc_p_data += 3 * n; loc_t_data += 5 * n; grid_y_data += 2 * n; inter_union_data += 2 * (n * dim + d); grad_pixel_weights_data += 10 * (n * dim + d); int ymin = grid_y_data[0] - 5; int ymax = grid_y_data[1] + 5; ymin = (ymin < -100) ? -100 : ymin; ymax = (ymax > 1000) ? 
1000 : ymax; float grad_cx_1 = 0.0; float grad_cy_1 = 0.0; float grad_w_1 = 0.0; float grad_h_1 = 0.0; float grad_angle_1 = 0.0; float grad_cx_2 = 0.0; float grad_cy_2 = 0.0; float grad_w_2 = 0.0; float grad_h_2 = 0.0; float grad_angle_2 = 0.0; // sum grad // sum area float grad_loc[5] = {0}; for (int i = ymin; i <= ymax; i++) { const float y = i + 0.5; float pixel_weight_p = 0.0; float pixel_weight_t = 0.0; get_pixel_area(loc_t_data[0], loc_t_data[1], loc_p_data[0], loc_p_data[1], loc_p_data[2], grid_x_data[d], y, k, pixel_weight_p, grad_loc); get_pixel_area(loc_t_data[0], loc_t_data[1], loc_t_data[2], loc_t_data[3], loc_t_data[4], grid_x_data[d], y, k, pixel_weight_t); if (pixel_weight_p < 1e-9 && pixel_weight_t < 1e-9) continue; const float inter_pixel_area = pixel_weight_p * pixel_weight_t; const float union_pixel_area = pixel_weight_p + pixel_weight_t - inter_pixel_area; grad_cx_1 += pixel_weight_t * grad_loc[0]; grad_cy_1 += pixel_weight_t * grad_loc[1]; grad_w_1 += pixel_weight_t * grad_loc[2]; grad_h_1 += pixel_weight_t * grad_loc[3]; grad_angle_1 += pixel_weight_t * grad_loc[4]; grad_cx_2 += grad_loc[0]; grad_cy_2 += grad_loc[1]; grad_w_2 += grad_loc[2]; grad_h_2 += grad_loc[3]; grad_angle_2 += grad_loc[4]; inter_union_data[0] += inter_pixel_area; inter_union_data[1] += union_pixel_area; } grad_pixel_weights_data[0] = grad_cx_1; grad_pixel_weights_data[1] = grad_cy_1; grad_pixel_weights_data[2] = grad_w_1; grad_pixel_weights_data[3] = grad_h_1; grad_pixel_weights_data[4] = grad_angle_1; grad_pixel_weights_data[5] = grad_cx_2; grad_pixel_weights_data[6] = grad_cy_2; grad_pixel_weights_data[7] = grad_w_2; grad_pixel_weights_data[8] = grad_h_2; grad_pixel_weights_data[9] = grad_angle_2; } } // loc_p_data: N x 5 // loc_t_data: N x 5 // grid_x_data: N x 1 // grid_y_data: N x 2 __global__ void hpixel_weights_forward_kernel( const int nthreads, const float *loc_p_data, const float *loc_t_data, const float *grid_x_data, const float *grid_y_data, const int k, const int num, const int dim, float *inter_data, float *grad_pixel_weights_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // locate batch const int n = index / dim; const int d = index % dim; loc_p_data += 5 * n; loc_t_data += 5 * n; grid_y_data += 2 * n; inter_data += (n * dim + d); grad_pixel_weights_data += 5 * (n * dim + d); int ymin = grid_y_data[0] - 5; int ymax = grid_y_data[1] + 5; ymin = (ymin < -100) ? -100 : ymin; ymax = (ymax > 1000) ? 
1000 : ymax; float grad_cx = 0.0; float grad_cy = 0.0; float grad_w = 0.0; float grad_h = 0.0; float grad_angle = 0.0; // sum grad // sum area float grad_loc[5] = {0}; for (int i = ymin; i <= ymax; i++) { const float y = i + 0.5; float pixel_weight_p = 0.0; float pixel_weight_t = 0.0; get_pixel_area(loc_p_data[0], loc_p_data[1], loc_p_data[2], loc_p_data[3], loc_p_data[4], grid_x_data[d], y, k, pixel_weight_p, grad_loc); get_pixel_area(loc_t_data[0], loc_t_data[1], loc_t_data[2], loc_t_data[3], loc_t_data[4], grid_x_data[d], y, k, pixel_weight_t); if (pixel_weight_p < 1e-9 && pixel_weight_t < 1e-9) continue; const float inter_pixel_area = pixel_weight_p * pixel_weight_t; grad_cx += pixel_weight_t * grad_loc[0]; grad_cy += pixel_weight_t * grad_loc[1]; grad_w += pixel_weight_t * grad_loc[2]; grad_h += pixel_weight_t * grad_loc[3]; grad_angle += pixel_weight_t * grad_loc[4]; inter_data[0] += inter_pixel_area; } grad_pixel_weights_data[0] = grad_cx; grad_pixel_weights_data[1] = grad_cy; grad_pixel_weights_data[2] = grad_w; grad_pixel_weights_data[3] = grad_h; grad_pixel_weights_data[4] = grad_angle; } } // // loc_p_data: N x 8 // // loc_t_data: M x 8 // __global__ void overlap_r_forward_kernel( // const int nthreads, // const float *loc_p_data, // const float *loc_t_data, // const int dim, // float *pious_data) // { // CUDA_1D_KERNEL_LOOP(index, nthreads) // { // // locate batch // const int n = index / dim; // const int d = index % dim; // loc_p_data += 8 * n; // loc_t_data += 8 * d; // pious_data += (n * dim + d); // const float xmin_p = min(loc_p_data[0], min(loc_p_data[2], min(loc_p_data[4], loc_p_data[6]))); // const float ymin_p = min(loc_p_data[1], min(loc_p_data[3], min(loc_p_data[5], loc_p_data[7]))); // const float xmax_p = max(loc_p_data[0], max(loc_p_data[2], max(loc_p_data[4], loc_p_data[6]))); // const float ymax_p = max(loc_p_data[1], max(loc_p_data[3], max(loc_p_data[5], loc_p_data[7]))); // const float xmin_t = min(loc_t_data[0], min(loc_t_data[2], min(loc_t_data[4], loc_t_data[6]))); // const float ymin_t = min(loc_t_data[1], min(loc_t_data[3], min(loc_t_data[5], loc_t_data[7]))); // const float xmax_t = max(loc_t_data[0], max(loc_t_data[2], max(loc_t_data[4], loc_t_data[6]))); // const float ymax_t = max(loc_t_data[1], max(loc_t_data[3], max(loc_t_data[5], loc_t_data[7]))); // float iou = 0.0; // if (xmin_p > xmax_t || xmax_p < xmin_t || ymin_p > ymax_t || ymax_p < ymin_t) // { // iou = 0.0; // } else { // const float inter_xmin = max(xmin_p, xmin_t); // const float inter_ymin = max(ymin_p, ymin_t); // const float inter_xmax = min(xmax_p, xmax_t); // const float inter_ymax = min(ymax_p, ymax_t); // const float inter_area = (inter_ymax - inter_ymin) * (inter_xmax - inter_xmin); // const float area_p = (ymax_p - ymin_p) * (xmax_p - xmin_p); // const float area_t = (ymax_t - ymin_t) * (xmax_t - xmin_t); // iou = inter_area / (area_p + area_t - inter_area); // } // // pious_data[0] = iou; // if (iou < 0.25) // { // pious_data[0] = iou; // } else { // const float union_xmin = min(xmin_p, xmin_t); // const float union_ymin = min(ymin_p, ymin_t); // const float union_xmax = max(xmax_p, xmax_t); // const float union_ymax = max(ymax_p, ymax_t); // int ymin = grid_y_data[0] - 5; // int ymax = grid_y_data[1] + 5; // ymin = (ymin < -100) ? -100 : ymin; // ymax = (ymax > 1000) ? 
1000 : ymax; // pious_data[0] = devPolyIoU(loc_p_data, loc_t_data); // } // } // } // loc_p_data: N x 5 // loc_t_data: M x 5 // __global__ void overlap_r_fast_forward_kernel( // const int nthreads, // const float *loc_p_data, // const float *loc_t_data, // const int dim, // float *pious_data) // { // CUDA_1D_KERNEL_LOOP(index, nthreads) // { // // locate batch // const int n = index / dim; // const int d = index % dim; // loc_p_data += 5 * n; // loc_t_data += 5 * d; // pious_data += (n * dim + d); // // pre_processing // const float dis = sqrt(xx * xx + yy * yy); // const float max_wh_p = max(loc_p_data[2], loc_p_data[3]); // const float max_wh_t = max(loc_t_data[2], loc_t_data[3]); // if (dis > 0.707107 * (max_wh_p + max_wh_t)) // { // pious_data[0] = 0.0; // return; // } else { // const float xmin_p = loc_p_data[0] - loc_p_data[2]; // const float ymin_p = loc_p_data[1] - loc_p_data[3]; // const float xmax_p = loc_p_data[0] + loc_p_data[2]; // const float ymax_p = loc_p_data[1] + loc_p_data[3]; // const float xmin_t = loc_t_data[0] - loc_t_data[2]; // const float ymin_t = loc_t_data[1] - loc_t_data[3]; // const float xmax_t = loc_t_data[0] + loc_t_data[2]; // const float ymax_t = loc_t_data[1] + loc_t_data[3]; // float iou = 0.0; // if (xmin_p > xmax_t || xmax_p < xmin_t || ymin_p > ymax_t || ymax_p < ymin_t) // { // iou = 0.0; // } else { // const float inter_xmin = max(xmin_p, xmin_t); // const float inter_ymin = max(ymin_p, ymin_t); // const float inter_xmax = min(xmax_p, xmax_t); // const float inter_ymax = min(ymax_p, ymax_t); // const float inter_area = (inter_ymax - inter_ymin) * (inter_xmax - inter_xmin); // const float area_p = (ymax_p - ymin_p) * (xmax_p - xmin_p); // const float area_t = (ymax_t - ymin_t) * (xmax_t - xmin_t); // iou = inter_area / (area_p + area_t - inter_area); // } // const float ar_p = loc_p_data[2] / loc_p_data[3]; // const float ar_t = loc_t_data[2] / loc_t_data[3]; // if (ar_p < 1.2 && ar_t < 1.2) // { // } else { // } // } // } // } // __device__ float OverlapArea(const float &xcenter1, const float &ycenter1, const float &width1, const float &height1, const float &angle1, const float &xcenter2, const float &ycenter2, const float &width2, const float &height2, const float &angle2) // { // float angle1_ = -angle1; // float angle2_ = -angle2; // float angled = angle2_ - angle1_; // angled *= (float)3.14159265 / 180; // angle1_ *= (float)3.14159265 / 180; // float area = 0; // float hw1 = width1 / 2; // float hh1 = height1 / 2; // float hw2 = width2 / 2; // float hh2 = height2 / 2; // float xcenterd = xcenter2 - xcenter1; // float ycenterd = ycenter2 - ycenter1; // float tmp = xcenterd * cosf(angle1_) + ycenterd * sinf(angle1_); // ycenterd = -xcenterd * sinf(angle1_) + ycenterd * cosf(angle1_); // xcenterd = tmp; // float max_width_height1 = width1 > height1 ? width1 : height1; // float max_width_height2 = width2 > height2 ? width2 : height2; // if (sqrt(xcenterd * xcenterd + ycenterd * ycenterd) > // (max_width_height1 + max_width_height2) * 0.707107) // { // area = 0; // return (area); // } // if (fabs(sin(angled)) < 1e-3) // { // if (fabs(xcenterd) > (hw1 + hw2) || fabs(ycenterd) > (hh1 + hh2)) // { // area = 0; // return (area); // } // else // { // float x_min_inter = -hw1 > (xcenterd - hw2) ? -hw1 : (xcenterd - hw2); // float x_max_inter = hw1 < (xcenterd + hw2) ? hw1 : (xcenterd + hw2); // float y_min_inter = -hh1 > (ycenterd - hh2) ? -hh1 : (ycenterd - hh2); // float y_max_inter = hh1 < (ycenterd + hh2) ? 
hh1 : (ycenterd + hh2); // const float inter_width = x_max_inter - x_min_inter; // const float inter_height = y_max_inter - y_min_inter; // const float inter_size = inter_width * inter_height; // area = inter_size; // return (area); // } // } // if (fabs(cos(angled)) < 1e-3) // { // float x_min_inter = -hw1 > (xcenterd - hh2) ? -hw1 : (xcenterd - hh2); // float x_max_inter = hw1 < (xcenterd + hh2) ? hw1 : (xcenterd + hh2); // float y_min_inter = -hh1 > (ycenterd - hw2) ? -hh1 : (ycenterd - hw2); // float y_max_inter = hh1 < (ycenterd + hw2) ? hh1 : (ycenterd + hw2); // const float inter_width = x_max_inter - x_min_inter; // const float inter_height = y_max_inter - y_min_inter; // const float inter_size = inter_width * inter_height; // area = inter_size; // return (area); // } // } // __global__ void overlap_r_fast_forward_kernel( // const int nthreads, // const float *loc_p_data, // const float *loc_t_data, // const int dim, // float *pious_data) // { // CUDA_1D_KERNEL_LOOP(index, nthreads) // { // // locate batch // const int n = index / dim; // const int d = index % dim; // loc_p_data += 5 * n; // loc_t_data += 5 * d; // pious_data += (n * dim + d); // const float xmin = grid_data[0]; // const float ymin = grid_data[1]; // const float xmax = grid_data[2]; // const float ymax = grid_data[3]; // const float overlap_h = grid_data[4]; // const float area_p = loc_p_data[2] * loc_p_data[3]; // const float area_t = loc_t_data[2] * loc_t_data[3]; // const float xx = loc_p_data[0] - loc_t_data[0]; // const float yy = loc_p_data[1] - loc_t_data[1]; // // pre_processing // const float dis = sqrt(xx * xx + yy * yy); // const float max_wh_p = max(loc_p_data[2], loc_p_data[3]); // const float max_wh_t = max(loc_t_data[2], loc_t_data[3]); // if (dis > 0.707107 * (max_wh_p + max_wh_t)) // { // pious_data[0] = 0.0; // return; // } // pious_data[0] = overlap_h; // return; // if (overlap_h < 0.1) // { // pious_data[0] = overlap_h; // return; // } // else // { // } // } // } // inter_union_data: Nxdimx2 // grad_pixel_weights_data: Nxdimx10 // pious_data:Nx1 // grad_loc_memory_data:Nx5 __global__ void pious_forward_kernel( const int nthreads, const float *inter_union_data, const float *grad_pixel_weights_data, const int num, const int dim, float *pious_data, float *grad_loc_memory_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // locate batch inter_union_data += index * dim * 2; grad_pixel_weights_data += index * dim * 10; grad_loc_memory_data += index * 5; float inter_area = 0.0; float union_area = 0.0; float grad_cx_1 = 0.0; float grad_cy_1 = 0.0; float grad_w_1 = 0.0; float grad_h_1 = 0.0; float grad_angle_1 = 0.0; float grad_cx_2 = 0.0; float grad_cy_2 = 0.0; float grad_w_2 = 0.0; float grad_h_2 = 0.0; float grad_angle_2 = 0.0; for (int d = 0; d < dim; d++) { const int offset = 2 * d; const int offset_ = 10 * d; grad_cx_1 += grad_pixel_weights_data[offset_]; grad_cy_1 += grad_pixel_weights_data[offset_ + 1]; grad_w_1 += grad_pixel_weights_data[offset_ + 2]; grad_h_1 += grad_pixel_weights_data[offset_ + 3]; grad_angle_1 += grad_pixel_weights_data[offset_ + 4]; grad_cx_2 += grad_pixel_weights_data[offset_ + 5]; grad_cy_2 += grad_pixel_weights_data[offset_ + 6]; grad_w_2 += grad_pixel_weights_data[offset_ + 7]; grad_h_2 += grad_pixel_weights_data[offset_ + 8]; grad_angle_2 += grad_pixel_weights_data[offset_ + 9]; inter_area += inter_union_data[offset]; union_area += inter_union_data[offset + 1]; } pious_data[index] = inter_area / (union_area + 1e-9); const float k = inter_area + union_area; const float b = 
union_area * union_area + 1e-9; grad_loc_memory_data[0] = (k * grad_cx_1 - inter_area * grad_cx_2) / b; grad_loc_memory_data[1] = (k * grad_cy_1 - inter_area * grad_cy_2) / b; grad_loc_memory_data[2] = (k * grad_w_1 - inter_area * grad_w_2) / b; grad_loc_memory_data[3] = (k * grad_h_1 - inter_area * grad_h_2) / b; grad_loc_memory_data[4] = (k * grad_angle_1 - inter_area * grad_angle_2) / b; } } // inter_union_data: Nxdimx2 // grad_pixel_weights_data: Nxdimx10 // pious_data:Nx1 // grad_loc_memory_data:Nx5 __global__ void hpious_forward_kernel( const int nthreads, const float *loc_p_data, const float *loc_t_data, const float *inter_data, const float *grad_pixel_weights_data, const int num, const int dim, float *pious_data, float *grad_loc_memory_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // locate batch loc_p_data += 5 * index; loc_t_data += 5 * index; inter_data += index * dim; grad_pixel_weights_data += index * dim * 5; grad_loc_memory_data += index * 5; float inter_area = 0.0; float grad_cx = 0.0; float grad_cy = 0.0; float grad_w = 0.0; float grad_h = 0.0; float grad_angle = 0.0; for (int d = 0; d < dim; d++) { const int offset = 5 * d; grad_cx += grad_pixel_weights_data[offset]; grad_cy += grad_pixel_weights_data[offset + 1]; grad_w += grad_pixel_weights_data[offset + 2]; grad_h += grad_pixel_weights_data[offset + 3]; grad_angle += grad_pixel_weights_data[offset + 4]; inter_area += inter_data[d]; } const float union_area = loc_p_data[2] * loc_p_data[3] + loc_t_data[2] * loc_t_data[3] - inter_area; pious_data[index] = inter_area / (union_area + 1e-9); const float k = inter_area + union_area; const float b = union_area * union_area + 1e-9; grad_loc_memory_data[0] = k * grad_cx / b; grad_loc_memory_data[1] = k * grad_cy / b; grad_loc_memory_data[2] = (k * grad_w - loc_p_data[3] * inter_area) / b; grad_loc_memory_data[3] = (k * grad_h - loc_p_data[2] * inter_area) / b; grad_loc_memory_data[4] = k * grad_angle / b; } } // #loc-- > num x 5 // #grid_xy-- > dim x 2 // output-- > num x 1 at::Tensor pixel_weights_forward_cuda( const at::Tensor &loc_p, // const at::Tensor &loc_t, // const at::Tensor &grid_x, // const int k, const bool is_hard, at::Tensor &grad_loc_memory) { const int num = loc_p.size(0); const int dim = grid_x.size(0); const int tmp_total_count = num * dim; const int total_count = num; const int thread_per_block = 1024; const int block_count = (total_count + thread_per_block - 1) / thread_per_block; const int tmp_block_count = (tmp_total_count + thread_per_block - 1) / thread_per_block; // final output auto options = torch::TensorOptions().dtype(torch::kFloat32).requires_grad(true).device(torch::kCUDA, loc_p.device().index()); auto output = torch::zeros({num}, options).to(torch::kCUDA); auto pixel_weights = torch::zeros({num, dim}, options).to(torch::kCUDA); auto grid_y = torch::zeros({num, 2}, options).to(torch::kCUDA); AT_CHECK(loc_p.device().index() == loc_t.device().index(), "loc_p & loc_t must be same device"); AT_CHECK(loc_p.device().index() == grid_x.device().index(), "loc_p & grid_x must be same device"); // for paralell if (block_count <= 0) return output; // get grid y-dir get_grid_forward_kernel<<<block_count, thread_per_block>>>( total_count, loc_p.data<float>(), loc_t.data<float>(), grid_y.data<float>()); // get_grid_share_center_forward_kernel<<<block_count, thread_per_block>>>( // total_count, loc_p.data<float>(), loc_t.data<float>(), grid_y.data<float>()); // kernel function for sum if (is_hard) { auto inter = torch::zeros({num, dim}, options).to(torch::kCUDA); 
auto grad_pixel_weights = torch::zeros({num, dim, 5}, options).to(torch::kCUDA); hpixel_weights_forward_kernel<<<tmp_block_count, thread_per_block>>>( tmp_total_count, loc_p.data<float>(), loc_t.data<float>(), grid_x.data<float>(), grid_y.data<float>(), k, num, dim, inter.data<float>(), grad_pixel_weights.data<float>()); hpious_forward_kernel<<<block_count, thread_per_block>>>( total_count, loc_p.data<float>(), loc_t.data<float>(), inter.data<float>(), grad_pixel_weights.data<float>(), num, dim, output.data<float>(), grad_loc_memory.data<float>()); } else { auto inter_union = torch::zeros({num, dim, 2}, options).to(torch::kCUDA); auto grad_pixel_weights = torch::zeros({num, dim, 10}, options).to(torch::kCUDA); // kernel function for pixels pixel_weights_forward_kernel<<<tmp_block_count, thread_per_block>>>( tmp_total_count, loc_p.data<float>(), loc_t.data<float>(), grid_x.data<float>(), grid_y.data<float>(), k, num, dim, inter_union.data<float>(), grad_pixel_weights.data<float>()); // pixel_weights_share_center_forward_kernel<<<tmp_block_count, thread_per_block>>>( // tmp_total_count, loc_p.data<float>(), loc_t.data<float>(), grid_x.data<float>(), grid_y.data<float>(), k, num, dim, inter_union.data<float>(), grad_pixel_weights.data<float>()); pious_forward_kernel<<<block_count, thread_per_block>>>( total_count, inter_union.data<float>(), grad_pixel_weights.data<float>(), num, dim, output.data<float>(), grad_loc_memory.data<float>()); } AT_CHECK(cudaGetLastError() == cudaSuccess, "pious_forward_kernel failed"); return output; } at::Tensor overlap_r_forward_cuda( const at::Tensor &loc_p, // const at::Tensor &loc_t, const at::Tensor &grid) { const int num_p = loc_p.size(0); const int num_t = loc_t.size(0); const int total_count = num_p * num_t; const int thread_per_block = 1024; const int block_count = (total_count + thread_per_block - 1) / thread_per_block; // final output auto options = torch::TensorOptions().dtype(torch::kFloat32).requires_grad(true).device(torch::kCUDA, loc_p.device().index()); auto output = torch::zeros({num_p, num_t}, options).to(torch::kCUDA); // for paralell if (block_count <= 0) return output; // // kernel function for sum // overlap_r_forward_kernel<<<block_count, thread_per_block>>>( // total_count, loc_p.data<float>(), loc_t.data<float>(), grid.data<float>(), num_t, output.data<float>());G56 // AT_CHECK(cudaGetLastError() == cudaSuccess, "overlap_r_forward_kernel failed"); return output; } /* ------------------------------end of the forward--------------------------- */ __global__ void pious_backward_kernel( const int nthreads, const float *grad_pious_data, const float *grad_loc_memory_data, float *grad_loc_p_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // locate batch const int n = index / 5; const int d = index % 5; grad_loc_memory_data += n * 5; grad_loc_p_data += n * 5; grad_loc_p_data[d] = grad_pious_data[n] * grad_loc_memory_data[d]; } } at::Tensor pixel_weights_backward_cuda(const at::Tensor &grads_pious, const at::Tensor &grad_loc_memory) { const int num = grads_pious.size(0); const int total_count = 5 * num; const int thread_per_block = 1024; const int block_count = (total_count + thread_per_block - 1) / thread_per_block; auto options = torch::TensorOptions().dtype(torch::kFloat32).requires_grad(true).device(torch::kCUDA, grads_pious.device().index()); auto grad_loc_p = torch::zeros({num, 5}, options).to(torch::kCUDA); pious_backward_kernel<<<block_count, thread_per_block>>>( total_count, grads_pious.data<float>(), grad_loc_memory.data<float>(), 
grad_loc_p.data<float>()); AT_CHECK(cudaGetLastError() == cudaSuccess, "pious_backward_kernel failed"); return grad_loc_p; }
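// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the extension source above): a PyTorch CUDA
// extension like this one is typically paired with a small pybind11 binding
// translation unit so the forward/backward entry points can be called from
// Python. The module layout below is an assumption; only the two
// pixel_weights_* functions defined above are bound, and the docstrings are
// invented for the sketch.
#include <torch/extension.h>

// Declarations of the CUDA entry points defined in the file above.
at::Tensor pixel_weights_forward_cuda(const at::Tensor &loc_p, const at::Tensor &loc_t,
                                      const at::Tensor &grid_x, const int k,
                                      const bool is_hard, at::Tensor &grad_loc_memory);
at::Tensor pixel_weights_backward_cuda(const at::Tensor &grads_pious,
                                       const at::Tensor &grad_loc_memory);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def("pixel_weights_forward", &pixel_weights_forward_cuda,
        "Pixel-weighted rotated-box IoU forward (CUDA)");
  m.def("pixel_weights_backward", &pixel_weights_backward_cuda,
        "Pixel-weighted rotated-box IoU backward (CUDA)");
}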
#include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/vec_math.hpp" namespace cv { namespace cuda { namespace device { namespace match_template { __device__ __forceinline__ float sum(float v) { return v; } __device__ __forceinline__ float sum(float2 v) { return v.x + v.y; } __device__ __forceinline__ float sum(float3 v) { return v.x + v.y + v.z; } __device__ __forceinline__ float sum(float4 v) { return v.x + v.y + v.z + v.w; } __device__ __forceinline__ float first(float v) { return v; } __device__ __forceinline__ float first(float2 v) { return v.x; } __device__ __forceinline__ float first(float3 v) { return v.x; } __device__ __forceinline__ float first(float4 v) { return v.x; } __device__ __forceinline__ float mul(float a, float b) { return a * b; } __device__ __forceinline__ float2 mul(float2 a, float2 b) { return make_float2(a.x * b.x, a.y * b.y); } __device__ __forceinline__ float3 mul(float3 a, float3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); } __device__ __forceinline__ float4 mul(float4 a, float4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); } __device__ __forceinline__ float mul(uchar a, uchar b) { return a * b; } __device__ __forceinline__ float2 mul(uchar2 a, uchar2 b) { return make_float2(a.x * b.x, a.y * b.y); } __device__ __forceinline__ float3 mul(uchar3 a, uchar3 b) { return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); } __device__ __forceinline__ float4 mul(uchar4 a, uchar4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); } __device__ __forceinline__ float sub(float a, float b) { return a - b; } __device__ __forceinline__ float2 sub(float2 a, float2 b) { return make_float2(a.x - b.x, a.y - b.y); } __device__ __forceinline__ float3 sub(float3 a, float3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); } __device__ __forceinline__ float4 sub(float4 a, float4 b) { return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); } __device__ __forceinline__ float sub(uchar a, uchar b) { return a - b; } __device__ __forceinline__ float2 sub(uchar2 a, uchar2 b) { return make_float2(a.x - b.x, a.y - b.y); } __device__ __forceinline__ float3 sub(uchar3 a, uchar3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); } __device__ __forceinline__ float4 sub(uchar4 a, uchar4 b) { return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w); } ////////////////////////////////////////////////////////////////////// // Naive_CCORR template <typename T, int cn> __global__ void matchTemplateNaiveKernel_CCORR(int w, int h, const PtrStepb image, const PtrStepb templ, PtrStepSzf result) { typedef typename TypeVec<T, cn>::vec_type Type; typedef typename TypeVec<float, cn>::vec_type Typef; int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (x < result.cols && y < result.rows) { Typef res = VecTraits<Typef>::all(0); for (int i = 0; i < h; ++i) { const Type* image_ptr = (const Type*)image.ptr(y + i); const Type* templ_ptr = (const Type*)templ.ptr(i); for (int j = 0; j < w; ++j) res = res + mul(image_ptr[x + j], templ_ptr[j]); } result.ptr(y)[x] = sum(res); } } template <typename T, int cn> void matchTemplateNaive_CCORR(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, cudaStream_t stream) { const dim3 threads(32, 8); const dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y)); matchTemplateNaiveKernel_CCORR<T, cn><<<grid, threads, 0, stream>>>(templ.cols, templ.rows, image, templ, result); cudaSafeCall( cudaGetLastError() ); if 
(stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } void matchTemplateNaive_CCORR_32F(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, int cn, cudaStream_t stream) { typedef void (*caller_t)(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, cudaStream_t stream); static const caller_t callers[] = { 0, matchTemplateNaive_CCORR<float, 1>, matchTemplateNaive_CCORR<float, 2>, matchTemplateNaive_CCORR<float, 3>, matchTemplateNaive_CCORR<float, 4> }; callers[cn](image, templ, result, stream); } void matchTemplateNaive_CCORR_8U(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, int cn, cudaStream_t stream) { typedef void (*caller_t)(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, cudaStream_t stream); static const caller_t callers[] = { 0, matchTemplateNaive_CCORR<uchar, 1>, matchTemplateNaive_CCORR<uchar, 2>, matchTemplateNaive_CCORR<uchar, 3>, matchTemplateNaive_CCORR<uchar, 4> }; callers[cn](image, templ, result, stream); } ////////////////////////////////////////////////////////////////////// // Naive_SQDIFF template <typename T, int cn> __global__ void matchTemplateNaiveKernel_SQDIFF(int w, int h, const PtrStepb image, const PtrStepb templ, PtrStepSzf result) { typedef typename TypeVec<T, cn>::vec_type Type; typedef typename TypeVec<float, cn>::vec_type Typef; int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (x < result.cols && y < result.rows) { Typef res = VecTraits<Typef>::all(0); Typef delta; for (int i = 0; i < h; ++i) { const Type* image_ptr = (const Type*)image.ptr(y + i); const Type* templ_ptr = (const Type*)templ.ptr(i); for (int j = 0; j < w; ++j) { delta = sub(image_ptr[x + j], templ_ptr[j]); res = res + delta * delta; } } result.ptr(y)[x] = sum(res); } } template <typename T, int cn> void matchTemplateNaive_SQDIFF(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, cudaStream_t stream) { const dim3 threads(32, 8); const dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y)); matchTemplateNaiveKernel_SQDIFF<T, cn><<<grid, threads, 0, stream>>>(templ.cols, templ.rows, image, templ, result); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } void matchTemplateNaive_SQDIFF_32F(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, int cn, cudaStream_t stream) { typedef void (*caller_t)(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, cudaStream_t stream); static const caller_t callers[] = { 0, matchTemplateNaive_SQDIFF<float, 1>, matchTemplateNaive_SQDIFF<float, 2>, matchTemplateNaive_SQDIFF<float, 3>, matchTemplateNaive_SQDIFF<float, 4> }; callers[cn](image, templ, result, stream); } void matchTemplateNaive_SQDIFF_8U(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, int cn, cudaStream_t stream) { typedef void (*caller_t)(const PtrStepSzb image, const PtrStepSzb templ, PtrStepSzf result, cudaStream_t stream); static const caller_t callers[] = { 0, matchTemplateNaive_SQDIFF<uchar, 1>, matchTemplateNaive_SQDIFF<uchar, 2>, matchTemplateNaive_SQDIFF<uchar, 3>, matchTemplateNaive_SQDIFF<uchar, 4> }; callers[cn](image, templ, result, stream); } ////////////////////////////////////////////////////////////////////// // Prepared_SQDIFF template <int cn> __global__ void matchTemplatePreparedKernel_SQDIFF_8U(int w, int h, const PtrStep<double> image_sqsum, double templ_sqsum, PtrStepSzf result) { const int x = blockIdx.x * blockDim.x + threadIdx.x; 
const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < result.cols && y < result.rows) { float image_sqsum_ = (float)( (image_sqsum.ptr(y + h)[(x + w) * cn] - image_sqsum.ptr(y)[(x + w) * cn]) - (image_sqsum.ptr(y + h)[x * cn] - image_sqsum.ptr(y)[x * cn])); float ccorr = result.ptr(y)[x]; result.ptr(y)[x] = image_sqsum_ - 2.f * ccorr + templ_sqsum; } } template <int cn> void matchTemplatePrepared_SQDIFF_8U(int w, int h, const PtrStepSz<double> image_sqsum, double templ_sqsum, PtrStepSzf result, cudaStream_t stream) { const dim3 threads(32, 8); const dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y)); matchTemplatePreparedKernel_SQDIFF_8U<cn><<<grid, threads, 0, stream>>>(w, h, image_sqsum, templ_sqsum, result); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } void matchTemplatePrepared_SQDIFF_8U(int w, int h, const PtrStepSz<double> image_sqsum, double templ_sqsum, PtrStepSzf result, int cn, cudaStream_t stream) { typedef void (*caller_t)(int w, int h, const PtrStepSz<double> image_sqsum, double templ_sqsum, PtrStepSzf result, cudaStream_t stream); static const caller_t callers[] = { 0, matchTemplatePrepared_SQDIFF_8U<1>, matchTemplatePrepared_SQDIFF_8U<2>, matchTemplatePrepared_SQDIFF_8U<3>, matchTemplatePrepared_SQDIFF_8U<4> }; callers[cn](w, h, image_sqsum, templ_sqsum, result, stream); } ////////////////////////////////////////////////////////////////////// // Prepared_SQDIFF_NORMED // normAcc* are accurate normalization routines which make CUDA matchTemplate // consistent with CPU one __device__ float normAcc(float num, float denum) { if (::fabs(num) < denum) return num / denum; if (::fabs(num) < denum * 1.125f) return num > 0 ? 1 : -1; return 0; } __device__ float normAcc_SQDIFF(float num, float denum) { if (::fabs(num) < denum) return num / denum; if (::fabs(num) < denum * 1.125f) return num > 0 ? 
1 : -1; return 1; } template <int cn> __global__ void matchTemplatePreparedKernel_SQDIFF_NORMED_8U( int w, int h, const PtrStep<double> image_sqsum, double templ_sqsum, PtrStepSzf result) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < result.cols && y < result.rows) { float image_sqsum_ = (float)( (image_sqsum.ptr(y + h)[(x + w) * cn] - image_sqsum.ptr(y)[(x + w) * cn]) - (image_sqsum.ptr(y + h)[x * cn] - image_sqsum.ptr(y)[x * cn])); float ccorr = result.ptr(y)[x]; result.ptr(y)[x] = normAcc_SQDIFF(image_sqsum_ - 2.f * ccorr + templ_sqsum, sqrtf(image_sqsum_ * templ_sqsum)); } } template <int cn> void matchTemplatePrepared_SQDIFF_NORMED_8U(int w, int h, const PtrStepSz<double> image_sqsum, double templ_sqsum, PtrStepSzf result, cudaStream_t stream) { const dim3 threads(32, 8); const dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y)); matchTemplatePreparedKernel_SQDIFF_NORMED_8U<cn><<<grid, threads, 0, stream>>>(w, h, image_sqsum, templ_sqsum, result); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } void matchTemplatePrepared_SQDIFF_NORMED_8U(int w, int h, const PtrStepSz<double> image_sqsum, double templ_sqsum, PtrStepSzf result, int cn, cudaStream_t stream) { typedef void (*caller_t)(int w, int h, const PtrStepSz<double> image_sqsum, double templ_sqsum, PtrStepSzf result, cudaStream_t stream); static const caller_t callers[] = { 0, matchTemplatePrepared_SQDIFF_NORMED_8U<1>, matchTemplatePrepared_SQDIFF_NORMED_8U<2>, matchTemplatePrepared_SQDIFF_NORMED_8U<3>, matchTemplatePrepared_SQDIFF_NORMED_8U<4> }; callers[cn](w, h, image_sqsum, templ_sqsum, result, stream); } ////////////////////////////////////////////////////////////////////// // Prepared_CCOFF __global__ void matchTemplatePreparedKernel_CCOFF_8U(int w, int h, float templ_sum_scale, const PtrStep<int> image_sum, PtrStepSzf result) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < result.cols && y < result.rows) { float image_sum_ = (float)( (image_sum.ptr(y + h)[x + w] - image_sum.ptr(y)[x + w]) - (image_sum.ptr(y + h)[x] - image_sum.ptr(y)[x])); float ccorr = result.ptr(y)[x]; result.ptr(y)[x] = ccorr - image_sum_ * templ_sum_scale; } } void matchTemplatePrepared_CCOFF_8U(int w, int h, const PtrStepSz<int> image_sum, int templ_sum, PtrStepSzf result, cudaStream_t stream) { dim3 threads(32, 8); dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y)); matchTemplatePreparedKernel_CCOFF_8U<<<grid, threads, 0, stream>>>(w, h, (float)templ_sum / (w * h), image_sum, result); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void matchTemplatePreparedKernel_CCOFF_8UC2( int w, int h, float templ_sum_scale_r, float templ_sum_scale_g, const PtrStep<int> image_sum_r, const PtrStep<int> image_sum_g, PtrStepSzf result) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < result.cols && y < result.rows) { float image_sum_r_ = (float)( (image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) - (image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x])); float image_sum_g_ = (float)( (image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) - (image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x])); float ccorr = result.ptr(y)[x]; result.ptr(y)[x] = ccorr - image_sum_r_ * templ_sum_scale_r - image_sum_g_ * 
templ_sum_scale_g; } } void matchTemplatePrepared_CCOFF_8UC2( int w, int h, const PtrStepSz<int> image_sum_r, const PtrStepSz<int> image_sum_g, int templ_sum_r, int templ_sum_g, PtrStepSzf result, cudaStream_t stream) { dim3 threads(32, 8); dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y)); matchTemplatePreparedKernel_CCOFF_8UC2<<<grid, threads, 0, stream>>>( w, h, (float)templ_sum_r / (w * h), (float)templ_sum_g / (w * h), image_sum_r, image_sum_g, result); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void matchTemplatePreparedKernel_CCOFF_8UC3( int w, int h, float templ_sum_scale_r, float templ_sum_scale_g, float templ_sum_scale_b, const PtrStep<int> image_sum_r, const PtrStep<int> image_sum_g, const PtrStep<int> image_sum_b, PtrStepSzf result) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < result.cols && y < result.rows) { float image_sum_r_ = (float)( (image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) - (image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x])); float image_sum_g_ = (float)( (image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) - (image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x])); float image_sum_b_ = (float)( (image_sum_b.ptr(y + h)[x + w] - image_sum_b.ptr(y)[x + w]) - (image_sum_b.ptr(y + h)[x] - image_sum_b.ptr(y)[x])); float ccorr = result.ptr(y)[x]; result.ptr(y)[x] = ccorr - image_sum_r_ * templ_sum_scale_r - image_sum_g_ * templ_sum_scale_g - image_sum_b_ * templ_sum_scale_b; } } void matchTemplatePrepared_CCOFF_8UC3( int w, int h, const PtrStepSz<int> image_sum_r, const PtrStepSz<int> image_sum_g, const PtrStepSz<int> image_sum_b, int templ_sum_r, int templ_sum_g, int templ_sum_b, PtrStepSzf result, cudaStream_t stream) { dim3 threads(32, 8); dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y)); matchTemplatePreparedKernel_CCOFF_8UC3<<<grid, threads, 0, stream>>>( w, h, (float)templ_sum_r / (w * h), (float)templ_sum_g / (w * h), (float)templ_sum_b / (w * h), image_sum_r, image_sum_g, image_sum_b, result); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void matchTemplatePreparedKernel_CCOFF_8UC4( int w, int h, float templ_sum_scale_r, float templ_sum_scale_g, float templ_sum_scale_b, float templ_sum_scale_a, const PtrStep<int> image_sum_r, const PtrStep<int> image_sum_g, const PtrStep<int> image_sum_b, const PtrStep<int> image_sum_a, PtrStepSzf result) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < result.cols && y < result.rows) { float image_sum_r_ = (float)( (image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) - (image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x])); float image_sum_g_ = (float)( (image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) - (image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x])); float image_sum_b_ = (float)( (image_sum_b.ptr(y + h)[x + w] - image_sum_b.ptr(y)[x + w]) - (image_sum_b.ptr(y + h)[x] - image_sum_b.ptr(y)[x])); float image_sum_a_ = (float)( (image_sum_a.ptr(y + h)[x + w] - image_sum_a.ptr(y)[x + w]) - (image_sum_a.ptr(y + h)[x] - image_sum_a.ptr(y)[x])); float ccorr = result.ptr(y)[x]; result.ptr(y)[x] = ccorr - image_sum_r_ * templ_sum_scale_r - image_sum_g_ * templ_sum_scale_g - image_sum_b_ * templ_sum_scale_b - image_sum_a_ * templ_sum_scale_a; } } void matchTemplatePrepared_CCOFF_8UC4( int w, int h, 
const PtrStepSz<int> image_sum_r, const PtrStepSz<int> image_sum_g, const PtrStepSz<int> image_sum_b, const PtrStepSz<int> image_sum_a, int templ_sum_r, int templ_sum_g, int templ_sum_b, int templ_sum_a, PtrStepSzf result, cudaStream_t stream) { dim3 threads(32, 8); dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y)); matchTemplatePreparedKernel_CCOFF_8UC4<<<grid, threads, 0, stream>>>( w, h, (float)templ_sum_r / (w * h), (float)templ_sum_g / (w * h), (float)templ_sum_b / (w * h), (float)templ_sum_a / (w * h), image_sum_r, image_sum_g, image_sum_b, image_sum_a, result); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////// // Prepared_CCOFF_NORMED __global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8U( int w, int h, float weight, float templ_sum_scale, float templ_sqsum_scale, const PtrStep<int> image_sum, const PtrStep<double> image_sqsum, PtrStepSzf result) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < result.cols && y < result.rows) { float ccorr = result.ptr(y)[x]; float image_sum_ = (float)( (image_sum.ptr(y + h)[x + w] - image_sum.ptr(y)[x + w]) - (image_sum.ptr(y + h)[x] - image_sum.ptr(y)[x])); float image_sqsum_ = (float)( (image_sqsum.ptr(y + h)[x + w] - image_sqsum.ptr(y)[x + w]) - (image_sqsum.ptr(y + h)[x] - image_sqsum.ptr(y)[x])); result.ptr(y)[x] = normAcc(ccorr - image_sum_ * templ_sum_scale, sqrtf(templ_sqsum_scale * (image_sqsum_ - weight * image_sum_ * image_sum_))); } } void matchTemplatePrepared_CCOFF_NORMED_8U( int w, int h, const PtrStepSz<int> image_sum, const PtrStepSz<double> image_sqsum, int templ_sum, double templ_sqsum, PtrStepSzf result, cudaStream_t stream) { dim3 threads(32, 8); dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y)); float weight = 1.f / (w * h); float templ_sum_scale = templ_sum * weight; float templ_sqsum_scale = templ_sqsum - weight * templ_sum * templ_sum; matchTemplatePreparedKernel_CCOFF_NORMED_8U<<<grid, threads, 0, stream>>>( w, h, weight, templ_sum_scale, templ_sqsum_scale, image_sum, image_sqsum, result); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC2( int w, int h, float weight, float templ_sum_scale_r, float templ_sum_scale_g, float templ_sqsum_scale, const PtrStep<int> image_sum_r, const PtrStep<double> image_sqsum_r, const PtrStep<int> image_sum_g, const PtrStep<double> image_sqsum_g, PtrStepSzf result) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < result.cols && y < result.rows) { float image_sum_r_ = (float)( (image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) - (image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x])); float image_sqsum_r_ = (float)( (image_sqsum_r.ptr(y + h)[x + w] - image_sqsum_r.ptr(y)[x + w]) - (image_sqsum_r.ptr(y + h)[x] - image_sqsum_r.ptr(y)[x])); float image_sum_g_ = (float)( (image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) - (image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x])); float image_sqsum_g_ = (float)( (image_sqsum_g.ptr(y + h)[x + w] - image_sqsum_g.ptr(y)[x + w]) - (image_sqsum_g.ptr(y + h)[x] - image_sqsum_g.ptr(y)[x])); float num = result.ptr(y)[x] - image_sum_r_ * templ_sum_scale_r - image_sum_g_ * templ_sum_scale_g; float denum = sqrtf(templ_sqsum_scale * (image_sqsum_r_ - 
weight * image_sum_r_ * image_sum_r_ + image_sqsum_g_ - weight * image_sum_g_ * image_sum_g_)); result.ptr(y)[x] = normAcc(num, denum); } } void matchTemplatePrepared_CCOFF_NORMED_8UC2( int w, int h, const PtrStepSz<int> image_sum_r, const PtrStepSz<double> image_sqsum_r, const PtrStepSz<int> image_sum_g, const PtrStepSz<double> image_sqsum_g, int templ_sum_r, double templ_sqsum_r, int templ_sum_g, double templ_sqsum_g, PtrStepSzf result, cudaStream_t stream) { dim3 threads(32, 8); dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y)); float weight = 1.f / (w * h); float templ_sum_scale_r = templ_sum_r * weight; float templ_sum_scale_g = templ_sum_g * weight; float templ_sqsum_scale = templ_sqsum_r - weight * templ_sum_r * templ_sum_r + templ_sqsum_g - weight * templ_sum_g * templ_sum_g; matchTemplatePreparedKernel_CCOFF_NORMED_8UC2<<<grid, threads, 0, stream>>>( w, h, weight, templ_sum_scale_r, templ_sum_scale_g, templ_sqsum_scale, image_sum_r, image_sqsum_r, image_sum_g, image_sqsum_g, result); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC3( int w, int h, float weight, float templ_sum_scale_r, float templ_sum_scale_g, float templ_sum_scale_b, float templ_sqsum_scale, const PtrStep<int> image_sum_r, const PtrStep<double> image_sqsum_r, const PtrStep<int> image_sum_g, const PtrStep<double> image_sqsum_g, const PtrStep<int> image_sum_b, const PtrStep<double> image_sqsum_b, PtrStepSzf result) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < result.cols && y < result.rows) { float image_sum_r_ = (float)( (image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) - (image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x])); float image_sqsum_r_ = (float)( (image_sqsum_r.ptr(y + h)[x + w] - image_sqsum_r.ptr(y)[x + w]) - (image_sqsum_r.ptr(y + h)[x] - image_sqsum_r.ptr(y)[x])); float image_sum_g_ = (float)( (image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) - (image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x])); float image_sqsum_g_ = (float)( (image_sqsum_g.ptr(y + h)[x + w] - image_sqsum_g.ptr(y)[x + w]) - (image_sqsum_g.ptr(y + h)[x] - image_sqsum_g.ptr(y)[x])); float image_sum_b_ = (float)( (image_sum_b.ptr(y + h)[x + w] - image_sum_b.ptr(y)[x + w]) - (image_sum_b.ptr(y + h)[x] - image_sum_b.ptr(y)[x])); float image_sqsum_b_ = (float)( (image_sqsum_b.ptr(y + h)[x + w] - image_sqsum_b.ptr(y)[x + w]) - (image_sqsum_b.ptr(y + h)[x] - image_sqsum_b.ptr(y)[x])); float num = result.ptr(y)[x] - image_sum_r_ * templ_sum_scale_r - image_sum_g_ * templ_sum_scale_g - image_sum_b_ * templ_sum_scale_b; float denum = sqrtf(templ_sqsum_scale * (image_sqsum_r_ - weight * image_sum_r_ * image_sum_r_ + image_sqsum_g_ - weight * image_sum_g_ * image_sum_g_ + image_sqsum_b_ - weight * image_sum_b_ * image_sum_b_)); result.ptr(y)[x] = normAcc(num, denum); } } void matchTemplatePrepared_CCOFF_NORMED_8UC3( int w, int h, const PtrStepSz<int> image_sum_r, const PtrStepSz<double> image_sqsum_r, const PtrStepSz<int> image_sum_g, const PtrStepSz<double> image_sqsum_g, const PtrStepSz<int> image_sum_b, const PtrStepSz<double> image_sqsum_b, int templ_sum_r, double templ_sqsum_r, int templ_sum_g, double templ_sqsum_g, int templ_sum_b, double templ_sqsum_b, PtrStepSzf result, cudaStream_t stream) { dim3 threads(32, 8); dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y)); float weight = 1.f / (w * h); 
float templ_sum_scale_r = templ_sum_r * weight; float templ_sum_scale_g = templ_sum_g * weight; float templ_sum_scale_b = templ_sum_b * weight; float templ_sqsum_scale = templ_sqsum_r - weight * templ_sum_r * templ_sum_r + templ_sqsum_g - weight * templ_sum_g * templ_sum_g + templ_sqsum_b - weight * templ_sum_b * templ_sum_b; matchTemplatePreparedKernel_CCOFF_NORMED_8UC3<<<grid, threads, 0, stream>>>( w, h, weight, templ_sum_scale_r, templ_sum_scale_g, templ_sum_scale_b, templ_sqsum_scale, image_sum_r, image_sqsum_r, image_sum_g, image_sqsum_g, image_sum_b, image_sqsum_b, result); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } __global__ void matchTemplatePreparedKernel_CCOFF_NORMED_8UC4( int w, int h, float weight, float templ_sum_scale_r, float templ_sum_scale_g, float templ_sum_scale_b, float templ_sum_scale_a, float templ_sqsum_scale, const PtrStep<int> image_sum_r, const PtrStep<double> image_sqsum_r, const PtrStep<int> image_sum_g, const PtrStep<double> image_sqsum_g, const PtrStep<int> image_sum_b, const PtrStep<double> image_sqsum_b, const PtrStep<int> image_sum_a, const PtrStep<double> image_sqsum_a, PtrStepSzf result) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < result.cols && y < result.rows) { float image_sum_r_ = (float)( (image_sum_r.ptr(y + h)[x + w] - image_sum_r.ptr(y)[x + w]) - (image_sum_r.ptr(y + h)[x] - image_sum_r.ptr(y)[x])); float image_sqsum_r_ = (float)( (image_sqsum_r.ptr(y + h)[x + w] - image_sqsum_r.ptr(y)[x + w]) - (image_sqsum_r.ptr(y + h)[x] - image_sqsum_r.ptr(y)[x])); float image_sum_g_ = (float)( (image_sum_g.ptr(y + h)[x + w] - image_sum_g.ptr(y)[x + w]) - (image_sum_g.ptr(y + h)[x] - image_sum_g.ptr(y)[x])); float image_sqsum_g_ = (float)( (image_sqsum_g.ptr(y + h)[x + w] - image_sqsum_g.ptr(y)[x + w]) - (image_sqsum_g.ptr(y + h)[x] - image_sqsum_g.ptr(y)[x])); float image_sum_b_ = (float)( (image_sum_b.ptr(y + h)[x + w] - image_sum_b.ptr(y)[x + w]) - (image_sum_b.ptr(y + h)[x] - image_sum_b.ptr(y)[x])); float image_sqsum_b_ = (float)( (image_sqsum_b.ptr(y + h)[x + w] - image_sqsum_b.ptr(y)[x + w]) - (image_sqsum_b.ptr(y + h)[x] - image_sqsum_b.ptr(y)[x])); float image_sum_a_ = (float)( (image_sum_a.ptr(y + h)[x + w] - image_sum_a.ptr(y)[x + w]) - (image_sum_a.ptr(y + h)[x] - image_sum_a.ptr(y)[x])); float image_sqsum_a_ = (float)( (image_sqsum_a.ptr(y + h)[x + w] - image_sqsum_a.ptr(y)[x + w]) - (image_sqsum_a.ptr(y + h)[x] - image_sqsum_a.ptr(y)[x])); float num = result.ptr(y)[x] - image_sum_r_ * templ_sum_scale_r - image_sum_g_ * templ_sum_scale_g - image_sum_b_ * templ_sum_scale_b - image_sum_a_ * templ_sum_scale_a; float denum = sqrtf(templ_sqsum_scale * (image_sqsum_r_ - weight * image_sum_r_ * image_sum_r_ + image_sqsum_g_ - weight * image_sum_g_ * image_sum_g_ + image_sqsum_b_ - weight * image_sum_b_ * image_sum_b_ + image_sqsum_a_ - weight * image_sum_a_ * image_sum_a_)); result.ptr(y)[x] = normAcc(num, denum); } } void matchTemplatePrepared_CCOFF_NORMED_8UC4( int w, int h, const PtrStepSz<int> image_sum_r, const PtrStepSz<double> image_sqsum_r, const PtrStepSz<int> image_sum_g, const PtrStepSz<double> image_sqsum_g, const PtrStepSz<int> image_sum_b, const PtrStepSz<double> image_sqsum_b, const PtrStepSz<int> image_sum_a, const PtrStepSz<double> image_sqsum_a, int templ_sum_r, double templ_sqsum_r, int templ_sum_g, double templ_sqsum_g, int templ_sum_b, double templ_sqsum_b, int templ_sum_a, double templ_sqsum_a, PtrStepSzf 
result, cudaStream_t stream) { dim3 threads(32, 8); dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y)); float weight = 1.f / (w * h); float templ_sum_scale_r = templ_sum_r * weight; float templ_sum_scale_g = templ_sum_g * weight; float templ_sum_scale_b = templ_sum_b * weight; float templ_sum_scale_a = templ_sum_a * weight; float templ_sqsum_scale = templ_sqsum_r - weight * templ_sum_r * templ_sum_r + templ_sqsum_g - weight * templ_sum_g * templ_sum_g + templ_sqsum_b - weight * templ_sum_b * templ_sum_b + templ_sqsum_a - weight * templ_sum_a * templ_sum_a; matchTemplatePreparedKernel_CCOFF_NORMED_8UC4<<<grid, threads, 0, stream>>>( w, h, weight, templ_sum_scale_r, templ_sum_scale_g, templ_sum_scale_b, templ_sum_scale_a, templ_sqsum_scale, image_sum_r, image_sqsum_r, image_sum_g, image_sqsum_g, image_sum_b, image_sqsum_b, image_sum_a, image_sqsum_a, result); cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////// // normalize template <int cn> __global__ void normalizeKernel_8U( int w, int h, const PtrStep<double> image_sqsum, double templ_sqsum, PtrStepSzf result) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < result.cols && y < result.rows) { float image_sqsum_ = (float)( (image_sqsum.ptr(y + h)[(x + w) * cn] - image_sqsum.ptr(y)[(x + w) * cn]) - (image_sqsum.ptr(y + h)[x * cn] - image_sqsum.ptr(y)[x * cn])); result.ptr(y)[x] = normAcc(result.ptr(y)[x], sqrtf(image_sqsum_ * templ_sqsum)); } } void normalize_8U(int w, int h, const PtrStepSz<double> image_sqsum, double templ_sqsum, PtrStepSzf result, int cn, cudaStream_t stream) { dim3 threads(32, 8); dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y)); switch (cn) { case 1: normalizeKernel_8U<1><<<grid, threads, 0, stream>>>(w, h, image_sqsum, templ_sqsum, result); break; case 2: normalizeKernel_8U<2><<<grid, threads, 0, stream>>>(w, h, image_sqsum, templ_sqsum, result); break; case 3: normalizeKernel_8U<3><<<grid, threads, 0, stream>>>(w, h, image_sqsum, templ_sqsum, result); break; case 4: normalizeKernel_8U<4><<<grid, threads, 0, stream>>>(w, h, image_sqsum, templ_sqsum, result); break; } cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } ////////////////////////////////////////////////////////////////////// // extractFirstChannel template <int cn> __global__ void extractFirstChannel_32F(const PtrStepb image, PtrStepSzf result) { typedef typename TypeVec<float, cn>::vec_type Typef; int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (x < result.cols && y < result.rows) { Typef val = ((const Typef*)image.ptr(y))[x]; result.ptr(y)[x] = first(val); } } void extractFirstChannel_32F(const PtrStepSzb image, PtrStepSzf result, int cn, cudaStream_t stream) { dim3 threads(32, 8); dim3 grid(divUp(result.cols, threads.x), divUp(result.rows, threads.y)); switch (cn) { case 1: extractFirstChannel_32F<1><<<grid, threads, 0, stream>>>(image, result); break; case 2: extractFirstChannel_32F<2><<<grid, threads, 0, stream>>>(image, result); break; case 3: extractFirstChannel_32F<3><<<grid, threads, 0, stream>>>(image, result); break; case 4: extractFirstChannel_32F<4><<<grid, threads, 0, stream>>>(image, result); break; } cudaSafeCall( cudaGetLastError() ); if (stream == 0) cudaSafeCall( cudaDeviceSynchronize() ); } } //namespace match_template }}} 
// namespace cv { namespace cuda { namespace cudev #endif /* CUDA_DISABLER */
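// The kernels above repeatedly evaluate window sums from an integral image
// (summed-area table) with four reads per channel:
//   sum over the w x h window at (x, y) =
//     (S(y+h, x+w) - S(y, x+w)) - (S(y+h, x) - S(y, x))
// where S carries one extra leading row/column of zeros. The stand-alone
// host-side sketch below only illustrates that identity; it is not part of
// the OpenCV sources, and the helper names and layout are assumptions.
#include <cstdio>
#include <vector>

// Build an (rows+1) x (cols+1) integral image with a zero first row/column.
static std::vector<long long> integral(const std::vector<int>& img, int rows, int cols) {
  std::vector<long long> S((rows + 1) * (cols + 1), 0);
  for (int y = 0; y < rows; ++y)
    for (int x = 0; x < cols; ++x)
      S[(y + 1) * (cols + 1) + (x + 1)] = img[y * cols + x]
          + S[y * (cols + 1) + (x + 1)]
          + S[(y + 1) * (cols + 1) + x]
          - S[y * (cols + 1) + x];
  return S;
}

// Sum of the w x h window whose top-left corner is (x, y), using four reads,
// grouped exactly like the kernels above group them.
static long long windowSum(const std::vector<long long>& S, int cols,
                           int x, int y, int w, int h) {
  const int stride = cols + 1;
  return (S[(y + h) * stride + (x + w)] - S[y * stride + (x + w)])
       - (S[(y + h) * stride + x]       - S[y * stride + x]);
}

int main() {
  const int rows = 4, cols = 5;
  std::vector<int> img(rows * cols);
  for (int i = 0; i < rows * cols; ++i) img[i] = i + 1;
  std::vector<long long> S = integral(img, rows, cols);
  // 3x2 window at (1, 1) over values 7,8,9,12,13,14 -> prints 63.
  std::printf("window sum = %lld\n", windowSum(S, cols, 1, 1, 3, 2));
  return 0;
}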
#pragma once #include <chrono> #include <thread> #include <gunrock/app/enactor_kernel.cuh> #include <gunrock/app/enactor_helper.cuh> #include <gunrock/util/latency_utils.cuh> /* this is the "stringize macro macro" hack */ #define STR(x) #x #define XSTR(x) STR(x) namespace gunrock { namespace app { /* * @brief Iteration loop. * * @tparam EnactorT * @tparam IterationT * @tparam NUM_VERTEX_ASSOCIATES * @tparam NUM_VALUE__ASSOCIATES * * @param[in] thread_data */ template <int NUM_VERTEX_ASSOCIATES, int NUM_VALUE__ASSOCIATES, typename IterationT> void Iteration_Loop(ThreadSlice &thread_data, IterationT &iteration) { typedef typename IterationT::Enactor Enactor; typedef typename Enactor::Problem Problem; typedef typename Enactor::SizeT SizeT; typedef typename Enactor::VertexT VertexT; typedef typename Enactor::ValueT ValueT; typedef typename Problem::DataSlice DataSlice; typedef typename Problem::GraphT GraphT; Enactor &enactor = ((Enactor *)thread_data.enactor)[0]; auto &problem = enactor.problem[0]; int num_gpus = enactor.num_gpus; int gpu_num = thread_data.thread_num; auto &data_slice = problem.data_slices[gpu_num][0]; auto &mgpu_slice = enactor.mgpu_slices[gpu_num]; // auto &data_slices = problem.data_slices; auto &mgpu_slices = enactor.mgpu_slices; auto &graph = problem.sub_graphs[gpu_num]; auto enactor_slices = enactor.enactor_slices + gpu_num * num_gpus; // auto &streams = data_slice.streams; auto &stages = mgpu_slice.stages; auto &to_shows = mgpu_slice.to_show; std::string mssg = ""; SizeT total_length = 0; SizeT received_length = 0; SizeT communicate_latency = enactor.communicate_latency; // float communicate_multipy = enactor.communicate_multipy; SizeT expand_latency = enactor.expand_latency; SizeT subqueue_latency = enactor.subqueue_latency; SizeT fullqueue_latency = enactor.fullqueue_latency; SizeT makeout_latency = enactor.makeout_latency; auto &frontier0 = enactor_slices[0].frontier; auto &enactor_stats0 = enactor_slices[0].enactor_stats; auto &stream0 = enactor_slices[0].stream; #ifdef ENABLE_PERFORMANCE_PROFILING util::CpuTimer cpu_timer; std::vector<double> &iter_full_queue_time = enactor.iter_full_queue_time[gpu_num].back(); std::vector<double> &iter_sub_queue_time = enactor.iter_sub_queue_time[gpu_num].back(); std::vector<double> &iter_total_time = enactor.iter_total_time[gpu_num].back(); std::vector<SizeT> &iter_full_queue_nodes_queued = enactor.iter_full_queue_nodes_queued[gpu_num].back(); std::vector<SizeT> &iter_full_queue_edges_queued = enactor.iter_full_queue_edges_queued[gpu_num].back(); cpu_timer.Start(); double iter_start_time = cpu_timer.MillisSinceStart(); double iter_stop_time = 0; double subqueue_finish_time = 0; SizeT h_edges_queued[16]; SizeT h_nodes_queued[16]; SizeT previous_edges_queued[16]; SizeT previous_nodes_queued[16]; SizeT h_full_queue_edges_queued = 0; SizeT h_full_queue_nodes_queued = 0; SizeT previous_full_queue_edges_queued = 0; SizeT previous_full_queue_nodes_queued = 0; for (int peer_ = 0; peer_ < num_gpus; peer_++) { h_edges_queued[peer_] = 0; h_nodes_queued[peer_] = 0; previous_nodes_queued[peer_] = 0; previous_edges_queued[peer_] = 0; } #endif util::PrintMsg("Iteration entered", enactor.flag & Debug); while (!iteration.Stop_Condition(gpu_num)) { total_length = 0; received_length = frontier0.queue_length; mgpu_slice.wait_counter = 0; // tretval = cudaSuccess; // frontier0.queue_offset = 0; frontier0.queue_reset = true; if (num_gpus > 1) { if (enactor_stats0.iteration != 0) for (int i = 1; i < num_gpus; i++) { auto &frontier = 
enactor_slices[i].frontier; // frontier.selector = frontier0.selector; // frontier.advance_type = frontier0.advance_type; // frontier.queue_offset = 0; frontier.queue_reset = true; frontier.queue_index = frontier0.queue_index; // frontier.current_label= frontier0.current_label; enactor_slices[i].enactor_stats.iteration = enactor_slices[0].enactor_stats.iteration; } if (IterationT::FLAG & Unified_Receive) { // printf("%d, %d : start_received_length = %d\n", // gpu_num, enactor_stats0.iteration, received_length); mgpu_slice.in_length_out[0] = received_length; mgpu_slice.in_length_out.Move(util::HOST, util::DEVICE, 1, 0, stream0); if (enactor_stats0.retval = util::GRError( cudaStreamSynchronize(stream0), "cudaStreamSynchronize failed", __FILE__, __LINE__)) break; } } else { // auto &frontier = enactor_slices[0].frontier; // frontier.queue_reset = true; // frontier.queue_offset = 0; mgpu_slice.in_length_out[0] = received_length; } for (int peer = 0; peer < num_gpus; peer++) { stages[peer] = 0; stages[peer + num_gpus] = 0; to_shows[peer] = true; to_shows[peer + num_gpus] = true; for (int i = 0; i < mgpu_slice.num_stages; i++) mgpu_slice.events_set[enactor_stats0.iteration % 4][peer][i] = false; } // util::cpu_mt::PrintGPUArray<SizeT, VertexId>( // "labels", data_slice.labels.GetPointer(util::DEVICE), graph.nodes, // gpu_num, iteration, -1, streams[0]); while (mgpu_slice.wait_counter < num_gpus * 2 && (!iteration.Stop_Condition(gpu_num))) { // util::cpu_mt::PrintCPUArray<int, int>("stages", stages, num_gpus * 2, // thread_num, iteration); for (int peer__ = 0; peer__ < num_gpus * 2; peer__++) { auto peer_ = (peer__ % num_gpus); auto peer = peer_ <= gpu_num ? peer_ - 1 : peer_; auto gpu_ = peer < gpu_num ? gpu_num : gpu_num + 1; auto &enactor_slice = enactor_slices[peer_]; auto &enactor_stats = enactor_slice.enactor_stats; auto &frontier = enactor_slice.frontier; auto iteration_num = enactor_stats.iteration; auto iteration_num_ = iteration_num % 4; auto pre_stage = stages[peer__]; auto &stage = stages[peer__]; auto &stream = (peer__ <= num_gpus) ? 
enactor_slice.stream : enactor_slice.stream2; auto &to_show = to_shows[peer__]; auto &retval = enactor_stats.retval; // selector = frontier_attribute[peer_].selector; // scanned_edges_ = &(data_slice->scanned_edges [peer_]); // frontier_attribute_ = &(frontier_attribute [peer_]); // work_progress_ = &(work_progress [peer_]); if ((enactor.flag & Debug) != 0 && to_show) { // mssg=" ";mssg[0]='0'+data_slice->wait_counter; mssg = std::to_string(mgpu_slice.wait_counter); ShowDebugInfo(enactor, gpu_num, peer__, mssg, stream); } to_show = true; switch (stage) { case 0: // Assign marker & Scan if (peer__ == 0) { if (frontier.queue_length == 0) { // empty local queue // SetRecord(mgpu_slice, iteration_num, peer__, 3, stream); stage = 4; break; } else { if (IterationT::FLAG & Use_SubQ) { stage = 1; break; } else { SetRecord(mgpu_slice, iteration_num, peer__, 2, stream); stage = 3; break; } } } if (peer__ < num_gpus) { // wait and expand incoming if (!(mgpu_slices[peer] .events_set[iteration_num_][gpu_ + num_gpus][0])) { to_show = false; break; } frontier.queue_length = mgpu_slice.in_length[iteration_num % 2][peer_]; #ifdef ENABLE_PERFORMANCE_PROFILING enactor_stats.iter_in_length.back().push_back( mgpu_slice.in_length[iteration_num % 2][peer_]); #endif if (frontier.queue_length != 0) { if (retval = util::GRError( cudaStreamWaitEvent( stream, mgpu_slices[peer] .events[iteration_num_][gpu_ + num_gpus][0], 0), "cudaStreamWaitEvent failed", __FILE__, __LINE__)) break; } mgpu_slice.in_length[iteration_num % 2][peer_] = 0; mgpu_slices[peer].events_set[iteration_num_][gpu_ + num_gpus][0] = false; if (frontier.queue_length == 0) { // SetRecord(mgpu_slice, iteration_num, peer__, 3, stream); // printf(" %d\t %d\t %d\t Expand and subQ skipped\n", // gpu_num, iteration_num, peer__); stage = 4; break; } if (expand_latency != 0) util::latency::Insert_Latency( expand_latency, frontier.queue_length, stream, mgpu_slice.latency_data.GetPointer(util::DEVICE)); iteration.template ExpandIncoming<NUM_VERTEX_ASSOCIATES, NUM_VALUE__ASSOCIATES>( received_length, peer_); // printf("%d, Expand, selector = %d, keys = %p\n", // thread_num, selector^1, // frontier.keys[selector^1] .GetPointer(util::DEVICE)); // frontier.selector ^= 1; // frontier.queue_index++; if ((IterationT::FLAG & Use_SubQ) == 0) { if (IterationT::FLAG & Unified_Receive) { // SetRecord(mgpu_slice, iteration_num, peer__, 3, stream); stage = 4; } else { SetRecord(mgpu_slice, iteration_num, peer__, 2, stream); stage = 3; } } else { SetRecord(mgpu_slice, iteration_num, peer__, stage, stream); stage = 1; } break; } if (peer__ == num_gpus) { // out-going to local, not in use stage = 4; break; } if (peer__ > num_gpus) { if (iteration_num == 0) { // first iteration, nothing to send SetRecord(mgpu_slice, iteration_num, peer__, 0, stream); stage = 4; break; } // Push Neighbor if (communicate_latency != 0) util::latency::Insert_Latency( communicate_latency, mgpu_slice.out_length[peer_], stream, mgpu_slice.latency_data.GetPointer(util::DEVICE)); PushNeighbor<Enactor, NUM_VERTEX_ASSOCIATES, NUM_VALUE__ASSOCIATES>(enactor, gpu_num, peer); SetRecord(mgpu_slice, iteration_num, peer__, stage, stream); stage = 4; break; } break; case 1: // Comp Length if (peer_ != 0) { if (retval = CheckRecord(mgpu_slice, iteration_num, peer_, stage - 1, stage, to_show)) break; if (!to_show) break; frontier.queue_length = mgpu_slice.in_length_out[peer_]; } if (retval = iteration.Compute_OutputLength(peer_)) break; // TODO: Verify this // if (enactor -> size_check || // 
(Iteration::AdvanceKernelPolicy::ADVANCE_MODE // != oprtr::advance::TWC_FORWARD && // Iteration::AdvanceKernelPolicy::ADVANCE_MODE /// != oprtr::advance::TWC_BACKWARD)) // if (Enactor::FLAG & Size_Check) { SetRecord(mgpu_slice, iteration_num, peer_, stage, stream); } stage = 2; break; case 2: // SubQueue Core // TODO: Verify this // if (enactor -> size_check || // (Iteration::AdvanceKernelPolicy::ADVANCE_MODE // != oprtr::advance::TWC_FORWARD && // Iteration::AdvanceKernelPolicy::ADVANCE_MODE // != oprtr::advance::TWC_BACKWARD)) // if (Enactor::FLAG & Size_Check) { if (retval = CheckRecord(mgpu_slice, iteration_num, peer_, stage - 1, stage, to_show)) break; if (!to_show) break; /*if (Iteration::AdvanceKernelPolicy::ADVANCE_MODE == oprtr::advance::TWC_FORWARD || Iteration::AdvanceKernelPolicy::ADVANCE_MODE == oprtr::advance::TWC_BACKWARD) { frontier_attribute_->output_length[0] *= 1.1; }*/ if (enactor.flag & Size_Check) iteration.Check_Queue_Size(peer_); if (retval) break; } if (subqueue_latency != 0) util::latency::Insert_Latency( subqueue_latency, frontier.queue_length, stream, mgpu_slice.latency_data.GetPointer(util::DEVICE)); iteration.Core(peer_); if (retval) break; #ifdef ENABLE_PERFORMANCE_PROFILING h_nodes_queued[peer_] = enactor_stats.nodes_queued[0]; h_edges_queued[peer_] = enactor_stats.edges_queued[0]; enactor_stats.nodes_queued.Move(util::DEVICE, util::HOST, 1, 0, stream); enactor_stats.edges_queued.Move(util::DEVICE, util::HOST, 1, 0, stream); #endif if (num_gpus > 1) { SetRecord(mgpu_slice, iteration_num, peer__, stage, stream); stage = 3; } else { // SetRecord(mgpu_slice, iteration_num, peer__, 3, // streams[peer__]); stage = 4; } break; case 3: // Copy // if (Iteration::HAS_SUBQ || peer_ != 0) { if (retval = CheckRecord(mgpu_slice, iteration_num, peer_, stage - 1, stage, to_show)) break; if (!to_show) break; } // printf("size_check = %s\n", enactor -> size_check ? 
"true" : // "false");fflush(stdout); if ((IterationT::FLAG & Use_SubQ) == 0 && peer_ > 0) { frontier.queue_length = mgpu_slice.in_length_out[peer_]; } if ((enactor.flag & Size_Check) == 0 && (/*(enactor.flag & Debug) !=0 ||*/ num_gpus > 1)) { bool over_sized = false; if (IterationT::FLAG & Use_SubQ) { if (retval = CheckSize<SizeT, VertexT>( false, "queue3", frontier.output_length[0] + 2, frontier.Next_V_Q(), over_sized, gpu_num, iteration_num, peer_, false)) break; } if (frontier.queue_length == 0) break; if (retval = CheckSize<SizeT, VertexT>( false, "total_queue", total_length + frontier.queue_length, enactor_slices[num_gpus].frontier.V_Q(), over_sized, gpu_num, iteration_num, peer_, false)) break; // util::MemsetCopyVectorKernel<<<256, 256, 0, stream>>>( // enactor_slices[num_gpus].frontier.keys[0] // .GetPointer(util::DEVICE) + total_length, // frontier.keys[selector].GetPointer(util::DEVICE), // frontier.queue_length); if (retval = frontier.V_Q()->ForAll( enactor_slices[num_gpus].frontier.V_Q()[0], [total_length] __host__ __device__( VertexT * key0, VertexT * key1, const SizeT &pos) { key1[pos + total_length] = key0[pos]; }, frontier.queue_length, util::LOCATION_DEFAULT, stream)) break; // if (problem -> use_double_buffer) if (IterationT::FLAG & Use_Double_Buffer) { // util::MemsetCopyVectorKernel<<<256,256,0,streams[peer_]>>>( // data_slice->frontier_queues[num_gpus].values[0] // .GetPointer(util::DEVICE) + Total_Length, // frontier_queue_->values[selector].GetPointer(util::DEVICE), // frontier.queue_length); // TODO: Use other ways to do this // if (retval = frontier.values[selector].ForAll( // enactor_slices[num_gpus].frontier.values[0], // [total_length]__host__ __device__ // (ValueT* val0, ValueT *val1, const SizeT &pos) // { // val1[pos + total_length] = val0[pos]; // }, frontier.queue_length, // util::LOCATION_DEFAULT, stream)) // break; } } total_length += frontier.queue_length; // SetRecord(mgpu_slice, iteration_num, peer__, 3, streams[peer__]); stage = 4; break; case 4: // End mgpu_slice.wait_counter++; to_show = false; stage = 5; break; default: // stage--; to_show = false; } if ((enactor.flag & Debug) && !(retval)) { mssg = "stage 0 @ gpu 0, peer_ 0 failed"; mssg[6] = char(pre_stage + '0'); mssg[14] = char(gpu_num + '0'); mssg[23] = char(peer__ + '0'); retval = util::GRError(mssg, __FILE__, __LINE__); if (retval) break; } // stages[peer__]++; if (retval) break; } } if (!iteration.Stop_Condition(gpu_num)) { for (int peer_ = 0; peer_ < num_gpus; peer_++) mgpu_slice.wait_marker[peer_] = 0; int wait_count = 0; while (wait_count < num_gpus && !iteration.Stop_Condition(gpu_num)) { for (int peer_ = 0; peer_ < num_gpus; peer_++) { if (peer_ == num_gpus || mgpu_slice.wait_marker[peer_] != 0) continue; cudaError_t tretval = cudaStreamQuery(enactor_slices[peer_].stream); if (tretval == cudaSuccess) { mgpu_slice.wait_marker[peer_] = 1; wait_count++; continue; } else if (tretval != cudaErrorNotReady) { enactor_slices[peer_ % num_gpus].enactor_stats.retval = tretval; break; } } } if (IterationT::FLAG & Unified_Receive) { total_length = mgpu_slice.in_length_out[0]; } else if (num_gpus == 1) total_length = frontier0.queue_length; #ifdef ENABLE_PERFORMANCE_PROFILING subqueue_finish_time = cpu_timer.MillisSinceStart(); iter_sub_queue_time.push_back(subqueue_finish_time - iter_start_time); if (IterationT::FLAG & Use_SubQ) for (int peer_ = 0; peer_ < num_gpus; peer_++) { auto &enactor_stats = enactor_slices[peer_].enactor_stats; enactor_stats.iter_nodes_queued.back().push_back( 
h_nodes_queued[peer_] + enactor_stats.nodes_queued[0] - previous_nodes_queued[peer_]); previous_nodes_queued[peer_] = h_nodes_queued[peer_] + enactor_stats.nodes_queued[0]; enactor_stats.nodes_queued[0] = h_nodes_queued[peer_]; enactor_stats.iter_edges_queued.back().push_back( h_edges_queued[peer_] + enactor_stats.edges_queued[0] - previous_edges_queued[peer_]); previous_edges_queued[peer_] = h_edges_queued[peer_] + enactor_stats.edges_queued[0]; enactor_stats.edges_queued[0] = h_edges_queued[peer_]; } #endif if (enactor.flag & Debug) { util::PrintMsg(std::to_string(gpu_num) + "\t " + std::to_string(enactor_stats0.iteration) + "\t \t Subqueue finished. Total_Length= " + std::to_string(total_length)); } // grid_size = Total_Length/256+1; // if (grid_size > 512) grid_size = 512; if ((enactor.flag & Size_Check) && (IterationT::FLAG & Unified_Receive) == 0) { bool over_sized = false; if (enactor_stats0.retval = CheckSize<SizeT, VertexT>( true, "total_queue", total_length, frontier0.V_Q(), over_sized, gpu_num, enactor_stats0.iteration, num_gpus, true)) break; // if (problem -> use_double_buffer) // if (enactor_stats[0].retval = // CheckSize</*true,*/ SizeT, Value> ( // true, "total_queue", Total_Length, // &data_slice->frontier_queues[0].values[frontier_attribute[0].selector], // over_sized, thread_num, enactor_stats0.iteration, // num_gpus, true)) // break; SizeT offset = frontier0.queue_length; for (int peer_ = 1; peer_ < num_gpus; peer_++) if (enactor_slices[peer_].frontier.queue_length != 0) { auto &frontier = enactor_slices[peer_].frontier; // util::MemsetCopyVectorKernel<<<256,256, 0, streams[0]>>>( // data_slice->frontier_queues[0 ] // .keys[frontier_attribute[0 ].selector] // .GetPointer(util::DEVICE) + offset, // data_slice->frontier_queues[peer_] // .keys[frontier_attribute[peer_].selector] // .GetPointer(util::DEVICE), // frontier_attribute[peer_].queue_length); frontier0.V_Q()->ForAll( frontier.V_Q()[0], [offset] __host__ __device__(VertexT * key0, VertexT * key1, const SizeT &pos) { key0[pos + offset] = key1[pos]; }, frontier.queue_length, util::LOCATION_DEFAULT, stream0); // TODO // if (problem -> use_double_buffer) // util::MemsetCopyVectorKernel<<<256,256,0,streams[0]>>>( // data_slice->frontier_queues[0 ] // .values[frontier_attribute[0 ].selector] // .GetPointer(util::DEVICE) + offset, // data_slice->frontier_queues[peer_] // .values[frontier_attribute[peer_].selector] // .GetPointer(util::DEVICE), // frontier_attribute[peer_].queue_length); offset += frontier.queue_length; } } frontier0.queue_length = total_length; // TODO: match here // if ((enactor.flag & Size_Check) == 0) // frontier0.selector = 0; if (IterationT::FLAG & Use_FullQ) { int peer_ = ((enactor.flag & Size_Check) != 0 || num_gpus == 1) ? 0 : num_gpus; auto &enactor_slice = enactor_slices[peer_]; auto &frontier = enactor_slice.frontier; auto &enactor_stats = enactor_slice.enactor_stats; // frontier_queue_ = &(data_slice->frontier_queues // [(enactor -> size_check || num_gpus==1) ? 0 : num_gpus]); // scanned_edges_ = &(data_slice->scanned_edges // [(enactor -> size_check || num_gpus==1) ? 
0 : num_gpus]); // frontier_attribute_ = &(frontier_attribute[peer_]); // enactor_stats_ = &(enactor_stats[peer_]); // work_progress_ = &(work_progress[peer_]); auto &iteration_num = enactor_stats.iteration; auto stream = enactor_slice.stream; auto &retval = enactor_stats.retval; // frontier.queue_offset = 0; frontier.queue_reset = true; // TODO: match here // if ((enactor.flag & Size_Check) == 0) // frontier.selector = 0; iteration.Gather(peer_); // selector = frontier.selector; if (retval) break; if (frontier.queue_length != 0) { if (enactor.flag & Debug) { mssg = ""; ShowDebugInfo(enactor, gpu_num, peer_, mssg, stream); } retval = iteration.Compute_OutputLength(peer_); if (retval) break; // frontier_attribute_->output_length.Move( // util::DEVICE, util::HOST, 1, 0, streams[peer_]); if (enactor.flag & Size_Check) { cudaError_t tretval = cudaStreamSynchronize(stream); if (tretval != cudaSuccess) { retval = tretval; break; } iteration.Check_Queue_Size(peer_); if (retval) break; } if (fullqueue_latency != 0) util::latency::Insert_Latency( fullqueue_latency, frontier.queue_length, stream, mgpu_slice.latency_data.GetPointer(util::DEVICE)); iteration.Core(peer_); if (retval) break; #ifdef ENABLE_PERFORMANCE_PROFILING h_full_queue_nodes_queued = enactor_stats.nodes_queued[0]; h_full_queue_edges_queued = enactor_stats.edges_queued[0]; enactor_stats.edges_queued.Move(util::DEVICE, util::HOST, 1, 0, stream); enactor_stats.nodes_queued.Move(util::DEVICE, util::HOST, 1, 0, stream); #endif if (retval = util::GRError(cudaStreamSynchronize(stream), "FullQueue_Core failed.", __FILE__, __LINE__)) break; // cudaError_t tretval = cudaErrorNotReady; // while (tretval == cudaErrorNotReady) //{ // tretval = cudaStreamQuery(stream); // if (tretval == cudaErrorNotReady) // { // //sleep(0); // std::this_thread::sleep_for(std::chrono::microseconds(0)); // } //} // if (retval = util::GRError(tretval, // "FullQueue_Core failed.", __FILE__, __LINE__)) // break; #ifdef ENABLE_PERFORMANCE_PROFILING iter_full_queue_nodes_queued.push_back( h_full_queue_nodes_queued + enactor_stats.nodes_queued[0] - previous_full_queue_nodes_queued); previous_full_queue_nodes_queued = h_full_queue_nodes_queued + enactor_stats.nodes_queued[0]; enactor_stats.nodes_queued[0] = h_full_queue_nodes_queued; iter_full_queue_edges_queued.push_back( h_full_queue_edges_queued + enactor_stats.edges_queued[0] - previous_full_queue_edges_queued); previous_full_queue_edges_queued = h_full_queue_edges_queued + enactor_stats.edges_queued[0]; enactor_stats.edges_queued[0] = h_full_queue_edges_queued; #endif if ((enactor.flag & Size_Check) == 0) { bool over_sized = false; if (retval = CheckSize<SizeT, VertexT>( false, "queue3", frontier.output_length[0] + 2, frontier.Next_V_Q(), over_sized, gpu_num, iteration_num, peer_, false)) break; } // selector = frontier_attribute[peer_].selector; total_length = frontier.queue_length; } else { total_length = 0; for (int peer__ = 0; peer__ < num_gpus; peer__++) mgpu_slice.out_length[peer__] = 0; #ifdef ENABLE_PERFORMANCE_PROFILING iter_full_queue_nodes_queued.push_back(0); iter_full_queue_edges_queued.push_back(0); #endif } #ifdef ENABLE_PERFORMANCE_PROFILING iter_full_queue_time.push_back(cpu_timer.MillisSinceStart() - subqueue_finish_time); #endif if (enactor.flag & Debug) { util::PrintMsg(std::to_string(gpu_num) + "\t " + std::to_string(enactor_stats0.iteration) + "\t \t Fullqueue finished. 
Total_Length= " + std::to_string(total_length)); } // frontier_queue_ = &(data_slice->frontier_queues[enactor -> // size_check?0:num_gpus]); if (num_gpus == 1) mgpu_slice.out_length[0] = total_length; } if (num_gpus > 1) { for (int peer_ = num_gpus + 1; peer_ < num_gpus * 2; peer_++) mgpu_slice.wait_marker[peer_] = 0; int wait_count = 0; while (wait_count < num_gpus - 1 && !iteration.Stop_Condition(gpu_num)) { for (int peer_ = num_gpus + 1; peer_ < num_gpus * 2; peer_++) { if (peer_ == num_gpus || mgpu_slice.wait_marker[peer_] != 0) continue; cudaError_t tretval = cudaStreamQuery(enactor_slices[peer_ - num_gpus].stream2); if (tretval == cudaSuccess) { mgpu_slice.wait_marker[peer_] = 1; wait_count++; continue; } else if (tretval != cudaErrorNotReady) { enactor_slices[peer_ % num_gpus].enactor_stats.retval = tretval; break; } } } iteration.UpdatePreds(total_length); if (makeout_latency != 0) util::latency::Insert_Latency( makeout_latency, total_length, stream0, mgpu_slice.latency_data.GetPointer(util::DEVICE)); iteration .template MakeOutput<NUM_VERTEX_ASSOCIATES, NUM_VALUE__ASSOCIATES>( total_length); } else { mgpu_slice.out_length[0] = total_length; } for (int peer_ = 0; peer_ < num_gpus; peer_++) { enactor_slices[peer_].frontier.queue_length = mgpu_slice.out_length[peer_]; #ifdef ENABLE_PERFORMANCE_PROFILING // if (peer_ == 0) enactor_slices[peer_].enactor_stats.iter_out_length.back().push_back( mgpu_slice.out_length[peer_]); #endif } } #ifdef ENABLE_PERFORMANCE_PROFILING iter_stop_time = cpu_timer.MillisSinceStart(); iter_total_time.push_back(iter_stop_time - iter_start_time); iter_start_time = iter_stop_time; #endif iteration.Change(); } } /** * @brief Thread controls. * @tparam Enactor Enactor type we process on. * @param[in] thread_data_ Thread data. */ template <typename Enactor> static CUT_THREADPROC GunrockThread(void *thread_data_) { // typedef typename Enactor::Problem Problem ; // typedef typename Enactor::SizeT SizeT ; // typedef typename Enactor::VertexT VertexT ; // typedef typename Enactor::ValueT ValueT ; // typedef typename Problem::GraphT GraphT ; // typedef typename GraphT ::CsrT CsrT ; // typedef typename GraphT ::GpT GpT ; ThreadSlice &thread_data = ((ThreadSlice *)thread_data_)[0]; // Problem *problem = (Problem*) thread_data -> problem; Enactor &enactor = ((Enactor *)thread_data.enactor)[0]; // int num_gpus = problem -> num_gpus; int thread_num = thread_data.thread_num; int gpu_idx = enactor.gpu_idx[thread_num]; auto &thread_status = thread_data.status; auto &retval = enactor.enactor_slices[thread_num * enactor.num_gpus] .enactor_stats.retval; if (retval = util::SetDevice(gpu_idx)) { thread_status = ThreadSlice::Status::Ended; CUT_THREADEND; } // util::PrintMsg("Thread entered."); thread_status = ThreadSlice::Status::Idle; while (thread_status != ThreadSlice::Status::ToKill) { while (thread_status == ThreadSlice::Status::Wait || thread_status == ThreadSlice::Status::Idle) { // sleep(0); std::this_thread::sleep_for(std::chrono::microseconds(0)); // std::this_thread::yield(); } if (thread_status == ThreadSlice::Status::ToKill) break; // util::PrintMsg("Run started"); enactor.Run(thread_data); thread_status = ThreadSlice::Status::Idle; } thread_status = ThreadSlice::Status::Ended; CUT_THREADEND; } } // namespace app } // namespace gunrock // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
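// Iteration_Loop above waits for the per-peer streams by polling
// cudaStreamQuery() and treating cudaErrorNotReady as "still running" rather
// than a failure, yielding the CPU between polls. A stripped-down, stand-alone
// sketch of that pattern follows; the trivial kernel and the helper name are
// assumptions, not part of Gunrock.
#include <chrono>
#include <cstdio>
#include <thread>
#include <cuda_runtime.h>

__global__ void BusyKernel(float *data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] = data[i] * 2.0f + 1.0f;
}

// Poll the stream until its queued work is done; return the first real error.
static cudaError_t WaitOnStream(cudaStream_t stream) {
  while (true) {
    cudaError_t retval = cudaStreamQuery(stream);
    if (retval == cudaSuccess) return cudaSuccess;   // all queued work finished
    if (retval != cudaErrorNotReady) return retval;  // a real error, stop polling
    // Not finished yet: yield briefly instead of spinning hard.
    std::this_thread::sleep_for(std::chrono::microseconds(0));
  }
}

int main() {
  const int n = 1 << 20;
  float *d_data = nullptr;
  cudaMalloc(&d_data, n * sizeof(float));
  cudaMemset(d_data, 0, n * sizeof(float));
  cudaStream_t stream;
  cudaStreamCreate(&stream);
  BusyKernel<<<(n + 255) / 256, 256, 0, stream>>>(d_data, n);
  std::printf("wait returned: %s\n", cudaGetErrorString(WaitOnStream(stream)));
  cudaStreamDestroy(stream);
  cudaFree(d_data);
  return 0;
}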
#include "badslam/cuda_depth_processing.cuh" #include <cub/cub.cuh> #include <libvis/cuda/cuda_auto_tuner.h> #include <math_constants.h> #include "badslam/cuda_util.cuh" #include "badslam/cuda_matrix.cuh" #include "badslam/kernels.cuh" #include "badslam/util.cuh" namespace vis { __global__ void BilateralFilteringAndDepthCutoffCUDAKernel( float denom_xy, float denom_value, int radius, int radius_squared, u16 max_depth, float raw_to_float_depth, CUDABuffer_<u16> input_depth, CUDABuffer_<u16> output_depth) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < output_depth.width() && y < output_depth.height()) { // Depth cutoff. const u16 center_value = input_depth(y, x); if (center_value == 0 || center_value > max_depth) { output_depth(y, x) = kUnknownDepth; return; } const float inv_center_value = 1.0f / (raw_to_float_depth * center_value); // Bilateral filtering. float sum = 0; float weight = 0; const int min_y = max(static_cast<int>(0), static_cast<int>(y - radius)); const int max_y = min(static_cast<int>(output_depth.height() - 1), static_cast<int>(y + radius)); for (int sample_y = min_y; sample_y <= max_y; ++ sample_y) { const int dy = sample_y - y; const int min_x = max(static_cast<int>(0), static_cast<int>(x - radius)); const int max_x = min(static_cast<int>(output_depth.width() - 1), static_cast<int>(x + radius)); for (int sample_x = min_x; sample_x <= max_x; ++ sample_x) { const int dx = sample_x - x; const int grid_distance_squared = dx * dx + dy * dy; if (grid_distance_squared > radius_squared) { continue; } const u16 sample = input_depth(sample_y, sample_x); if (sample == 0) { continue; } const float inv_sample = 1.0f / (raw_to_float_depth * sample); float value_distance_squared = inv_center_value - inv_sample; value_distance_squared *= value_distance_squared; float w = exp(-grid_distance_squared / denom_xy + -value_distance_squared / denom_value); sum += w * inv_sample; weight += w; } } output_depth(y, x) = (weight == 0) ? 
kUnknownDepth : (1.0f / (raw_to_float_depth * sum / weight)); } } void BilateralFilteringAndDepthCutoffCUDA( cudaStream_t stream, float sigma_xy, float sigma_value, float radius_factor, u16 max_depth, float raw_to_float_depth, const CUDABuffer_<u16>& input_depth, CUDABuffer_<u16>* output_depth) { CUDA_CHECK(); int radius = radius_factor * sigma_xy + 0.5f; CUDA_AUTO_TUNE_2D( BilateralFilteringAndDepthCutoffCUDAKernel, 32, 32, output_depth->width(), output_depth->height(), 0, stream, /* kernel parameters */ 2.0f * sigma_xy * sigma_xy, 2.0f * sigma_value * sigma_value, radius, radius * radius, max_depth, raw_to_float_depth, input_depth, *output_depth); CUDA_CHECK(); } // ----------------------------------------------------------------------------- __global__ void ComputeNormalsCUDAKernel( PixelCenterUnprojector unprojector, DepthParameters depth_params, CUDABuffer_<u16> in_depth, CUDABuffer_<u16> out_depth, CUDABuffer_<u16> out_normals) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < in_depth.width() && y < in_depth.height()) { constexpr int kBorder = 1; if (x < kBorder || y < kBorder || x >= in_depth.width() - kBorder || y >= in_depth.height() - kBorder) { out_depth(y, x) = kUnknownDepth; out_normals(y, x) = ImageSpaceNormalToU16(0, 0); return; } u16 center_raw_depth = in_depth(y, x); if (center_raw_depth & kInvalidDepthBit) { out_depth(y, x) = kUnknownDepth; out_normals(y, x) = ImageSpaceNormalToU16(0, 0); return; } u16 right_raw_depth = in_depth(y, x + 1); u16 left_raw_depth = in_depth(y, x - 1); u16 bottom_raw_depth = in_depth(y + 1, x); u16 top_raw_depth = in_depth(y - 1, x); if (right_raw_depth & kInvalidDepthBit || left_raw_depth & kInvalidDepthBit || bottom_raw_depth & kInvalidDepthBit || top_raw_depth & kInvalidDepthBit) { // TODO: Still use this pixel by only using the valid neighbors (if there are enough). // Should test the effect on accuracy though since pixels at borders might be likely to be uncertain! 
out_depth(y, x) = kUnknownDepth; out_normals(y, x) = ImageSpaceNormalToU16(0, 0); return; } float center_depth = RawToCalibratedDepth( depth_params.a, depth_params.cfactor_buffer(y / depth_params.sparse_surfel_cell_size, x / depth_params.sparse_surfel_cell_size), depth_params.raw_to_float_depth, center_raw_depth); float left_depth = RawToCalibratedDepth( depth_params.a, depth_params.cfactor_buffer(y / depth_params.sparse_surfel_cell_size, (x - 1) / depth_params.sparse_surfel_cell_size), depth_params.raw_to_float_depth, left_raw_depth); float top_depth = RawToCalibratedDepth( depth_params.a, depth_params.cfactor_buffer((y - 1) / depth_params.sparse_surfel_cell_size, x / depth_params.sparse_surfel_cell_size), depth_params.raw_to_float_depth, top_raw_depth); float right_depth = RawToCalibratedDepth( depth_params.a, depth_params.cfactor_buffer(y / depth_params.sparse_surfel_cell_size, (x + 1) / depth_params.sparse_surfel_cell_size), depth_params.raw_to_float_depth, right_raw_depth); float bottom_depth = RawToCalibratedDepth( depth_params.a, depth_params.cfactor_buffer((y + 1) / depth_params.sparse_surfel_cell_size, x / depth_params.sparse_surfel_cell_size), depth_params.raw_to_float_depth, bottom_raw_depth); float3 left_point = unprojector.UnprojectPoint(x - 1, y, left_depth); float3 top_point = unprojector.UnprojectPoint(x, y - 1, top_depth); float3 right_point = unprojector.UnprojectPoint(x + 1, y, right_depth); float3 bottom_point = unprojector.UnprojectPoint(x, y + 1, bottom_depth); float3 center_point = unprojector.UnprojectPoint(x, y, center_depth); constexpr float kRatioThreshold = 2.f; constexpr float kRatioThresholdSquared = kRatioThreshold * kRatioThreshold; float left_dist_squared = SquaredLength(left_point - center_point); float right_dist_squared = SquaredLength(right_point - center_point); float left_right_ratio = left_dist_squared / right_dist_squared; float3 left_to_right; if (left_right_ratio < kRatioThresholdSquared && left_right_ratio > 1.f / kRatioThresholdSquared) { left_to_right = right_point - left_point; } else if (left_dist_squared < right_dist_squared) { left_to_right = center_point - left_point; } else { // left_dist_squared >= right_dist_squared left_to_right = right_point - center_point; } float bottom_dist_squared = SquaredLength(bottom_point - center_point); float top_dist_squared = SquaredLength(top_point - center_point); float bottom_top_ratio = bottom_dist_squared / top_dist_squared; float3 bottom_to_top; if (bottom_top_ratio < kRatioThresholdSquared && bottom_top_ratio > 1.f / kRatioThresholdSquared) { bottom_to_top = top_point - bottom_point; } else if (bottom_dist_squared < top_dist_squared) { bottom_to_top = center_point - bottom_point; } else { // bottom_dist_squared >= top_dist_squared bottom_to_top = top_point - center_point; } float3 normal; CrossProduct(left_to_right, bottom_to_top, &normal); float length = Norm(normal); if (!(length > 1e-6f)) { normal = make_float3(0, 0, -1); // avoid NaNs } else { // This accounts for negative fy in ICL-NUIM data. Though such weird // things should best be avoided in dataset creation ... float inv_length = ((unprojector.fy_inv < 0) ? -1.0f : 1.0f) / length; normal.x *= inv_length; normal.y *= inv_length; // normal.z *= inv_length; // not used later, thus not assigned. 
} out_normals(y, x) = ImageSpaceNormalToU16(normal.x, normal.y); out_depth(y, x) = in_depth(y, x); } } void ComputeNormalsCUDA( cudaStream_t stream, const PixelCenterUnprojector& unprojector, const DepthParameters& depth_params, const CUDABuffer_<u16>& input_depth, CUDABuffer_<u16>* output_depth, CUDABuffer_<u16>* normals_buffer) { CUDA_CHECK(); CUDA_AUTO_TUNE_2D( ComputeNormalsCUDAKernel, 32, 32, output_depth->width(), output_depth->height(), 0, stream, /* kernel parameters */ unprojector, depth_params, input_depth, *output_depth, *normals_buffer); CUDA_CHECK(); } // ----------------------------------------------------------------------------- // Computes the minimum squared distance of the point to one of its neighbor // points, which is used as the point radius. __forceinline__ __device__ float ComputePointRadius( float fx_inv, float fy_inv, float cx_inv, float cy_inv, float raw_to_float_depth, const CUDABuffer_<u16>& depth_buffer, unsigned int x, unsigned int y, u16 depth_u16, int* neighbor_count) { float depth = raw_to_float_depth * depth_u16; float3 local_position = make_float3(depth * (fx_inv * x + cx_inv), depth * (fy_inv * y + cy_inv), depth); // Determine the radius of the pixel's 3D point as the minimum distance to // a point from its 4-neighborhood. *neighbor_count = 0; float min_neighbor_distance_squared = CUDART_INF_F; for (int dy = y - 1, end_dy = y + 2; dy < end_dy; ++ dy) { for (int dx = x - 1, end_dx = x + 2; dx < end_dx; ++ dx) { u16 d_depth = depth_buffer(dy, dx); if ((dx != x && dy != y) || // no diagonal directions (dx == x && dy == y) || d_depth & kInvalidDepthBit) { continue; } ++ (*neighbor_count); float ddepth = raw_to_float_depth * d_depth; float3 other_point = make_float3(ddepth * (fx_inv * dx + cx_inv), ddepth * (fy_inv * dy + cy_inv), ddepth); float3 local_to_other = other_point - local_position; float distance_squared = SquaredLength(local_to_other); if (distance_squared < min_neighbor_distance_squared) { min_neighbor_distance_squared = distance_squared; } } } return min_neighbor_distance_squared; } template <int min_neighbors_for_radius_computation> __global__ void ComputePointRadiiAndRemoveIsolatedPixelsCUDAKernel( float fx_inv, float fy_inv, float cx_inv, float cy_inv, float raw_to_float_depth, CUDABuffer_<u16> depth_buffer, CUDABuffer_<u16> radius_buffer, CUDABuffer_<u16> out_depth) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < depth_buffer.width() && y < depth_buffer.height()) { const u16 depth_u16 = depth_buffer(y, x); if (depth_u16 & kInvalidDepthBit) { out_depth(y, x) = kUnknownDepth; return; } int neighbor_count; float point_radius = ComputePointRadius( fx_inv, fy_inv, cx_inv, cy_inv, raw_to_float_depth, depth_buffer, x, y, depth_u16, &neighbor_count); // Require all neighbors to have depth values. bool valid = neighbor_count >= min_neighbors_for_radius_computation; radius_buffer(y, x) = __half_as_ushort(__float2half_rn(valid ? point_radius : 0)); out_depth(y, x) = valid ? 
depth_u16 : kUnknownDepth; } } void ComputePointRadiiAndRemoveIsolatedPixelsCUDA( cudaStream_t stream, const PixelCenterUnprojector& unprojector, float raw_to_float_depth, const CUDABuffer_<u16>& depth_buffer, CUDABuffer_<u16>* radius_buffer, CUDABuffer_<u16>* out_depth) { CUDA_CHECK(); constexpr int kMinNeighborsForRadiusComputation = 4; CUDA_AUTO_TUNE_2D_TEMPLATED( ComputePointRadiiAndRemoveIsolatedPixelsCUDAKernel, 32, 32, depth_buffer.width(), depth_buffer.height(), 0, stream, TEMPLATE_ARGUMENTS(kMinNeighborsForRadiusComputation), /* kernel parameters */ unprojector.fx_inv, unprojector.fy_inv, unprojector.cx_inv, unprojector.cy_inv, raw_to_float_depth, depth_buffer, *radius_buffer, *out_depth); CUDA_CHECK(); } // ----------------------------------------------------------------------------- template <int block_width, int block_height> __global__ void ComputeMinMaxDepthCUDAKernel( float raw_to_float_depth, CUDABuffer_<u16> depth_buffer, CUDABuffer_<float> result_buffer) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; float depth = CUDART_NAN_F; if (x < depth_buffer.width() && y < depth_buffer.height()) { const u16 depth_u16 = depth_buffer(y, x); if (!(depth_u16 & kInvalidDepthBit)) { depth = raw_to_float_depth * depth_u16; } } typedef cub::BlockReduce<float, block_width, cub::BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, block_height> BlockReduceFloat; __shared__ typename BlockReduceFloat::TempStorage float_storage; const float min_depth = BlockReduceFloat(float_storage).Reduce(::isnan(depth) ? CUDART_INF_F : depth, cub::Min()); // TODO: Would it be faster to use different shared memory buffers for the reduce operations to avoid the __syncthreads() call? __syncthreads(); // Required before re-use of shared memory. const float max_depth = BlockReduceFloat(float_storage).Reduce(::isnan(depth) ? 0 : depth, cub::Max()); if (threadIdx.x == 0) { // Should behave properly as long as all the floats are positive. atomicMin(reinterpret_cast<int*>(&result_buffer(0, 0)), __float_as_int(min_depth)); atomicMax(reinterpret_cast<int*>(&result_buffer(0, 1)), __float_as_int(max_depth)); } } void ComputeMinMaxDepthCUDA( cudaStream_t stream, const CUDABuffer_<u16>& depth_buffer, float raw_to_float_depth, const CUDABuffer_<float>& init_buffer, CUDABuffer_<float>* result_buffer, float* keyframe_min_depth, float* keyframe_max_depth) { CUDA_CHECK(); cudaMemcpyAsync(result_buffer->address(), init_buffer.address(), 2 * sizeof(float), cudaMemcpyDeviceToDevice, stream); CUDA_AUTO_TUNE_2D_TEMPLATED( ComputeMinMaxDepthCUDAKernel, 32, 32, depth_buffer.width(), depth_buffer.height(), 0, stream, TEMPLATE_ARGUMENTS(block_width, block_height), /* kernel parameters */ raw_to_float_depth, depth_buffer, *result_buffer); CUDA_CHECK(); float results_cpu[2]; cudaMemcpyAsync(results_cpu, result_buffer->address(), 2 * sizeof(float), cudaMemcpyDeviceToHost, stream); cudaStreamSynchronize(stream); *keyframe_min_depth = results_cpu[0]; *keyframe_max_depth = results_cpu[1]; } }
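// ComputeMinMaxDepthCUDAKernel above applies atomicMin/atomicMax to int views
// of float values ("should behave properly as long as all the floats are
// positive"). The reason is that for non-negative IEEE-754 floats the bit
// pattern, read as a signed int, increases monotonically with the float value,
// so the integer min/max coincides with the float min/max. A host-only check
// of that property (illustrative sketch, not part of badslam):
#include <cstdio>
#include <cstring>

static int FloatAsInt(float f) {
  int i;
  std::memcpy(&i, &f, sizeof(i));  // same reinterpretation as __float_as_int()
  return i;
}

int main() {
  const float depths[] = {0.0f, 0.25f, 1.0f, 1.5f, 7.75f, 100.0f};
  for (int k = 1; k < 6; ++k) {
    // For positive floats a < b implies FloatAsInt(a) < FloatAsInt(b).
    std::printf("%g < %g : int order %s\n", depths[k - 1], depths[k],
                FloatAsInt(depths[k - 1]) < FloatAsInt(depths[k])
                    ? "preserved" : "broken");
  }
  return 0;
}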
#include <gtest/gtest.h> #include <string> #include <tuple> #include <utility> #include <vector> #include "dali/core/cuda_event.h" #include "dali/core/cuda_stream.h" #include "dali/operators/math/expressions/arithmetic_meta.h" #include "dali/operators/math/expressions/expression_impl_gpu.cuh" #include "dali/test/dali_operator_test.h" #include "dali/test/tensor_test_utils.h" #include "dali/test/test_tensors.h" namespace dali { template <ArithmeticOp op_ = ArithmeticOp::add, typename Result_ = float, typename Left_ = float, typename Right_ = float, int IsLeftTensor_ = true, int IsRightTensor_ = false, int blocks_x_ = 128, int thread_num_ = 32, int batch_size_ = 256, int tile_size_ = 65536, int sample_size_ = 1024 * 1024> struct ArithmOpParams { static constexpr ArithmeticOp op = op_; using Result = Result_; using Left = Left_; using Right = Right_; static constexpr int IsLeftTensor = IsLeftTensor_; static constexpr int IsRightTensor = IsRightTensor_; static constexpr int blocks_x = blocks_x_; static constexpr int thread_num = thread_num_; static constexpr int batch_size = batch_size_; static constexpr int tile_size = tile_size_; static constexpr int sample_size = sample_size_; static constexpr int tiles_per_sample = sample_size / tile_size; static constexpr int num_tiles = batch_size * tiles_per_sample; static_assert(sample_size >= tile_size, "This test doesn't support samples smaller than tiles."); }; template <typename TestConfig> struct BinaryArithmeticOpGpuPerfTest : public ::testing::Test { void SetUp() override { stream = CUDAStream::Create(true); /// Fill tile descriptors (shapes) tile_descs.resize(TestConfig::num_tiles); for (int sample_id = 0; sample_id < TestConfig::batch_size; sample_id++) { for (int extent_id = 0; extent_id < TestConfig::tiles_per_sample; extent_id++) { int tile_id = sample_id * TestConfig::tiles_per_sample + extent_id; tile_descs[tile_id].sample_idx = sample_id; tile_descs[tile_id].extent_idx = extent_id; tile_descs[tile_id].tile_size = TestConfig::tile_size; tile_descs[tile_id].extent_size = TestConfig::tile_size; } } // Reshape memory for those tiles result.reshape(uniform_list_shape<1>(TestConfig::batch_size, {TestConfig::tile_size * TestConfig::tiles_per_sample})); if (TestConfig::IsLeftTensor) { left.reshape(uniform_list_shape<1>(TestConfig::batch_size, {TestConfig::tile_size * TestConfig::tiles_per_sample})); } else { left.reshape(uniform_list_shape<1>(TestConfig::batch_size, {1})); } if (TestConfig::IsRightTensor) { right.reshape(uniform_list_shape<1>(TestConfig::batch_size, {TestConfig::tile_size * TestConfig::tiles_per_sample})); } else { right.reshape(uniform_list_shape<1>(TestConfig::batch_size, {1})); } Left l{}; Right r{}; auto fill_left = [&l]() { return l += 1; }; auto fill_right = [&r]() { return r += 1; }; Fill(left.cpu(), fill_left); Fill(right.cpu(), fill_right); // Fill pointers for tiles tiles_data.reshape(uniform_list_shape<1>(1, {TestConfig::num_tiles})); auto tiles_cpu = tiles_data.cpu()[0]; // TestTensorList just allocates memory, this can leave SmallVector in weird state memset(tiles_cpu.data, 0, TestConfig::num_tiles * sizeof(ExtendedTileDesc)); for (int sample_id = 0; sample_id < TestConfig::batch_size; sample_id++) { for (int extent_id = 0; extent_id < TestConfig::tiles_per_sample; extent_id++) { int tile_id = sample_id * TestConfig::tiles_per_sample + extent_id; tiles_cpu(tile_id)->desc = tile_descs[tile_id]; tiles_cpu(tile_id)->output = result.gpu(stream)[sample_id].data + extent_id * TestConfig::tile_size; 
tiles_cpu(tile_id)->args.resize(2); tiles_cpu(tile_id)->args[0] = left.gpu(stream)[sample_id].data + (TestConfig::IsLeftTensor ? extent_id * TestConfig::tile_size : 0); tiles_cpu(tile_id)->args[1] = right.gpu(stream)[sample_id].data + (TestConfig::IsRightTensor ? extent_id * TestConfig::tile_size : 0); } } tiles_gpu = tiles_data.gpu(stream)[0].data; } void MeasurePerf() { ExecuteTiledBinOp<TestConfig::op, Result, Left, Right, TestConfig::IsLeftTensor, TestConfig::IsRightTensor><<<grid, block, 0, stream>>>(tiles_gpu); CUDAEvent start = CUDAEvent::CreateWithFlags(0); CUDAEvent end = CUDAEvent::CreateWithFlags(0); CUDA_CALL(cudaEventRecord(start, stream)); constexpr int kIters = 100; for (int i = 0; i < kIters; i++) { ExecuteTiledBinOp<TestConfig::op, Result, Left, Right, TestConfig::IsLeftTensor, TestConfig::IsRightTensor><<<grid, block, 0, stream>>>(tiles_gpu); } CUDA_CALL(cudaEventRecord(end, stream)); CUDA_CALL(cudaDeviceSynchronize()); float time; CUDA_CALL(cudaEventElapsedTime(&time, start, end)); time *= (1e+6f / kIters); // convert to nanoseconds / 100 samples int64_t data_size = 0; data_size += static_cast<int64_t>(TestConfig::num_tiles) * TestConfig::tile_size * sizeof(Result); if (TestConfig::IsLeftTensor) data_size += static_cast<int64_t>(TestConfig::num_tiles) * TestConfig::tile_size * sizeof(Left); if (TestConfig::IsRightTensor) data_size += static_cast<int64_t>(TestConfig::num_tiles) * TestConfig::tile_size * sizeof(Right); std::cerr << "Throughput: " << data_size / time << " GB/s\n"; } using Result = typename TestConfig::Result; using Left = typename TestConfig::Left; using Right = typename TestConfig::Right; // For kernel launch dim3 grid = dim3(TestConfig::blocks_x, TestConfig::num_tiles, 1); dim3 block = dim3(TestConfig::thread_num, 1, 1); // Tiles and data std::vector<TileDesc> tile_descs; kernels::TestTensorList<ExtendedTileDesc, 1> tiles_data; kernels::TestTensorList<Result, 1> result; kernels::TestTensorList<Left, 1> left; kernels::TestTensorList<Right, 1> right; CUDAStream stream; const ExtendedTileDesc *tiles_gpu; }; TYPED_TEST_SUITE_P(BinaryArithmeticOpGpuPerfTest); TYPED_TEST_P(BinaryArithmeticOpGpuPerfTest, Perf) { std::cerr << "Blocks_x: " << TypeParam::blocks_x << ", thread_num: " << TypeParam::thread_num << ", tile_size: " << TypeParam::tile_size / 1024.f << "KB, sample_size: " << TypeParam::sample_size / 1048576.f << "MB" << std::endl; // TypeParam n = 0; this->MeasurePerf(); } REGISTER_TYPED_TEST_SUITE_P(BinaryArithmeticOpGpuPerfTest, Perf); using TestConfigs = ::testing::Types< // op, Result, Left, Right, IsLeftTensor, IsRightTensor, blocks_x, thread_num, batch, tile, // sample Test Tensor op Constant ArithmOpParams< // old config ArithmeticOp::add, float, float, float, true, false, 128, 256, 256, 16384, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 256, 32768, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 256, 65536, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 256, 131072, 1024 * 1024>, // test small input data, forcing 1 tile per sample, a bit bigger batch, // to measure how performs with smaller inputs ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 512, 16384, 16384>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 512, 32768, 32768>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 256, 512, 65536, 65536>, 
ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 256, 16384, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 256, 32768, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 256, 65536, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 256, 131072, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 512, 16384, 16384>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 512, 32768, 32768>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 64, 256, 512, 65536, 65536>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 256, 16384, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 256, 32768, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 256, 65536, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 256, 131072, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 512, 16384, 16384>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 512, 32768, 32768>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, false, 128, 128, 512, 65536, 65536>, // Test Tensor op Tensor ArithmOpParams< // old config ArithmeticOp::add, float, float, float, true, true, 128, 256, 256, 16384, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 128, 256, 256, 65536, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 128, 256, 512, 16384, 16384>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 64, 256, 256, 65536, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 64, 256, 512, 16384, 16384>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 128, 128, 256, 65536, 1024 * 1024>, ArithmOpParams<ArithmeticOp::add, float, float, float, true, true, 128, 128, 512, 16384, 16384>>; INSTANTIATE_TYPED_TEST_SUITE_P(BinaryArithmeticOpGpu, BinaryArithmeticOpGpuPerfTest, TestConfigs); } // namespace dali
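// MeasurePerf() above times the kernel with CUDA events: one warm-up launch,
// then kIters launches bracketed by cudaEventRecord(start) / cudaEventRecord(end),
// and bytes-moved divided by elapsed time gives the reported throughput. The
// stand-alone sketch below reproduces only that timing pattern; the add-constant
// kernel and the sizes are assumptions, not DALI code.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void AddConstKernel(float *out, const float *in, float c, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = in[i] + c;
}

int main() {
  const int n = 1 << 24;
  const int kIters = 100;
  float *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);

  AddConstKernel<<<(n + 255) / 256, 256>>>(d_out, d_in, 1.0f, n);  // warm-up
  cudaEventRecord(start);
  for (int i = 0; i < kIters; ++i)
    AddConstKernel<<<(n + 255) / 256, 256>>>(d_out, d_in, 1.0f, n);
  cudaEventRecord(end);
  cudaEventSynchronize(end);

  float ms = 0;
  cudaEventElapsedTime(&ms, start, end);            // milliseconds for kIters launches
  double bytes = 2.0 * n * sizeof(float) * kIters;  // one read + one write per element
  std::printf("Throughput: %.2f GB/s\n", bytes / (ms * 1e-3) / 1e9);

  cudaEventDestroy(start);
  cudaEventDestroy(end);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}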
extern "C" { #include "lua.h" #include "lualib.h" #include "lauxlib.h" } #include "luaT.h" #include "THC.h" #include <stdio.h> #include <assert.h> #include <math_constants.h> #include <math_functions.h> #include <stdint.h> #include <unistd.h> #define TB 256 #define EPS 1e-4 THCState* getCutorchState(lua_State* L) { lua_getglobal(L, "cutorch"); lua_getfield(L, -1, "getState"); lua_call(L, 0, 1); THCState *state = (THCState*) lua_touserdata(L, -1); lua_pop(L, 2); return state; } void checkCudaError(lua_State *L) { cudaError_t status = cudaPeekAtLastError(); if (status != cudaSuccess) { luaL_error(L, cudaGetErrorString(status)); } } THCudaTensor *new_tensor_like(THCState *state, THCudaTensor *x) { THCudaTensor *y = THCudaTensor_new(state); THCudaTensor_resizeAs(state, y, x); return y; } __global__ void matting_laplacian_kernel( float *input, float *grad, int h, int w, int *CSR_rowIdx, int *CSR_colIdx, float *CSR_val, int N ) { int size = h * w; int _id = blockIdx.x * blockDim.x + threadIdx.x; if (_id < size) { int x = _id % w, y = _id / w; int id = x * h + y; /// Because matting laplacian L is systematic, sum row is sufficient // 1.1 Binary search int start = 0; int end = N-1; int mid = (start + end)/2; int index = -1; while (start <= end) { int rowIdx = (CSR_rowIdx[mid]) - 1; if (rowIdx == id) { index = mid; break; } if (rowIdx > id) { end = mid - 1; mid = (start + end)/2; } else { start = mid + 1; mid = (start + end)/2; } } if (index != -1) { // 1.2 Complete range int index_s = index, index_e = index; while ( index_s >= 0 && ((CSR_rowIdx[index_s] - 1) == id) ) index_s--; while ( index_e < N && ((CSR_rowIdx[index_e] - 1) == id) ) index_e++; // 1.3 Sum this row for (int i = index_s + 1; i < index_e; i++) { //int rowIdx = CSR_rowIdx[i] - 1; int _colIdx = (CSR_colIdx[i]) - 1; float val = CSR_val[i]; int _x = _colIdx / h, _y = _colIdx % h; int colIdx = _y *w + _x; grad[_id] += 2*val * input[colIdx]; grad[_id + size] += 2*val * input[colIdx + size]; grad[_id + 2*size] += 2*val * input[colIdx + 2*size]; } } } return ; } //cuda_utils.matting_laplacian(input, h, w, CSR_rowIdx, CSR_colIdx, CSR_val, CSC_rowIdx, CSC_colIdx, CSC_val, N) int matting_laplacian(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); int h = luaL_checknumber(L, 2); int w = luaL_checknumber(L, 3); THCudaIntTensor *CSR_rowIdx = (THCudaIntTensor*)luaT_checkudata(L, 4, "torch.CudaIntTensor"); THCudaIntTensor *CSR_colIdx = (THCudaIntTensor*)luaT_checkudata(L, 5, "torch.CudaIntTensor"); THCudaTensor *CSR_val = (THCudaTensor*)luaT_checkudata(L, 6, "torch.CudaTensor"); int N = luaL_checknumber(L, 7); THCudaTensor *grad = new_tensor_like(state, input); THCudaTensor_zero(state, grad); matting_laplacian_kernel<<<(h*w-1)/TB+1, TB>>>( THCudaTensor_data(state, input), THCudaTensor_data(state, grad), h, w, THCudaIntTensor_data(state, CSR_rowIdx), THCudaIntTensor_data(state, CSR_colIdx), THCudaTensor_data(state, CSR_val), N ); checkCudaError(L); luaT_pushudata(L, grad, "torch.CudaTensor"); return 1; } __device__ bool InverseMat4x4(double m_in[4][4], double inv_out[4][4]) { double m[16], inv[16]; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { m[i * 4 + j] = m_in[i][j]; } } inv[0] = m[5] * m[10] * m[15] - m[5] * m[11] * m[14] - m[9] * m[6] * m[15] + m[9] * m[7] * m[14] + m[13] * m[6] * m[11] - m[13] * m[7] * m[10]; inv[4] = -m[4] * m[10] * m[15] + m[4] * m[11] * m[14] + m[8] * m[6] * m[15] - m[8] * m[7] * m[14] - m[12] * m[6] * m[11] + m[12] * m[7] * 
m[10]; inv[8] = m[4] * m[9] * m[15] - m[4] * m[11] * m[13] - m[8] * m[5] * m[15] + m[8] * m[7] * m[13] + m[12] * m[5] * m[11] - m[12] * m[7] * m[9]; inv[12] = -m[4] * m[9] * m[14] + m[4] * m[10] * m[13] + m[8] * m[5] * m[14] - m[8] * m[6] * m[13] - m[12] * m[5] * m[10] + m[12] * m[6] * m[9]; inv[1] = -m[1] * m[10] * m[15] + m[1] * m[11] * m[14] + m[9] * m[2] * m[15] - m[9] * m[3] * m[14] - m[13] * m[2] * m[11] + m[13] * m[3] * m[10]; inv[5] = m[0] * m[10] * m[15] - m[0] * m[11] * m[14] - m[8] * m[2] * m[15] + m[8] * m[3] * m[14] + m[12] * m[2] * m[11] - m[12] * m[3] * m[10]; inv[9] = -m[0] * m[9] * m[15] + m[0] * m[11] * m[13] + m[8] * m[1] * m[15] - m[8] * m[3] * m[13] - m[12] * m[1] * m[11] + m[12] * m[3] * m[9]; inv[13] = m[0] * m[9] * m[14] - m[0] * m[10] * m[13] - m[8] * m[1] * m[14] + m[8] * m[2] * m[13] + m[12] * m[1] * m[10] - m[12] * m[2] * m[9]; inv[2] = m[1] * m[6] * m[15] - m[1] * m[7] * m[14] - m[5] * m[2] * m[15] + m[5] * m[3] * m[14] + m[13] * m[2] * m[7] - m[13] * m[3] * m[6]; inv[6] = -m[0] * m[6] * m[15] + m[0] * m[7] * m[14] + m[4] * m[2] * m[15] - m[4] * m[3] * m[14] - m[12] * m[2] * m[7] + m[12] * m[3] * m[6]; inv[10] = m[0] * m[5] * m[15] - m[0] * m[7] * m[13] - m[4] * m[1] * m[15] + m[4] * m[3] * m[13] + m[12] * m[1] * m[7] - m[12] * m[3] * m[5]; inv[14] = -m[0] * m[5] * m[14] + m[0] * m[6] * m[13] + m[4] * m[1] * m[14] - m[4] * m[2] * m[13] - m[12] * m[1] * m[6] + m[12] * m[2] * m[5]; inv[3] = -m[1] * m[6] * m[11] + m[1] * m[7] * m[10] + m[5] * m[2] * m[11] - m[5] * m[3] * m[10] - m[9] * m[2] * m[7] + m[9] * m[3] * m[6]; inv[7] = m[0] * m[6] * m[11] - m[0] * m[7] * m[10] - m[4] * m[2] * m[11] + m[4] * m[3] * m[10] + m[8] * m[2] * m[7] - m[8] * m[3] * m[6]; inv[11] = -m[0] * m[5] * m[11] + m[0] * m[7] * m[9] + m[4] * m[1] * m[11] - m[4] * m[3] * m[9] - m[8] * m[1] * m[7] + m[8] * m[3] * m[5]; inv[15] = m[0] * m[5] * m[10] - m[0] * m[6] * m[9] - m[4] * m[1] * m[10] + m[4] * m[2] * m[9] + m[8] * m[1] * m[6] - m[8] * m[2] * m[5]; double det = m[0] * inv[0] + m[1] * inv[4] + m[2] * inv[8] + m[3] * inv[12]; if (abs(det) < 1e-9) { return false; } det = 1.0 / det; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { inv_out[i][j] = inv[i * 4 + j] * det; } } return true; } __global__ void best_local_affine_kernel( float *output, float *input, float *affine_model, int h, int w, float epsilon, int kernel_radius ) { int size = h * w; int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % w, y = id / w; double Mt_M[4][4] = {}; // 4x4 double invMt_M[4][4] = {}; double Mt_S[3][4] = {}; // RGB -> 1x4 double A[3][4] = {}; for (int i = 0; i < 4; i++) for (int j = 0; j < 4; j++) { Mt_M[i][j] = 0, invMt_M[i][j] = 0; if (i != 3) { Mt_S[i][j] = 0, A[i][j] = 0; if (i == j) Mt_M[i][j] = 1e-3; } } for (int dy = -kernel_radius; dy <= kernel_radius; dy++) { for (int dx = -kernel_radius; dx <= kernel_radius; dx++) { int xx = x + dx, yy = y + dy; int id2 = yy * w + xx; if (0 <= xx && xx < w && 0 <= yy && yy < h) { Mt_M[0][0] += input[id2 + 2*size] * input[id2 + 2*size]; Mt_M[0][1] += input[id2 + 2*size] * input[id2 + size]; Mt_M[0][2] += input[id2 + 2*size] * input[id2]; Mt_M[0][3] += input[id2 + 2*size]; Mt_M[1][0] += input[id2 + size] * input[id2 + 2*size]; Mt_M[1][1] += input[id2 + size] * input[id2 + size]; Mt_M[1][2] += input[id2 + size] * input[id2]; Mt_M[1][3] += input[id2 + size]; Mt_M[2][0] += input[id2] * input[id2 + 2*size]; Mt_M[2][1] += input[id2] * input[id2 + size]; Mt_M[2][2] += input[id2] * input[id2]; Mt_M[2][3] += input[id2]; Mt_M[3][0] += 
input[id2 + 2*size]; Mt_M[3][1] += input[id2 + size]; Mt_M[3][2] += input[id2]; Mt_M[3][3] += 1; Mt_S[0][0] += input[id2 + 2*size] * output[id2 + 2*size]; Mt_S[0][1] += input[id2 + size] * output[id2 + 2*size]; Mt_S[0][2] += input[id2] * output[id2 + 2*size]; Mt_S[0][3] += output[id2 + 2*size]; Mt_S[1][0] += input[id2 + 2*size] * output[id2 + size]; Mt_S[1][1] += input[id2 + size] * output[id2 + size]; Mt_S[1][2] += input[id2] * output[id2 + size]; Mt_S[1][3] += output[id2 + size]; Mt_S[2][0] += input[id2 + 2*size] * output[id2]; Mt_S[2][1] += input[id2 + size] * output[id2]; Mt_S[2][2] += input[id2] * output[id2]; Mt_S[2][3] += output[id2]; } } } bool success = InverseMat4x4(Mt_M, invMt_M); for (int i = 0; i < 3; i++) { for (int j = 0; j < 4; j++) { for (int k = 0; k < 4; k++) { A[i][j] += invMt_M[j][k] * Mt_S[i][k]; } } } for (int i = 0; i < 3; i++) { for (int j = 0; j < 4; j++) { int affine_id = i * 4 + j; affine_model[12 * id + affine_id] = A[i][j]; } } } return ; } __global__ void bilateral_smooth_kernel( float *affine_model, float *filtered_affine_model, float *guide, int h, int w, int kernel_radius, float sigma1, float sigma2 ) { int id = blockIdx.x * blockDim.x + threadIdx.x; int size = h * w; if (id < size) { int x = id % w; int y = id / w; double sum_affine[12] = {}; double sum_weight = 0; for (int dx = -kernel_radius; dx <= kernel_radius; dx++) { for (int dy = -kernel_radius; dy <= kernel_radius; dy++) { int yy = y + dy, xx = x + dx; int id2 = yy * w + xx; if (0 <= xx && xx < w && 0 <= yy && yy < h) { float color_diff1 = guide[yy*w + xx] - guide[y*w + x]; float color_diff2 = guide[yy*w + xx + size] - guide[y*w + x + size]; float color_diff3 = guide[yy*w + xx + 2*size] - guide[y*w + x + 2*size]; float color_diff_sqr = (color_diff1*color_diff1 + color_diff2*color_diff2 + color_diff3*color_diff3) / 3; float v1 = exp(-(dx * dx + dy * dy) / (2 * sigma1 * sigma1)); float v2 = exp(-(color_diff_sqr) / (2 * sigma2 * sigma2)); float weight = v1 * v2; for (int i = 0; i < 3; i++) { for (int j = 0; j < 4; j++) { int affine_id = i * 4 + j; sum_affine[affine_id] += weight * affine_model[id2*12 + affine_id]; } } sum_weight += weight; } } } for (int i = 0; i < 3; i++) { for (int j = 0; j < 4; j++) { int affine_id = i * 4 + j; filtered_affine_model[id*12 + affine_id] = sum_affine[affine_id] / sum_weight; } } } return ; } __global__ void reconstruction_best_kernel( float *input, float *filtered_affine_model, float *filtered_best_output, int h, int w ) { int id = blockIdx.x * blockDim.x + threadIdx.x; int size = h * w; if (id < size) { double out1 = input[id + 2*size] * filtered_affine_model[id*12 + 0] + // A[0][0] + input[id + size] * filtered_affine_model[id*12 + 1] + // A[0][1] + input[id] * filtered_affine_model[id*12 + 2] + // A[0][2] + filtered_affine_model[id*12 + 3]; //A[0][3]; double out2 = input[id + 2*size] * filtered_affine_model[id*12 + 4] + //A[1][0] + input[id + size] * filtered_affine_model[id*12 + 5] + //A[1][1] + input[id] * filtered_affine_model[id*12 + 6] + //A[1][2] + filtered_affine_model[id*12 + 7]; //A[1][3]; double out3 = input[id + 2*size] * filtered_affine_model[id*12 + 8] + //A[2][0] + input[id + size] * filtered_affine_model[id*12 + 9] + //A[2][1] + input[id] * filtered_affine_model[id*12 + 10] + //A[2][2] + filtered_affine_model[id*12 + 11]; // A[2][3]; filtered_best_output[id] = out1; filtered_best_output[id + size] = out2; filtered_best_output[id + 2*size] = out3; } return ; } // local best01 = cuda_utils.smooth_local_affine(output01, input01, epsilon, patch, h, w, 
filter_radius, sigma1, sigma2) int smooth_local_affine(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *output = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); float epsilon = luaL_checknumber(L, 3); int patch = luaL_checknumber(L, 4); int h = luaL_checknumber(L, 5); int w = luaL_checknumber(L, 6); int f_r = luaL_checknumber(L, 7); float sigma1 = luaL_checknumber(L, 8); float sigma2 = luaL_checknumber(L, 9); THCudaTensor *filtered_best_output = new_tensor_like(state, input); THCudaTensor_zero(state, filtered_best_output); THCudaTensor *affine_model = THCudaTensor_new(state); THCudaTensor_resize2d(state, affine_model, h*w, 12); THCudaTensor_zero(state, affine_model); THCudaTensor *filtered_affine_model = THCudaTensor_new(state); THCudaTensor_resize2d(state, filtered_affine_model, h*w, 12); THCudaTensor_zero(state, filtered_affine_model); int radius = (patch-1) / 2; best_local_affine_kernel<<<(h*w)/TB+1, TB>>>( THCudaTensor_data(state, output), THCudaTensor_data(state, input), THCudaTensor_data(state, affine_model), h, w, epsilon, radius ); checkCudaError(L); bilateral_smooth_kernel<<<(h*w)/TB+1, TB>>>( THCudaTensor_data(state, affine_model), THCudaTensor_data(state, filtered_affine_model), THCudaTensor_data(state, input), h, w, f_r, sigma1, sigma2 ); checkCudaError(L); THCudaTensor_free(state, affine_model); reconstruction_best_kernel<<<(h*w)/TB+1, TB>>>( THCudaTensor_data(state, input), THCudaTensor_data(state, filtered_affine_model), THCudaTensor_data(state, filtered_best_output), h, w ); checkCudaError(L); THCudaTensor_free(state, filtered_affine_model); luaT_pushudata(L, filtered_best_output, "torch.CudaTensor"); return 1; } static const struct luaL_Reg funcs[] = { {"matting_laplacian", matting_laplacian}, {"smooth_local_affine", smooth_local_affine}, {NULL, NULL} }; extern "C" int luaopen_libcuda_utils(lua_State *L) { luaL_openlib(L, "cuda_utils", funcs, 0); return 1; }
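/* A minimal CPU reference for matting_laplacian_kernel above, handy for
 * spot-checking the GPU result on small images. It loops over the sparse
 * entries directly instead of binary-searching per pixel, which is
 * mathematically equivalent (grad = 2 * L * input, applied per channel).
 * Assumptions not taken from the original code: the host arrays mirror the
 * device data (1-based indices in column-major x*h+y pixel order), grad is
 * zero-initialized as in matting_laplacian(), and the function name and
 * plain-array signature are hypothetical. */
static void matting_laplacian_cpu(const float *input, float *grad,
                                  int h, int w,
                                  const int *rowIdx, const int *colIdx,
                                  const float *val, int N) {
    const int size = h * w;
    for (int i = 0; i < N; ++i) {
        // 1-based, column-major sparse indices -> 0-based, row-major pixel ids,
        // mirroring the index arithmetic done inside the kernel.
        int r = rowIdx[i] - 1, c = colIdx[i] - 1;
        int out_id = (r % h) * w + (r / h);   // (y, x) of the row entry
        int in_id  = (c % h) * w + (c / h);   // (y, x) of the column entry
        for (int ch = 0; ch < 3; ++ch)        // planar 3-channel layout, as in the kernel
            grad[out_id + ch * size] += 2.0f * val[i] * input[in_id + ch * size];
    }
}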
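/* Minimal CPU sketch of the final reconstruction step performed by
 * reconstruction_best_kernel above: each output channel of a pixel is row c
 * of its smoothed 3x4 affine model dotted with [input_ch2, input_ch1,
 * input_ch0, 1]. The planar layout and the 12-coefficients-per-pixel,
 * row-major storage of A follow the kernels above; the function name and
 * plain-array interface are hypothetical. */
static void apply_affine_cpu(const float *input, const float *affine12,
                             float *output, int h, int w) {
    const int size = h * w;
    for (int id = 0; id < size; ++id) {
        const float v[4] = { input[id + 2 * size], input[id + size], input[id], 1.0f };
        for (int c = 0; c < 3; ++c) {
            float acc = 0.0f;
            for (int j = 0; j < 4; ++j)
                acc += affine12[id * 12 + c * 4 + j] * v[j];
            output[id + c * size] = acc;
        }
    }
}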
#include "src/DeviceTensorUtils.h" #include "THCTensor.h" #include "cuda/CudaUtils.cuh" #include "cuda/DeviceTensor.cuh" #include "cuda/MemoryAccess.cuh" #include "cuda/util/CachedDeviceProperties.h" #define ENABLE_CUDA_DEBUG #include "cuda/CudaDebugUtils.cuh" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <glog/logging.h> using namespace facebook::cuda; namespace facebook { namespace deeplearning { namespace torch { #define LOG_TARGET VLOG(1) // LOG(INFO) template<typename T, bool affine, typename ComputeT = float> __global__ void BatchNormalizationUpdateOutputInferenceUnrolled_kernel( const DeviceTensor<T, 2> input, DeviceTensor<T, 2> output, DeviceTensor<T, 1> runningMean, DeviceTensor<T, 1> runningStddev, const DeviceTensor<T, 1> weight, const DeviceTensor<T, 1> bias) { static_assert(std::is_same<ComputeT, double>::value , "type"); auto batch = blockIdx.y; auto x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= input.getSize(1)) { return; } // stddev is actually 1 / stddev ComputeT stddev = runningStddev[x].ldg(); ComputeT mean = runningMean[x].ldg(); ComputeT inp = input[batch][x].ldg(); if (affine) { // multiply with gamma and add beta // TODO: everyone pulling this, optimize by reusing better ComputeT beta = bias[x].ldg(); ComputeT gamma = weight[x].ldg(); output[batch][x] = gamma * (inp - mean) * (stddev) + beta; } else { output[batch][x] = (inp - mean) * (stddev); } } template<typename T, bool affine, typename ComputeT = float> __global__ void BatchNormalizationUpdateOutput_kernel( const DeviceTensor<T, 2> input, DeviceTensor<T, 2> output, DeviceTensor<T, 2> centered, DeviceTensor<T, 1> std, DeviceTensor<T, 2> normalized, DeviceTensor<T, 1> runningMean, DeviceTensor<T, 1> runningStddev, const DeviceTensor<T, 1> weight, const DeviceTensor<T, 1> bias, T epsilon, T momentum) { static_assert(std::is_same<ComputeT, double>::value , "type"); auto x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= output.getSize(1)) { return; } ComputeT norm = (ComputeT)1 / input.getSize(0); ComputeT batchMean = (ComputeT)0; for (auto batch = 0; batch < output.getSize(0); ++batch) { ComputeT b = input[batch][x].ldg(); batchMean += b; } batchMean *= norm; runningMean[x] = (1 - momentum) * runningMean[x] + momentum * batchMean; ComputeT stdMean = (ComputeT)0; for (auto batch = 0; batch < output.getSize(0); ++batch) { ComputeT inp = input[batch][x].ldg() ; centered[batch][x] = inp - batchMean; stdMean += (inp - batchMean) * (inp - batchMean); } stdMean = 1 / sqrt(stdMean * norm + epsilon); std[x] = stdMean; runningStddev[x] = (1 - momentum) * runningStddev[x] + momentum * stdMean; for (auto batch = 0; batch < output.getSize(0); ++batch) { output[batch][x] = centered[batch][x] * stdMean; normalized[batch][x] = centered[batch][x] * stdMean; if (affine) { ComputeT beta = bias[x]; ComputeT gamma = weight[x]; output[batch][x] = gamma * output[batch][x] + beta; } } } template<typename T, int BatchDims, int ImageDims, bool train, bool affine, typename ComputeT = float> void BatchNormalizationUpdateOutput( const DeviceTensor<T, BatchDims + ImageDims> input, DeviceTensor<T, BatchDims + ImageDims> output, DeviceTensor<T, BatchDims + ImageDims> centered, DeviceTensor<T, 1> std, DeviceTensor<T, BatchDims + ImageDims> normalized, DeviceTensor<T, 1> runningMean, DeviceTensor<T, 1> runningStddev, const DeviceTensor<T, 1> weight, const DeviceTensor<T, 1> bias, T epsilon, T momentum, cudaStream_t s) { static_assert(BatchDims == 2, "BatchDims == 2 only atm"); static_assert(ImageDims == 0, 
"ImageDims == 0 only atm"); dim3 threads(128); // auto prop = getCurrentDeviceProperties(); if (!train) { dim3 blocks(ceil(input.getSize(1), 128), input.getSize(0)); LOG_TARGET << blocks.x << " " << blocks.y << " " << blocks.z << " " << threads.x << " " << threads.y << " " << threads.z; BatchNormalizationUpdateOutputInferenceUnrolled_kernel <T, affine, ComputeT> <<<blocks, threads, 0, s>>> (input, output, runningMean, runningStddev, weight, bias); } else { dim3 blocks(ceil(input.getSize(1), 128)); LOG_TARGET << blocks.x << " " << blocks.y << " " << blocks.z << " " << threads.x << " " << threads.y << " " << threads.z; BatchNormalizationUpdateOutput_kernel<T, affine, ComputeT> <<<blocks, threads, 0, s>>>(input, output, centered, std, normalized, runningMean, runningStddev, weight, bias, epsilon, momentum); } } extern "C" void BatchNormalizationUpdateOutputFFI( THCState* state, THCudaTensor* input, THCudaTensor* output, THCudaTensor* centered, THCudaTensor* std, THCudaTensor* normalized, THCudaTensor* runningMean, THCudaTensor* runningStddev, THCudaTensor* weight, THCudaTensor* bias, float epsilon, float momentum, bool train, bool affine) { // The BatchNormalization lua module is designed for // 2-D only: batch, plane constexpr int BatchDims = 2; constexpr int ImageDims = 0; typedef double ComputeT; if (!train) { if (!affine) { // Collapse BatchNormalizationUpdateOutput <float, BatchDims, ImageDims, false, false, ComputeT> ( torchToDeviceTensor<float, BatchDims + ImageDims>(state, input), torchToDeviceTensor<float, BatchDims + ImageDims>(state, output), DeviceTensor<float, BatchDims + ImageDims>(), DeviceTensor<float, 1>(), DeviceTensor<float, BatchDims + ImageDims>(), torchToDeviceTensor<float, 1>(state, runningMean), torchToDeviceTensor<float, 1>(state, runningStddev), DeviceTensor<float, 1>(), DeviceTensor<float, 1>(), epsilon, momentum, THCState_getCurrentStream(state) ); } else { // Collapse BatchNormalizationUpdateOutput <float, BatchDims, ImageDims, false, true, ComputeT> ( torchToDeviceTensor<float, BatchDims + ImageDims>(state, input), torchToDeviceTensor<float, BatchDims + ImageDims>(state, output), DeviceTensor<float, BatchDims + ImageDims>(), DeviceTensor<float, 1>(), DeviceTensor<float, BatchDims + ImageDims>(), torchToDeviceTensor<float, 1>(state, runningMean), torchToDeviceTensor<float, 1>(state, runningStddev), torchToDeviceTensor<float, 1>(state, weight), torchToDeviceTensor<float, 1>(state, bias), epsilon, momentum, THCState_getCurrentStream(state) ); } } else { if (!affine) { BatchNormalizationUpdateOutput <float, BatchDims, ImageDims, true, false, ComputeT> ( torchToDeviceTensor<float, BatchDims + ImageDims>(state, input), torchToDeviceTensor<float, BatchDims + ImageDims>(state, output), torchToDeviceTensor<float, BatchDims + ImageDims>(state, centered), torchToDeviceTensor<float, 1>(state, std), torchToDeviceTensor<float, BatchDims + ImageDims>(state, normalized), torchToDeviceTensor<float, 1>(state, runningMean), torchToDeviceTensor<float, 1>(state, runningStddev), DeviceTensor<float, 1>(), DeviceTensor<float, 1>(), epsilon, momentum, THCState_getCurrentStream(state) ); } else { BatchNormalizationUpdateOutput <float, BatchDims, ImageDims, true, true, ComputeT> ( torchToDeviceTensor<float, BatchDims + ImageDims>(state, input), torchToDeviceTensor<float, BatchDims + ImageDims>(state, output), torchToDeviceTensor<float, BatchDims + ImageDims>(state, centered), torchToDeviceTensor<float, 1>(state, std), torchToDeviceTensor<float, BatchDims + ImageDims>(state, normalized), 
torchToDeviceTensor<float, 1>(state, runningMean), torchToDeviceTensor<float, 1>(state, runningStddev), torchToDeviceTensor<float, 1>(state, weight), torchToDeviceTensor<float, 1>(state, bias), epsilon, momentum, THCState_getCurrentStream(state) ); } } THCudaCheck(cudaGetLastError()); } template<typename T, bool affine, typename ComputeT = float> __global__ void BatchNormalizationUpdateGradInput_kernel( DeviceTensor<T, 2> gradInput, const DeviceTensor<T, 2> gradOutput, DeviceTensor<T, 2> centered, DeviceTensor<T, 1> std, const DeviceTensor<T, 1> weight) { static_assert(std::is_same<ComputeT, double>::value , "type"); auto x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= gradOutput.getSize(1)) { return; } ComputeT norm = (ComputeT)1 / gradInput.getSize(0); ComputeT gradMean = (ComputeT)0; ComputeT centeredGradMean = (ComputeT)0; for (auto batch = 0; batch < gradOutput.getSize(0); ++batch) { ComputeT g = gradOutput[batch][x].ldg(); ComputeT c = centered[batch][x].ldg(); gradMean += g; centeredGradMean += c * g; } gradMean *= norm; centeredGradMean *= norm; ComputeT stdVal = std[x]; ComputeT weightVal = (ComputeT)0; if (affine) { weightVal = weight[x]; } for (auto batch = 0; batch < gradOutput.getSize(0); ++batch) { if (affine) { gradInput[batch][x] = ( - centeredGradMean * centered[batch][x] * stdVal * stdVal + gradOutput[batch][x] - gradMean ) * stdVal * weightVal; } else { gradInput[batch][x] = ( - centeredGradMean * centered[batch][x] * stdVal * stdVal + gradOutput[batch][x] - gradMean ) * stdVal; } } } template<typename T, int BatchDims, int ImageDims, bool affine, typename ComputeT = float> void BatchNormalizationUpdateGradInput( DeviceTensor<T, BatchDims + ImageDims> gradInput, const DeviceTensor<T, BatchDims + ImageDims> gradOutput, DeviceTensor<T, BatchDims + ImageDims> centered, DeviceTensor<T, 1> std, const DeviceTensor<T, 1> weight, cudaStream_t s) { static_assert(BatchDims == 2, "BatchDims == 2 only atm"); static_assert(ImageDims == 0, "ImageDims == 0 only atm"); dim3 blocks(ceil(gradOutput.getSize(1), 128)); dim3 threads(128); LOG_TARGET << blocks.x << " " << blocks.y << " " << blocks.z << " " << threads.x << " " << threads.y << " " << threads.z; BatchNormalizationUpdateGradInput_kernel<T, affine, ComputeT> <<<blocks, threads, 0, s>>>(gradInput, gradOutput, centered, std, weight); } extern "C" void BatchNormalizationUpdateGradInputFFI( THCState* state, THCudaTensor* gradInput, THCudaTensor* gradOutput, THCudaTensor* centered, THCudaTensor* std, THCudaTensor* weight, bool affine) { // The BatchNormalization lua module is designed for // 2-D only: batch, plane constexpr int BatchDims = 2; constexpr int ImageDims = 0; typedef double ComputeT; if (!affine) { // Collapse BatchNormalizationUpdateGradInput <float, BatchDims, ImageDims, false, ComputeT> ( torchToDeviceTensor<float, BatchDims + ImageDims>(state, gradInput), torchToDeviceTensor<float, BatchDims + ImageDims>(state, gradOutput), torchToDeviceTensor<float, BatchDims + ImageDims>(state, centered), torchToDeviceTensor<float, 1>(state, std), DeviceTensor<float, 1>(), THCState_getCurrentStream(state) ); } else { // Collapse BatchNormalizationUpdateGradInput <float, BatchDims, ImageDims, true, ComputeT> ( torchToDeviceTensor<float, BatchDims + ImageDims>(state, gradInput), torchToDeviceTensor<float, BatchDims + ImageDims>(state, gradOutput), torchToDeviceTensor<float, BatchDims + ImageDims>(state, centered), torchToDeviceTensor<float, 1>(state, std), torchToDeviceTensor<float, 1>(state, weight), 
THCState_getCurrentStream(state) ); } THCudaCheck(cudaGetLastError()); } template<typename T, typename ComputeT = float> __global__ void BatchNormalizationAccGradParameters_kernel( const DeviceTensor<T, 2> gradOutput, const DeviceTensor<T, 2> normalized, DeviceTensor<T, 1> gradWeight, DeviceTensor<T, 1> gradBias, T scale) { static_assert(std::is_same<ComputeT, double>::value , "type"); auto x = blockIdx.x * blockDim.x + threadIdx.x; if (x >= gradOutput.getSize(1)) { return; } ComputeT gradMean = (ComputeT)0; ComputeT normalizedGradMean = (ComputeT)0; for (auto batch = 0; batch < gradOutput.getSize(0); ++batch) { ComputeT g = gradOutput[batch][x].ldg(); ComputeT n = normalized[batch][x].ldg(); gradMean += g; normalizedGradMean += n * g; } gradBias[x] += scale * gradMean; gradWeight[x] += scale * normalizedGradMean; } template<typename T, int BatchDims, int ImageDims, typename ComputeT = float> void BatchNormalizationAccGradParameters( const DeviceTensor<T, BatchDims + ImageDims> gradOutput, const DeviceTensor<T, BatchDims + ImageDims> normalized, DeviceTensor<T, 1> gradWeight, DeviceTensor<T, 1> gradBias, T scale, cudaStream_t s) { static_assert(BatchDims == 2, "BatchDims == 2 only atm"); static_assert(ImageDims == 0, "ImageDims == 0 only atm"); dim3 blocks(ceil(gradOutput.getSize(1), 128)); dim3 threads(128); LOG_TARGET << blocks.x << " " << blocks.y << " " << blocks.z << " " << threads.x << " " << threads.y << " " << threads.z; BatchNormalizationAccGradParameters_kernel<T, ComputeT> <<<blocks, threads, 0, s>>>(gradOutput, normalized, gradWeight, gradBias, scale); } extern "C" void BatchNormalizationAccGradParametersFFI( THCState* state, THCudaTensor* gradOutput, THCudaTensor* normalized, THCudaTensor* gradWeight, THCudaTensor* gradBias, float scale) { // The BatchNormalization lua module is designed for // 2-D only: batch, plane constexpr int BatchDims = 2; constexpr int ImageDims = 0; typedef double ComputeT; // Collapse BatchNormalizationAccGradParameters <float, BatchDims, ImageDims, ComputeT> ( torchToDeviceTensor<float, BatchDims + ImageDims>(state, gradOutput), torchToDeviceTensor<float, BatchDims + ImageDims>(state, normalized), torchToDeviceTensor<float, 1>(state, gradWeight), torchToDeviceTensor<float, 1>(state, gradBias), scale, THCState_getCurrentStream(state) ); THCudaCheck(cudaGetLastError()); } }}}
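/* Minimal CPU sketch of the training-mode forward pass implemented by
 * BatchNormalizationUpdateOutput_kernel above, for a row-major
 * [batch, planes] tensor. As in the kernel, the "stddev" statistics actually
 * hold the inverse standard deviation 1/sqrt(var + eps). The plain-array
 * interface and names are hypothetical; gamma/beta may be null to model the
 * non-affine path. */
#include <cmath>

static void batchnorm_forward_cpu(const float *input, float *output,
                                  float *runningMean, float *runningInvStd,
                                  const float *gamma, const float *beta,
                                  int batch, int planes,
                                  float eps, float momentum) {
    for (int x = 0; x < planes; ++x) {
        double mean = 0.0, var = 0.0;
        for (int b = 0; b < batch; ++b) mean += input[b * planes + x];
        mean /= batch;
        for (int b = 0; b < batch; ++b) {
            double d = input[b * planes + x] - mean;
            var += d * d;
        }
        double invStd = 1.0 / std::sqrt(var / batch + eps);
        // Running statistics are blended with momentum exactly as in the kernel.
        runningMean[x]   = (float)((1 - momentum) * runningMean[x]   + momentum * mean);
        runningInvStd[x] = (float)((1 - momentum) * runningInvStd[x] + momentum * invStd);
        for (int b = 0; b < batch; ++b) {
            float n = (float)((input[b * planes + x] - mean) * invStd);
            output[b * planes + x] = gamma ? gamma[x] * n + beta[x] : n;
        }
    }
}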
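/* The Dragon kernels that follow all iterate with CUDA_KERNEL_LOOP and are
 * launched as <<<GET_BLOCKS(count), CUDA_NUM_THREADS>>>. The real definitions
 * live in the included utils headers; the sketch below only illustrates the
 * common Caffe-style grid-stride form they are assumed to take (the thread
 * count of 1024 is an assumption, not taken from this file), using _SKETCH
 * names to avoid clashing with the real macros. */
#define CUDA_NUM_THREADS_SKETCH 1024
#define CUDA_KERNEL_LOOP_SKETCH(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
         i += blockDim.x * gridDim.x)
inline int GET_BLOCKS_SKETCH(const int n) {
    // Enough blocks to cover n elements; the grid-stride loop handles any excess.
    return (n + CUDA_NUM_THREADS_SKETCH - 1) / CUDA_NUM_THREADS_SKETCH;
}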
#include <cmath> #include "core/context_cuda.h" #include "core/tensor.h" #include "utils/cuda_device.h" #include "utils/op_kernel.h" #include "utils/math_functions.h" namespace dragon { namespace kernel { template <typename T> __global__ void _Empty() { } template<> void Empty<float, CUDAContext>() { _Empty<float> << <1, 1 >> >(); CUDA_POST_KERNEL_CHECK; } template<> void Empty<float16, CUDAContext>() { _Empty<float16> << <1, 1 >> >(); CUDA_POST_KERNEL_CHECK; } /******************** activation.dropout ********************/ template<typename T> __global__ void _Dropout(const int count, const uint32_t thresh, const T scale, const T* x, const uint32_t* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] * (mask[idx] > thresh) * scale; } } template<> void Dropout<float, CUDAContext>(const int count, float prob, float scale, const float* x, uint32_t* mask, float* y, CUDAContext* context) { uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob); math::RandomUniform<uint32_t, CUDAContext>(count, float(0), float(UINT_MAX), mask); _Dropout<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, thresh, scale, x, mask, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _DropoutGrad(const int count, const uint32_t thresh, const T scale, const T* dy, const uint32_t* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * (mask[idx] > thresh) * scale; } } template<> void DropoutGrad<float, CUDAContext>(const int count, float prob, float scale, const float* dy, const uint32_t* mask, float* dx) { uint32_t thresh = static_cast<uint32_t>(UINT_MAX * prob); _DropoutGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, thresh, scale, dy, mask, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.prelu ********************/ template <typename T> __global__ void _PRelu(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[0]; } } template <typename T> __global__ void _PReluNCHW(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = (idx / dim) % channels; y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[c]; } } template <typename T> __global__ void _PReluNHWC(const int count, const int channels, const int dim, const T* x, const T* w, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % channels; y[idx] = (x[idx] > 0) * x[idx] + (x[idx] < 0) * x[idx] * w[c]; } } template<> void PRelu<float, CUDAContext>(const int count, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* x, const float* w, float* y) { if (channel_shared) { _PRelu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else { if (data_format == "NCHW") { _PReluNCHW<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else if (data_format == "NHWC") { _PReluNHWC<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, x, w, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _PReluGrad(const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[0]); } } template <typename T> __global__ void _PReluGradNCHW(const int count, const int channels, const int 
dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = (idx / dim) % channels; dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]); } } template <typename T> __global__ void _PReluGradNHWC(const int count, const int channels, const int dim, const T* dy, const T* x, const T* w, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % channels; dx[idx] = dy[idx] * ((x[idx] > 0) + (x[idx] <= 0) * w[c]); } } template<> void PReluGrad<float, CUDAContext>(const int count, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* dy, const float* x, const float* w, float* dx) { if (channel_shared) { _PReluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else { if (data_format == "NCHW") { _PReluGradNCHW<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else if (data_format == "NHWC") { _PReluGradNHWC<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, dim, dy, x, w, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; } CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _PReluWGradBcast(const int count, const int rows, const int row_offset, const T* dy, const T* x, T* bcast_dw) { CUDA_KERNEL_LOOP(idx, count) { bcast_dw[idx] = dy[idx] * x[idx] * (x[idx] <= 0); for (int n = 1; n < rows; n++) { const int cur_idx = idx + n * row_offset; bcast_dw[idx] += dy[cur_idx] * x[cur_idx] * (x[cur_idx] <= 0); } } } template<> void PReluWGrad<float, CUDAContext>(const int rows, const int row_offset, const int channels, const int dim, const bool channel_shared, const string& data_format, const float* dy, const float* x, const float* multiplier, float* bcast_dw, float* dw) { const int cdim = channels * dim; _PReluWGradBcast<float> << < GET_BLOCKS(cdim), CUDA_NUM_THREADS >> >(cdim, rows, row_offset, dy, x, bcast_dw); CUDA_POST_KERNEL_CHECK; if (channel_shared) { float w_sum = math::Dot<float, CUDAContext>(channels * dim, bcast_dw, multiplier); math::AddScalar<float, CUDAContext>(1, w_sum, dw); } else { if (data_format == "NCHW") { math::Gemv<float, CUDAContext>(CblasNoTrans, channels, dim, 1.0, bcast_dw, multiplier, 1.0, dw); } else if (data_format == "NHWC") { math::Gemv<float, CUDAContext>(CblasTrans, dim, channels, 1.0, bcast_dw, multiplier, 1.0, dw); } else LOG(FATAL) << "Unknown data format: " << data_format; } } /******************** activation.elu ********************/ template <typename T> __global__ void _Elu(const int count, const T* x, const float alpha, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? 
x[idx] : alpha * (std::exp(x[idx]) - 1); } } template<> void Elu<float, CUDAContext>(const int count, const float* x, const float alpha, float* y) { _Elu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, alpha, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _EluGrad(const int count, const T* dy, const T* y, const float alpha, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((y[idx] > 0) + (alpha + y[idx]) * (y[idx] <= 0)); } } template<> void EluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, const float alpha, float* dx) { _EluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, alpha, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.relu ********************/ template <typename T> __global__ void _Relu(const int count, const T* x, const float slope, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? x[idx] : x[idx] * slope; } } template<> void Relu<float, CUDAContext>(const int count, const float* x, const float slope, float* y) { _Relu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, slope, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _ReluHalf(const int count, const half* x, const float slope, half* y) { const half kSlope = __float2half(slope); const half kZero = __float2half(0.0); CUDA_KERNEL_LOOP(idx, count) { #if __CUDA_ARCH__ >= 530 y[idx] = __hgt(x[idx], kZero) ? x[idx] : __hmul(x[idx], kSlope); #endif } } template<> void Relu<float16, CUDAContext>(const int count, const float16* x, const float slope, float16* y) { _ReluHalf<half> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(x), slope, reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _ReluGrad(const int count, const T* dy, const T* y, const float slope, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * ((y[idx] > 0) + slope * (y[idx] <= 0)); } } template<> void ReluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, const float slope, float* dx) { _ReluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, slope, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.selu ********************/ template <typename T> __global__ void _SElu(const int count, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = x[idx] > 0 ? 1.0507 * x[idx] : 1.7581 * (std::exp(x[idx]) - 1); } } template<> void SElu<float, CUDAContext>(const int count, const float* x, float* y) { _SElu<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SEluGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = y[idx] > 0 ? 
1.0507 * dy[idx] : (1.7581 + y[idx]) * dy[idx]; } } template<> void SEluGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _SEluGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.sigmoid ********************/ template <typename T> __device__ T _SigmoidUnit(const T x) { return T(1) / (T(1) + exp(-x)); } template <typename T> __global__ void _Sigmoid(const int n, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, n) { y[idx] = _SigmoidUnit<T>(x[idx]); } } template<> void Sigmoid<float, CUDAContext>(const int count, const float* x, float* y) { _Sigmoid<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SigmoidGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(idx, count) { dx[idx] = dy[idx] * y[idx] * (1 - y[idx]); } } template<> void SigmoidGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _SigmoidGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.softmax ********************/ template <typename T> __global__ void _SoftmaxMaxClass(const int outer_dim, const int classes, const int inner_dim, const T* x, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T max_val = -FLT_MAX; for (int c = 0; c < classes; c++) max_val = max(x[(o_idx * classes + c) * inner_dim + i_idx], max_val); scale[idx] = max_val; } } template <typename T> __global__ void _SoftmaxSubtract(const int count, const int classes, const int inner_dim, const T* scale, T* y) { CUDA_KERNEL_LOOP(idx, count) { int o_idx = idx / inner_dim / classes; int i_idx = idx % inner_dim; y[idx] -= scale[o_idx * inner_dim + i_idx]; } } template <typename T> __global__ void _SoftmaxExp(const int count, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = std::exp(y[idx]); } } template <typename T> __global__ void _SoftmaxSumClass(const int outer_dim, const int classes, const int inner_dim, const T* y, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T sum = 0; for (int c = 0; c < classes; c++) sum += y[(o_idx * classes + c) * inner_dim + i_idx]; scale[idx] = sum; } } template <typename T> __global__ void _SoftmaxDiv(const int count, const int classes, const int inner_dim, const T* scale, T* y) { CUDA_KERNEL_LOOP(idx, count) { int o_idx = idx / inner_dim / classes; int i_idx = idx % inner_dim; y[idx] /= scale[o_idx * inner_dim + i_idx]; } } template<> void Softmax<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* sum_multiplier, const float* x, float* scale, float* y, CUDAContext* context) { const int num_preds = inner_dim * outer_dim; _SoftmaxMaxClass<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, x, scale); _SoftmaxSubtract<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, y); _SoftmaxExp<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, y); _SoftmaxSumClass<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, y, scale); _SoftmaxDiv<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SoftmaxDot(const int 
outer_dim, const int classes, const int inner_dim, const T* dy, const T* y, T* scale) { CUDA_KERNEL_LOOP(idx, outer_dim * inner_dim) { int o_idx = idx / inner_dim; int i_idx = idx % inner_dim; T dot = 0; for (int c = 0; c < classes; c++) dot += (y[(o_idx * classes + c) * inner_dim + i_idx] * dy[(o_idx * classes + c) * inner_dim + i_idx]); scale[idx] = dot; } } template<> void SoftmaxGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* sum_multiplier, const float* dy, const float* y, float* scale, float* dx) { const int num_preds = inner_dim * outer_dim; _SoftmaxDot<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(outer_dim, classes, inner_dim, dy, y, scale); _SoftmaxSubtract<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, classes, inner_dim, scale, dx); math::Mul<float, CUDAContext>(count, dx, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** activation.tanh ********************/ template <typename T> __global__ void _Tanh(const int count, const T* x, T* y) { CUDA_KERNEL_LOOP(i, count) { y[i] = std::tanh(x[i]); } } template<> void Tanh<float, CUDAContext>(const int count, const float* x, float* y) { _Tanh<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _TanhGrad(const int count, const T* dy, const T* y, T* dx) { CUDA_KERNEL_LOOP(i, count) { dx[i] = dy[i] * (1 - y[i] * y[i]); } } template<> void TanhGrad<float, CUDAContext>(const int count, const float* dy, const float* y, float* dx) { _TanhGrad<float> << < GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, y, dx); CUDA_POST_KERNEL_CHECK; } /******************** arithmetic.bias_add ********************/ template <typename T> __global__ void _BiasAdd_NCHW(const int count, const int dim, const int inner_dim, const T* bias, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int bias_idx = (idx / inner_dim) % dim; y[idx] += bias[bias_idx]; } } template <typename T> __global__ void _BiasAdd_NHWC(const int count, const int dim, const int inner_dim, const T* bias, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] += bias[idx % dim]; } } template<> void BiasAdd<float, CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const string& data_format, const float* bias, const float* bias_multiplier, float* y) { if (data_format == "NCHW") { _BiasAdd_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, inner_dim, bias, y); } else if (data_format == "NHWC") { _BiasAdd_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, inner_dim, bias, y); } else LOG(FATAL) << "Unknown data format: " << data_format; } /******************** arithmetic.clip ********************/ template <typename T> __global__ void _Clip(const int count, const T low, const T high, const T* x, T* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { mask[idx] = 1.0; if (x[idx] > high || x[idx] < low) mask[idx] = 0.0; y[idx] = x[idx] > high ? high : x[idx]; y[idx] = x[idx] < low ? 
low : x[idx]; } } template <> void Clip<float, CUDAContext>(const int count, const float low, const float high, const float* x, float* mask, float* y) { _Clip<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, low, high, x, mask, y); } /******************** arithmetic.scale ********************/ template <typename T> __global__ void _ScaleWithoutBias(const int n, const T* x, const T* scale, const int scale_dim, const int inner_dim, T* y) { CUDA_KERNEL_LOOP(idx, n) { const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = x[idx] * scale[scale_idx]; } } template <typename T> __global__ void _ScaleWithBias(const int n, const T* x, const T* scale, const T* bias, const int scale_dim, const int inner_dim, T* y) { CUDA_KERNEL_LOOP(idx, n) { const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = x[idx] * scale[scale_idx] + bias[scale_idx]; } } template<> void Scale<float, CUDAContext>(const int axis, Tensor* x, Tensor* gamma, Tensor* beta, Tensor* BMul, Tensor* y) { const int count = x->count(); const int inner_dim = x->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* Xdata = x->data<float, CUDAContext>(); auto* Ydata = y->mutable_data<float, CUDAContext>(); auto* Sdata = gamma->data<float, CUDAContext>(); auto* Bdata = beta != nullptr ? beta->data<float, CUDAContext>() : nullptr; if (Bdata != nullptr) _ScaleWithBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, Xdata, Sdata, Bdata, scale_dim, inner_dim, Ydata); else _ScaleWithoutBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, Xdata, Sdata, scale_dim, inner_dim, Ydata); } #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _ScaleWithoutBiasHalf(const int n, const half* x, const half* scale, const int scale_dim, const int inner_dim, half* y) { CUDA_KERNEL_LOOP(idx, n) { #if __CUDA_ARCH__ >= 530 const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = __hmul(x[idx], scale[scale_idx]); #endif } } template <typename T> __global__ void _ScaleWithBiasHalf(const int n, const half* x, const half* scale, const half* bias, const int scale_dim, const int inner_dim, half* y) { CUDA_KERNEL_LOOP(idx, n) { #if __CUDA_ARCH__ >= 530 const int scale_idx = (idx / inner_dim) % scale_dim; y[idx] = __hadd(__hmul(x[idx], scale[scale_idx]), bias[scale_idx]); #endif } } template<> void Scale<float16, CUDAContext>(const int axis, Tensor* x, Tensor* gamma, Tensor* beta, Tensor* BMul, Tensor* y) { const int count = x->count(); const int inner_dim = x->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* Xdata = x->data<float16, CUDAContext>(); auto* Ydata = y->mutable_data<float16, CUDAContext>(); auto* Sdata = gamma->data<float16, CUDAContext>(); auto* Bdata = beta != nullptr ? 
beta->data<float16, CUDAContext>() : nullptr; if (Bdata != nullptr) _ScaleWithBiasHalf<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(Xdata), reinterpret_cast<const half*>(Sdata), reinterpret_cast<const half*>(Bdata), scale_dim, inner_dim, reinterpret_cast<half*>(Ydata)); else _ScaleWithoutBiasHalf<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, reinterpret_cast<const half*>(Xdata), reinterpret_cast<const half*>(Sdata), scale_dim, inner_dim, reinterpret_cast<half*>(Ydata)); } #endif template <> void ScaleGrad<float, CUDAContext>(const int axis, Tensor* dy, Tensor* gamma, Tensor* dx) { const int count = dx->count(); const int inner_dim = dx->count(axis + gamma->ndim()); const int scale_dim = gamma->count(); auto* dYdata = dy->data<float, CUDAContext>(); auto* dXdata = dx->mutable_data<float, CUDAContext>(); auto* Sdata = gamma->data<float, CUDAContext>(); _ScaleWithoutBias<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dYdata, Sdata, scale_dim, inner_dim, dXdata); } /******************** cast.float2half ********************/ #ifdef WITH_CUDA_FP16 template <typename T> __global__ void _FloatToHalfKernel(const int count, const float* x, half* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = __float2half(x[idx]); } } template <> void Float2Half<float, CUDAContext>(const int count, const float* x, float16* y) { _FloatToHalfKernel<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif /******************** control_flow.compare ********************/ template <typename T> __global__ void _Equal(const int count, const T* a, const T* b, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = fabs(a[idx] - b[idx]) < FLT_EPSILON ? 1.0 : 0.0; } } template <> void Equal<float, CUDAContext>(const int count, const float* a, const float* b, float* y) { _Equal<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, a, b, y); CUDA_POST_KERNEL_CHECK; } /******************** loss.l1_loss ********************/ template <typename T> __global__ void _AbsGrad(const int count, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const T val = dy[idx]; // val > 0: 1 | val == 0: 0 | val < 0: -1 dx[idx] = (val > T(0)) - (val < T(0)); } } template<> void AbsGrad<float, CUDAContext>(const int count, const float* dy, float* dx) { _AbsGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** loss.sigmoid_cross_entropy ********************/ template <typename T> __global__ void _SigmoidCrossEntropy(const int count, const T* x, const T* target, T* loss, T* valid) { CUDA_KERNEL_LOOP(idx, count) { if (target[idx] < 0) { loss[idx] = 0.; valid[idx] = 0.; } else { loss[idx] = std::log(1 + std::exp(x[idx] - 2 * x[idx] * (x[idx] >= 0))) + x[idx] * ((x[idx] >= 0) - target[idx]); valid[idx] = 1.; } } } template <> void SigmoidCrossEntropy<float, CUDAContext>(const int count, const float* x, const float* target, float* loss, float* valid) { _SigmoidCrossEntropy<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, target, loss, valid); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SigmoidCrossEntropyGrad(const int count, const T* x, const T* target, T* dx, T* valid) { CUDA_KERNEL_LOOP(idx, count) { if (target[idx] < 0) { dx[idx] = 0.; valid[idx] = 0.; } else { dx[idx] = 1. / (1. 
+ expf(-x[idx])) - target[idx]; valid[idx] = 1.; } } } template <> void SigmoidCrossEntropyGrad<float, CUDAContext>(const int count, const float* x, const float* target, float* dx, float* valid) { _SigmoidCrossEntropyGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, target, dx, valid); CUDA_POST_KERNEL_CHECK; } /******************** loss.smooth_l1_loss ********************/ template <typename T> __global__ void _SmoothL1(const int count, const float sigma2, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const T val = x[idx]; const T abs_val = abs(val); if (abs_val < 1.0 / sigma2) y[idx] = 0.5 * val * val * sigma2; else y[idx] = abs_val - 0.5 / sigma2; } } template<> void SmoothL1<float, CUDAContext>(const int count, const float sigma2, const float* x, float* y) { _SmoothL1<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, sigma2, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SmoothL1Grad(const int count, const float sigma2, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const T val = dy[idx]; const T abs_val = abs(val); if (abs_val < 1.0 / sigma2) dx[idx] = val * sigma2; // val > 0: 1 | val == 0: 0 | val < 0: -1 else dx[idx] = (val > T(0)) - (val < T(0)); } } template<> void SmoothL1Grad<float, CUDAContext>(const int count, const float sigma2, const float* dy, float* dx) { _SmoothL1Grad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, sigma2, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** loss.softmax_cross_entropy ********************/ template <typename T> __global__ void _SoftmaxCrossEntropy(const int count, const T* prob, const T* target, T* loss) { CUDA_KERNEL_LOOP(idx, count) { loss[idx] = -target[idx] * log(max(prob[idx], FLT_MIN)); } } template <> void SoftmaxCrossEntropy<float, CUDAContext>(const int count, const float* prob, const float* target, float* loss) { _SoftmaxCrossEntropy<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, prob, target, loss); CUDA_POST_KERNEL_CHECK; } /******************** loss.sparse_softmax_cross_entropy ********************/ template <typename T> __global__ void _SparseSoftmaxCrossEntropy(const int count, const T* prob, const T* labels, T* loss, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) { if (label == ignores[k]) { loss[idx] = valid[idx] = 0; break; } } if (k == ignore_num) { loss[idx] = -log(max(prob[(o_idx * classes + label) * inner_dim + i_idx], FLT_MIN)); valid[idx] = 1; } } } template <> void SparseSoftmaxCrossEntropy<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* prob, const float* labels, float* loss, float* valid, Tensor* ignore) { const int* ignores = ignore->count() > 0 ? 
ignore->data<int, CUDAContext>() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropy<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, prob, labels, loss, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SparseSoftmaxCrossEntropyGrad(const int count, const T* prob, const T* labels, T* dx, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) if (label == ignores[k]) break; if (k != ignore_num) { for (int c = 0; c < classes; c++) dx[(o_idx * classes + c) * inner_dim + i_idx] = 0; valid[idx] = 0; } else { dx[(o_idx * classes + label) * inner_dim + i_idx] -= 1; valid[idx] = 1; } } } template<> void SparseSoftmaxCrossEntropyGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float* prob, const float* labels, float* valid, Tensor* ignore, float* dXdata) { const int* ignores = ignore->count() > 0 ? ignore->data <int, CUDAContext >() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxCrossEntropyGrad<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, prob, labels, dXdata, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } /******************** loss.sparse_softmax_focal_loss ********************/ template <typename T> __global__ void _SparseSoftmaxFocalScale(const int count, const float gamma, const T* prob, T* scale) { CUDA_KERNEL_LOOP(idx, count) { scale[idx] = std::pow((1.0f - prob[idx]), gamma); } } template <typename T> __global__ void _SparseSoftmaxFocalLoss(const int count, const float pos_alpha, const float neg_alpha, const int neg_id, T* scale, const T* prob, const T* labels, T* loss, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) { if (label == ignores[k]) { loss[idx] = valid[idx] = 0; break; } } if (k == ignore_num) { const int t_ = (o_idx * classes + label) * inner_dim + i_idx; scale[t_] = label > neg_id ? pos_alpha * scale[t_] : neg_alpha * scale[t_]; loss[idx] = -scale[t_] * std::log(max(prob[t_], FLT_MIN)); valid[idx] = label > neg_id ? 1 : 0; } } } template <> void SparseSoftmaxFocalLoss<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float pos_alpha, const float neg_alpha, const float gamma, const int neg_id, const float* prob, const float* labels, float* scale, float* loss, float* valid, Tensor* ignore) { const int* ignores = ignore->count() > 0 ? 
ignore->data<int, CUDAContext>() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxFocalScale<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, gamma, prob, scale); _SparseSoftmaxFocalLoss<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, pos_alpha, neg_alpha, neg_id, scale, prob, labels, loss, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SparseSoftmaxFocalLossGrad(const int count, const float gamma, const int neg_id, const float eps, const T* scale, const T* prob, const T* labels, T* dx, const int classes, const int inner_dim, const int* ignores, const int ignore_num, T* valid) { CUDA_KERNEL_LOOP(idx, count) { const int o_idx = idx / inner_dim; const int i_idx = idx % inner_dim; const int label = labels[o_idx * inner_dim + i_idx]; int k; for (k = 0; k < ignore_num; k++) if (label == ignores[k]) break; if (k != ignore_num) { for (int c = 0; c < classes; c++) dx[(o_idx * classes + c) * inner_dim + i_idx] = 0; valid[idx] = 0; } else { const int t_ = (o_idx * classes + label) * inner_dim + i_idx; T grad = -gamma * (scale[t_] / max((1.0f - prob[t_]), eps)) * std::log(max(prob[t_], FLT_MIN)) * prob[t_] + scale[t_]; for (int c = 0; c < classes; c++) { const int i_ = (o_idx * classes + c) * inner_dim + i_idx; if (c == label) { dx[i_] = grad * (prob[t_] - 1); } else { dx[i_] = grad * prob[i_]; } } valid[idx] = label > neg_id ? 1 : 0; } } } template<> void SparseSoftmaxFocalLossGrad<float, CUDAContext>(const int count, const int classes, const int outer_dim, const int inner_dim, const float gamma, const int neg_id, const float eps, const float* scale, const float* prob, const float* labels, float* valid, Tensor* ignore, float* dXdata) { const int* ignores = ignore->count() > 0 ? 
ignore->data <int, CUDAContext >() : nullptr; const int num_preds = outer_dim * inner_dim; _SparseSoftmaxFocalLossGrad<float> << <GET_BLOCKS(num_preds), CUDA_NUM_THREADS >> >(num_preds, gamma, neg_id, eps, scale, prob, labels, dXdata, classes, inner_dim, ignores, ignore->count(), valid); CUDA_POST_KERNEL_CHECK; } /******************** misc.image_data ********************/ template <typename Tx, typename Ty> __global__ void _ImageData_NCHW(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; Ty raw_value = x[((n * H + h) * W + w) * C + c]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = raw_value; } } template <typename Tx, typename Ty> __global__ void _ImageData_NHWC(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; Ty raw_value = x[idx]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = raw_value; } } template <typename Tx, typename Ty> __global__ void _ImageDataHalf_NCHW(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; float raw_value = x[((n * H + h) * W + w) * C + c]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = __float2half(raw_value); } } template <typename Tx, typename Ty> __global__ void _ImageDataHalf_NHWC(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const Tx* x, Ty* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; float raw_value = x[idx]; if (mean_values != nullptr) raw_value -= mean_values[c]; if (std_values != nullptr) raw_value /= std_values[c]; y[idx] = __float2half(raw_value); } } template <> void ImageData<float, float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float* y) { if (data_format == "NCHW") { _ImageData_NCHW<float, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<float, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template <> void ImageData<uint8_t, float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const uint8_t* x, float* y) { if (data_format == "NCHW") { _ImageData_NCHW<uint8_t, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else if (data_format == "NHWC") { _ImageData_NHWC<uint8_t, float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, y); } else LOG(FATAL) << 
"Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void ImageData<float, float16, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const float* x, float16* y) { if (data_format == "NCHW") { _ImageDataHalf_NCHW<float, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else if (data_format == "NHWC") { _ImageDataHalf_NHWC<float, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template <> void ImageData<uint8_t, float16, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const float* mean_values, const float* std_values, const string& data_format, const uint8_t* x, float16* y) { if (data_format == "NCHW") { _ImageDataHalf_NCHW<uint8_t, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else if (data_format == "NHWC") { _ImageDataHalf_NHWC<uint8_t, half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, mean_values, std_values, x, reinterpret_cast<half*>(y)); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } #endif /******************** ndarray.argmax ********************/ template <typename T> __global__ void _Arange(const int count, const int start, const int step, T* y) { CUDA_KERNEL_LOOP(idx, count) { y[idx] = start + idx * step; } } template<> void Arange<float, CUDAContext>(const int count, const int start, const int step, float* y) { _Arange<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, start, step, y); CUDA_POST_KERNEL_CHECK; } template<> void Arange<int, CUDAContext>(const int count, const int start, const int step, int* y) { _Arange<int> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, start, step, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.argmax ********************/ template <typename T> __global__ void _Argmax(const int count, const int axis_dim, const int inner_dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { T max_val = -FLT_MAX; int max_idx = -1; for (int j = 0; j < axis_dim; ++j) { const T val = x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; if (val > max_val) { max_val = val; max_idx = j; } } y[idx] = max_idx; } } template<> void Argmax<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const int top_k, const float* x, float* y) { CHECK_EQ(top_k, 1) << "top_k > 1 is not supported with CUDA"; _Argmax<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.argmin ********************/ template <typename T> __global__ void _Argmin(const int count, const int axis_dim, const int inner_dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { T min_val = FLT_MAX; int min_idx = -1; for (int j = 0; j < axis_dim; ++j) { const T val = x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; if (val < min_val) { min_val = val; min_idx = j; } } y[idx] = min_idx; } } template<> void Argmin<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const int top_k, const float* x, float* y) { CHECK_EQ(top_k, 1) << "top_k > 1 is not 
supported with CUDA"; _Argmin<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.gather ********************/ template <typename T> __global__ void _CanonicalAxis(const int count, const int dim, T* y) { CUDA_KERNEL_LOOP(idx, count) { if (y[idx] < 0) y[idx] += dim; } } template <> void CanonicalAxis<int, CUDAContext>(const int count, const int dim, int* y) { _CanonicalAxis<int> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _Gather(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } template <> void Gather<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const float* x, float* y, CUDAContext* context) { _Gather<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); CUDA_POST_KERNEL_CHECK; } template <> void Gather<int, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const int* x, int* y, CUDAContext* context) { _Gather<int> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _GatherGrad(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int outer_idx = idx / inner_dim / y_slice_dim; const int slice_idx = idx % inner_dim; const int y_idx_offset = (idx / inner_dim) % y_slice_dim; const int x_idx_offset = indices[y_idx_offset]; const int x_idx = (outer_idx * x_slice_dim + x_idx_offset) * inner_dim + slice_idx; atomicAdd(dx + x_idx, dy[idx]); } } template <> void GatherGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const float* dy, float* dx) { _GatherGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); CUDA_POST_KERNEL_CHECK; } template <> void GatherGrad<int, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int* indices, const int* dy, int* dx) { _GatherGrad<int> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, indices, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.concat ********************/ template <typename T> __global__ void _Concat(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = x_concat_dim * inner_dim; const int outer_idx = idx / tmp; const int concat_idx = idx % tmp; const int y_idx = (outer_idx * y_concat_dim + 
concat_offset) * inner_dim + concat_idx; y[y_idx] = x[idx]; } } template <> void Concat<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float* x, float* y, CUDAContext* context) { _Concat<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, x, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void Concat<float16, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float16* x, float16* y, CUDAContext* context) { _Concat<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _ConcatGrad(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = x_concat_dim * inner_dim; const int outer_idx = idx / tmp; const int concat_idx = idx % tmp; const int y_idx = (outer_idx * y_concat_dim + concat_offset) * inner_dim + concat_idx; dx[idx] = dy[y_idx]; } } template <> void ConcatGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float* dy, float* dx, CUDAContext* context) { _ConcatGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, dy, dx); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void ConcatGrad<float16, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_concat_dim, const int y_concat_dim, const int concat_offset, const float16* dy, float16* dx, CUDAContext* context) { _ConcatGrad<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_concat_dim, y_concat_dim, concat_offset, reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(dx)); CUDA_POST_KERNEL_CHECK; } #endif /******************** ndarray.crop ********************/ template<typename T> __global__ void _Crop1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; y[idx] = x[(o * dim + ex_d + start) * inner_dim + i]; } } template<> void Crop1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const float* x, float* y, CUDAContext* context) { _Crop1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, start, x, y); CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _Crop1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const int end, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int d = (idx / inner_dim) % dim; const int o = idx / inner_dim / dim; if (d >= start && d < end) dx[idx] = dy[(o * ex_dim + d - start) * inner_dim + i]; } } template<> void Crop1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int start, const 
int end, const float* dy, float* dx, CUDAContext* context) { _Crop1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, start, end, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.pad ********************/ template <typename T> __global__ void _ConstPad1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T value, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = ex_d - pad_l; y[idx] = (d < 0 || d >= dim) ? value : x[(o * dim + d) * inner_dim + i]; } } template <> void ConstPad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float value, const float* x, float* y, CUDAContext* context) { _ConstPad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, value, x, y); } template <typename T> __global__ void _ReflectPad1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; int d = ex_d - pad_l; d = max(d, -d); d = min(d, 2 * dim - d - 2); y[idx] = x[(o * dim + d) * inner_dim + i]; } } template <> void ReflectPad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* x, float* y, CUDAContext* context) { _ReflectPad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, x, y); } template <typename T> __global__ void _EdgePad1D(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = min(dim - 1, max(ex_d - pad_l, 0)); y[idx] = x[(o * dim + d) * inner_dim + i]; } } template <> void EdgePad1D<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* x, float* y, CUDAContext* context) { _EdgePad1D<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, x, y); } template <typename T> __global__ void _ConstPad1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % dim + pad_l; const int o = idx / inner_dim / dim; dx[idx] = dy[(o * ex_dim + ex_d) * inner_dim + i]; } } template <> void ConstPad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, CUDAContext* context) { _ConstPad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } template <typename T> __global__ void _ReflectPad1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; int d = ex_d - pad_l; d = max(d, -d); d = min(d, 2 * dim - d - 2); atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]); } } template <> 
void ReflectPad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx) { _ReflectPad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } template <typename T> __global__ void _EdgePad1DGrad(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int i = idx % inner_dim; const int ex_d = (idx / inner_dim) % ex_dim; const int o = idx / inner_dim / ex_dim; const int d = min(dim - 1, max(ex_d - pad_l, 0)); atomicAdd(&dx[(o * dim + d) * inner_dim + i], dy[idx]); } } template <> void EdgePad1DGrad<float, CUDAContext>(const int count, const int dim, const int ex_dim, const int inner_dim, const int pad_l, const float* dy, float* dx, CUDAContext* context) { _EdgePad1DGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, dim, ex_dim, inner_dim, pad_l, dy, dx); } /******************** ndarray.one_hot ********************/ template <typename T> __global__ void _OneHot(const int count, const int depth, const int on_value, const float* x, float* y) { CUDA_KERNEL_LOOP(idx, count) { const int val = x[idx]; y[idx * depth + val] = on_value; } } template <> void OneHot<float, CUDAContext>(const int count, const int depth, const int on_value, const float* x, float* y) { _OneHot<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, depth, on_value, x, y); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.reduce ********************/ template <typename T> __global__ void _Sum(const int count, const int axis_dim, const int inner_dim, const T* x, float* y) { CUDA_KERNEL_LOOP(idx, count) { T sum_val = 0.0; for (int j = 0; j < axis_dim; j++) sum_val += x[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim]; y[idx] = sum_val; } } template<> void Sum<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const float* x, float* y) { _Sum<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SumGrad(const int count, const int axis_dim, const int inner_dim, const T coeff, const T* dy, float* dx) { CUDA_KERNEL_LOOP(idx, count) { for (int j = 0; j < axis_dim; j++) dx[(idx / inner_dim * axis_dim + j) * inner_dim + idx % inner_dim] = dy[idx] * coeff; } } template<> void SumGrad<float, CUDAContext>(const int count, const int axis_dim, const int inner_dim, const float coeff, const float* dy, float* dx) { _SumGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, axis_dim, inner_dim, coeff, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.repeat ********************/ template <typename T> __global__ void _Repeat(const int count, const int inner_dim, const int repeats, const int dim, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % inner_dim; const int b = (idx / inner_dim / repeats) % dim; const int n = idx / inner_dim / repeats / dim; const int x_idx = (n * dim + b) * inner_dim + d; y[idx] = x[x_idx]; } } template <> void Repeat<float, CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const int repeats, const float* x, float* y, CUDAContext* context) { _Repeat<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, inner_dim, repeats, dim, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _RepeatGrad(const int count, const int inner_dim, 
const int repeats, const int dim, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % inner_dim; const int b = (idx / inner_dim) % dim; const int n = idx / inner_dim / dim; T gradient = 0; for (int t = 0; t < repeats; t++) gradient += dy[(((n * dim + b) * repeats) + t) * inner_dim + d]; dx[idx] = gradient; } } template <> void RepeatGrad<float, CUDAContext>(const int count, const int outer_dim, const int dim, const int inner_dim, const int repeats, const float* dy, float* dx, CUDAContext* context) { _RepeatGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, inner_dim, repeats, dim, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.slice ********************/ template <typename T> __global__ void _Slice(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = y_slice_dim * inner_dim; const int outer_idx = idx / tmp; const int slice_idx = idx % tmp; const int x_idx = (outer_idx * x_slice_dim + slice_offset) * inner_dim + slice_idx; y[idx] = x[x_idx]; } } template <> void Slice<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const float* x, float* y, CUDAContext* context) { _Slice<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, slice_offset, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _SliceGrad(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int tmp = y_slice_dim * inner_dim; const int outer_idx = idx / tmp; const int slice_idx = idx % tmp; const int x_idx = (outer_idx * x_slice_dim + slice_offset) * inner_dim + slice_idx; dx[x_idx] = dy[idx]; } } template <> void SliceGrad<float, CUDAContext>(const int count, const int outer_dim, const int inner_dim, const int x_slice_dim, const int y_slice_dim, const int slice_offset, const float* dy, float* dx, CUDAContext* context) { _SliceGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, outer_dim, inner_dim, x_slice_dim, y_slice_dim, slice_offset, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.tile ********************/ template <typename T> __global__ void _Tile(const int count, const int ex_inner_dim, const int multiple, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % ex_inner_dim; const int n = idx / ex_inner_dim / multiple; const int x_idx = n * ex_inner_dim + d; y[idx] = x[x_idx]; } } template <> void Tile<float, CUDAContext>(const int count, const int outer_dim, const int ex_inner_dim, const int multiple, const float* x, float* y, CUDAContext* context) { _Tile<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ex_inner_dim, multiple, x, y); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _TileGrad(const int count, const int ex_inner_dim, const int multiple, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int d = idx % ex_inner_dim; const int n = idx / ex_inner_dim; T gradient = 0; for (int t = 0; t < multiple; t++) gradient += dy[(n * multiple + t) * ex_inner_dim + d]; dx[idx] = gradient; } } template <> void TileGrad<float, CUDAContext>(const int count, const int outer_dim, const int ex_inner_dim, const int multiple, const float* dy, float* dx, 
CUDAContext* context) { _TileGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ex_inner_dim, multiple, dy, dx); CUDA_POST_KERNEL_CHECK; } /******************** ndarray.transpose ********************/ template <typename T> __global__ void _Transpose(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { int x_idx = 0, y_idx = idx; for (int j = 0; j < ndim; ++j) { int k = order[j]; x_idx += (y_idx / new_steps[j]) * old_steps[k]; y_idx %= new_steps[j]; } y[idx] = x[x_idx]; } } template <> void Transpose<float, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float* x, float* y) { _Transpose<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, x, y); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void Transpose<float16, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float16* x, float16* y) { _Transpose<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); CUDA_POST_KERNEL_CHECK; } #endif template <typename T> __global__ void _TransposeGrad(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { int x_idx = 0, y_idx = idx; for (int j = 0; j < ndim; ++j) { int k = order[j]; x_idx += (y_idx / new_steps[j]) * old_steps[k]; y_idx %= new_steps[j]; } dx[x_idx] = dy[idx]; } } template <> void TransposeGrad<float, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float* dy, float* dx) { _TransposeGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, dy, dx); CUDA_POST_KERNEL_CHECK; } #ifdef WITH_CUDA_FP16 template <> void TransposeGrad<float16, CUDAContext>(const int count, const int ndim, const int* order, const int* old_steps, const int* new_steps, const float16* dy, float16* dx) { _TransposeGrad<half> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, ndim, order, old_steps, new_steps, reinterpret_cast<const half*>(dy), reinterpret_cast<half*>(dx)); CUDA_POST_KERNEL_CHECK; } #endif /******************** recurrent.lstm_unit ********************/ template <typename T> __global__ void _LSTMUnitAct(const int count, const int channels, const int g_offset, const int x_offset, const T* x, T* x_act) { CUDA_KERNEL_LOOP(idx, count) { const int ch_4 = idx % x_offset; if (ch_4 < g_offset) x_act[idx] = _SigmoidUnit<float>(x[idx]); else x_act[idx] = std::tanh(x[idx]); } } template <typename T> __global__ void _LSTMUnit(const int count, const int channels, const int o_offset, const int g_offset, const int x_offset, const T* c_1, T* x_act, const T* cont, T* c, T* h) { CUDA_KERNEL_LOOP(idx, count) { const int n = idx / channels; const int ch = idx % channels; T* x_act_ = x_act + n * x_offset; const T i = x_act_[ch]; if (cont != nullptr && cont[n] != T(1)) x_act_[channels + ch] *= cont[n]; const T f = x_act_[channels + ch]; const T o = x_act_[o_offset + ch]; const T g = x_act_[g_offset + ch]; const T c_ = c[idx] = f * c_1[idx] + i * g; h[idx] = o * std::tanh(c_); } } template <> void LSTMUnit<float, CUDAContext>(const int count, const int num, const int channels, const float* c_1, const float* x, const float* cont, float* x_act,
float* c, float* h) { const int o_offset = 2 * channels, g_offset = 3 * channels; const int x_offset = 4 * channels, y_count = count / 4; _LSTMUnitAct<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, g_offset, x_offset, x, x_act); _LSTMUnit<float> << <GET_BLOCKS(y_count), CUDA_NUM_THREADS >> >(y_count, channels, o_offset, g_offset, x_offset, c_1, x_act, cont, c, h); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _LSTMUnitGrad(const int count, const int channels, const int o_offset, const int g_offset, const int x_offset, const T* c_1, const T* x_act, const T* c, const T* dc, const T* dh, T* dc_1, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int n = idx / channels; const int ch = idx % channels; const T* x_act_ = x_act + n * x_offset; T* dx_ = dx + n * x_offset; const T i = x_act_[ch]; const T f = x_act_[channels + ch]; const T o = x_act_[o_offset + ch]; const T g = x_act_[g_offset + ch]; T* p_di = dx_ + ch; T* p_df = dx_ + channels + ch; T* p_do = dx_ + o_offset + ch; T* p_dg = dx_ + g_offset + ch; const T tanh_c_t = tanh(c[idx]); const T dc_1_sum_term = dh[idx] * o * (1 - tanh_c_t * tanh_c_t) + dc[idx]; dc_1[idx] = dc_1_sum_term * f; *p_di = dc_1_sum_term * g; *p_df = dc_1_sum_term * c_1[idx]; *p_do = dh[idx] * tanh_c_t; *p_dg = dc_1_sum_term * i; } } template <typename T> __global__ void _LSTMUnitGradAct(const int count, const int channels, const int g_offset, const int x_offset, const T* x_act, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int ch_4 = idx % x_offset; const T x_act_ = x_act[idx]; if (ch_4 < g_offset) dx[idx] = dx[idx] * x_act_ * (T(1) - x_act_); else dx[idx] = dx[idx] * (T(1) - x_act_ * x_act_); } } template <> void LSTMUnitGrad<float, CUDAContext>(const int count, const int num, const int channels, const float* c_1, const float* x_act, const float* c, const float* dc, const float* dh, float* dc_1, float* dx) { const int o_offset = 2 * channels, g_offset = 3 * channels; const int x_offset = 4 * channels, y_count = count / 4; _LSTMUnitGrad<float> << <GET_BLOCKS(y_count), CUDA_NUM_THREADS >> >(y_count, channels, o_offset, g_offset, x_offset, c_1, x_act, c, dc, dh, dc_1, dx); _LSTMUnitGradAct<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, channels, g_offset, x_offset, x_act, dx); CUDA_POST_KERNEL_CHECK; } /******************** update.adam_update ********************/ template <typename T> __global__ void _AdamUpdate(const int n, T* g, T* m, T* v, const T beta1, const T beta2, const T eps, const T lr) { CUDA_KERNEL_LOOP(i, n) { T gi = g[i]; T mi = m[i] = m[i] * beta1 + gi * (1 - beta1); T vi = v[i] = v[i] * beta2 + gi * gi * (1 - beta2); g[i] = lr * mi / (sqrt(vi) + eps); } } template <> void AdamUpdate<float, CUDAContext>(Tensor* x, Tensor* m, Tensor* v, Tensor* t, const float beta1, const float beta2, const float eps, const float lr) { TIndex count = x->count(); auto* Xdata = x->mutable_data<float, CUDAContext>(); auto* Mdata = m->mutable_data<float, CUDAContext>(); auto* Vdata = v->mutable_data<float, CUDAContext>(); _AdamUpdate<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, Xdata, Mdata, Vdata, beta1, beta2, eps, lr); CUDA_POST_KERNEL_CHECK; } /******************** update.nesterov_update ********************/ template <typename T> __global__ void _NesterovUpdate(const int n, T* g, T* h, const T momentum, const T lr) { CUDA_KERNEL_LOOP(i, n) { T hi = h[i]; T hi_new = h[i] = momentum * hi + lr * g[i]; g[i] = (1 + momentum) * hi_new - momentum * hi; } } template <> void NesterovUpdate<float, CUDAContext>(const int 
count, float* x, float* h, Tensor* t, const float momentum, const float lr, CUDAContext* ctx) { _NesterovUpdate<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, h, momentum, lr); CUDA_POST_KERNEL_CHECK; } /******************** update.rmsprop_update ********************/ template <typename T> __global__ void _RMSPropUpdate(const int n, T* g, T* h, const T decay, const T eps, const T lr) { CUDA_KERNEL_LOOP(i, n) { T gi = g[i]; T hi = h[i] = decay * h[i] + (1 - decay) * gi * gi; g[i] = lr * g[i] / (sqrt(hi) + eps); } } template <> void RMSPropUpdate<float, CUDAContext>(const int count, float* x, float* h, Tensor* t, const float decay, const float eps, const float lr) { _RMSPropUpdate<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, x, h, decay, eps, lr); CUDA_POST_KERNEL_CHECK; } /******************** vision.bilinear_resize ********************/ template <typename T> __global__ void _BilinearResize_NCHW(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NCHT = (n * C + c) * H + top_y_idx; const int NCHB = (n * C + c) * H + bottom_y_idx; const float top_left(x[NCHT * W + left_x_idx]); const float top_right(x[NCHT * W + right_x_idx]); const float bottom_left(x[NCHB * W + left_x_idx]); const float bottom_right(x[NCHB * W + right_x_idx]); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; y[idx] = top + (bottom - top) * y_lerp; } } template <typename T> __global__ void _BilinearResize_NHWC(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ?
ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NHT = n * H + top_y_idx; const int NHB = n * H + bottom_y_idx; const float top_left(x[(NHT * W + left_x_idx) * C + c]); const float top_right(x[(NHT * W + right_x_idx) * C + c]); const float bottom_left(x[(NHB * W + left_x_idx) * C + c]); const float bottom_right(x[(NHB * W + right_x_idx) * C + c]); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; y[idx] = top + (bottom - top) * y_lerp; } } template <> void BilinearResize<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* x, float* y) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; if (data_format == "NCHW") { _BilinearResize_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else if(data_format == "NHWC") { _BilinearResize_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _BilinearResizeGrad_NCHW(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ? ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NCHT = (n * C + c) * H + top_y_idx; const int NCHB = (n * C + c) * H + bottom_y_idx; const float dtop = (1 - y_lerp) * dy[idx]; const float dbottom = y_lerp * dy[idx]; atomicAdd(&dx[NCHT * W + left_x_idx], static_cast<T>((1 - x_lerp) * dtop)); atomicAdd(&dx[NCHT * W + right_x_idx], static_cast<T>(x_lerp * dtop)); atomicAdd(&dx[NCHB * W + left_x_idx], static_cast<T>((1 - x_lerp) * dbottom)); atomicAdd(&dx[NCHB * W + right_x_idx], static_cast<T>(x_lerp * dbottom)); } } template <typename T> __global__ void _BilinearResizeGrad_NHWC(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const float h_in = h * scale_h; const int top_y_idx = floorf(h_in); const int bottom_y_idx = (h_in < H - 1) ? ceilf(h_in) : H - 1; const float y_lerp = h_in - top_y_idx; const float w_in = w * scale_w; const int left_x_idx = floorf(w_in); const int right_x_idx = (w_in < W - 1) ?
ceilf(w_in) : W - 1; const float x_lerp = w_in - left_x_idx; const int NHT = n * H + top_y_idx; const int NHB = n * H + bottom_y_idx; const float dtop = (1 - y_lerp) * dy[idx]; const float dbottom = y_lerp * dy[idx]; atomicAdd(&dx[(NHT * W + left_x_idx) * C + c], static_cast<T>((1 - x_lerp) * dtop)); atomicAdd(&dx[(NHT * W + right_x_idx) * C + c], static_cast<T>(x_lerp * dtop)); atomicAdd(&dx[(NHB * W + left_x_idx) * C + c], static_cast<T>((1 - x_lerp) * dbottom)); atomicAdd(&dx[(NHB * W + right_x_idx) * C + c], static_cast<T>(x_lerp * dbottom)); } } template <> void BilinearResizeGrad<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* dy, float* dx) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; math::Set<float, CUDAContext>(N * C * H * W, 0, dx); if (data_format == "NCHW") { _BilinearResizeGrad_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else if(data_format == "NHWC") { _BilinearResizeGrad_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } /******************** vision.conv ********************/ template<typename T> __global__ void _Im2Col2d_NCHW(const int count, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* im, T* col) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % col_w; const int h_idx = idx / col_w; const int h = h_idx % col_h; const int im_c = h_idx / col_h; const int c = im_c * kernel_h * kernel_w; const int im_h_off = h * stride_h - pad_h; const int im_w_off = w * stride_w - pad_w; T* col_ptr = col; col_ptr += ((c * col_h + h) * col_w + w); const T* im_ptr = im; im_ptr += ((im_c * H + im_h_off) * W + im_w_off); for (int kh = 0; kh < kernel_h; kh++) { for (int kw = 0; kw < kernel_w; kw++) { const int im_h = kh * dilation_h + im_h_off; const int im_w = kw * dilation_w + im_w_off; *col_ptr = (im_h >= 0 && im_w >= 0 && im_h < H && im_w < W) ? im_ptr[kh * dilation_h * W + kw * dilation_w] : 0; col_ptr += (col_h * col_w); } } } } template<typename T> __global__ void _Im2Col2d_NHWC(const int count, const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* im, T* col) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % col_w; const int h = idx / C / col_w; const int im_h_off = h * stride_h - pad_h; const int im_w_off = w * stride_w - pad_w; const int base_col_idx = (h * col_w) + w; for (int kh = 0; kh < kernel_h; kh++) { for (int kw = 0; kw < kernel_w; kw++) { const int im_h = kh * dilation_h + im_h_off; const int im_w = kw * dilation_w + im_w_off; const int col_idx = (((base_col_idx * kernel_h + kh) * kernel_w + kw) * C + c); col[col_idx] = (im_h >= 0 && im_w >= 0 && im_h < H && im_w < W) ? 
im[(im_h * W + im_w) * C + c] : 0; } } } } template <> void Im2Col2d<float, CUDAContext>(const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const string& data_format, const float* im, float* col) { if (data_format == "NCHW") { const int count = (C * col_h * col_w); _Im2Col2d_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, im, col); } else if (data_format == "NHWC") { const int count = (col_h * col_w * C); _Im2Col2d_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, C, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, im, col); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _Col2Im2d_NCHW(const int count, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* col, T* im) { CUDA_KERNEL_LOOP(idx, count) { T val = 0; const int im_w = idx % W + pad_w; const int im_h = (idx / W) % H + pad_h; const int im_c = idx / W / H; const int ex_kernel_h = (kernel_h - 1) * dilation_h + 1; const int ex_kernel_w = (kernel_w - 1) * dilation_w + 1; // redundant pixels will be ignored when conv // note to clip them by min(x,col_w) const int w_start = (im_w < ex_kernel_w) ? 0 : (im_w - ex_kernel_w) / stride_w + 1; const int w_end = min(im_w / stride_w + 1, col_w); const int h_start = (im_h < ex_kernel_h) ? 0 : (im_h - ex_kernel_h) / stride_h + 1; const int h_end = min(im_h / stride_h + 1, col_h); for (int h = h_start; h < h_end; ++h) { for (int w = w_start; w < w_end; ++w) { int kh_off = (im_h - h * stride_h); int kw_off = (im_w - w * stride_w); // only the serval im pixels used in dilated-conv // ignore the corresponding col pixels if (kh_off % dilation_h == 0 && kw_off % dilation_w == 0) { kh_off /= dilation_h; kw_off /= dilation_w; const int col_idx = (((im_c * kernel_h + kh_off) * kernel_w + kw_off) * col_h + h) * col_w + w; val += col[col_idx]; } } } im[idx] = val; } } template<typename T> __global__ void _Col2Im2d_NHWC(const int count, const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const T* col, T* im) { CUDA_KERNEL_LOOP(idx, count) { T val = 0; const int im_c = idx % C; const int im_w = (idx / C) % W + pad_w; const int im_h = (idx / C / W) + pad_h; const int ex_kernel_h = (kernel_h - 1) * dilation_h + 1; const int ex_kernel_w = (kernel_w - 1) * dilation_w + 1; // redundant pixels will be ignored when conv // note to clip them by min(x,col_w) const int w_start = (im_w < ex_kernel_w) ? 0 : (im_w - ex_kernel_w) / stride_w + 1; const int w_end = min(im_w / stride_w + 1, col_w); const int h_start = (im_h < ex_kernel_h) ? 
0 : (im_h - ex_kernel_h) / stride_h + 1; const int h_end = min(im_h / stride_h + 1, col_h); for (int h = h_start; h < h_end; ++h) { for (int w = w_start; w < w_end; ++w) { int kh_off = (im_h - h * stride_h); int kw_off = (im_w - w * stride_w); // only the serval im pixels used in dilated-conv // ignore the corresponding col pixels if (kh_off % dilation_h == 0 && kw_off % dilation_w == 0) { kh_off /= dilation_h; kw_off /= dilation_w; const int col_idx = (((h * col_w + w) * kernel_h + kh_off) * kernel_w + kw_off) * C + im_c; val += col[col_idx]; } } } im[idx] = val; } } template <> void Col2Im2d<float, CUDAContext>(const int C, const int H, const int W, const int col_h, const int col_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int dilation_h, const int dilation_w, const string& data_format, const float* col, float* im) { if (data_format == "NCHW") { const int count = (C * H * W); _Col2Im2d_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, col, im); } else if (data_format == "NHWC") { const int count = (H * W * C); _Col2Im2d_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, C, H, W, col_h, col_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, col, im); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } /******************** vision.nn_resize ********************/ template <typename T> __global__ void _NNResize_NCHW(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); y[idx] = x[((n * C + c) * H + h_in) * W + w_in]; } } template <typename T> __global__ void _NNResize_NHWC(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); y[idx] = x[((n * H + h_in) * W + w_in) * C + c]; } } template <> void NNResize<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* x, float* y) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; if (data_format == "NCHW") { _NNResize_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else if(data_format == "NHWC") { _NNResize_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _NNResizeGrad_NCHW(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* 
dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % out_w; const int h = (idx / out_w) % out_h; const int c = (idx / out_w / out_h) % C; const int n = idx / out_w / out_h / C; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); atomicAdd(&dx[((n * C + c) * H + h_in) * W + w_in], dy[idx]); } } template <typename T> __global__ void _NNResizeGrad_NHWC(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const float scale_h, const float scale_w, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % out_w; const int h = (idx / C / out_w) % out_h; const int n = idx / C / out_w / out_h; const int h_in = min(int(floorf(h * scale_h)), H - 1); const int w_in = min(int(floorf(w * scale_w)), W - 1); atomicAdd(&dx[((n * H + h_in) * W + w_in) * C + c], dy[idx]); } } template <> void NNResizeGrad<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int out_h, const int out_w, const string& data_format, const float* dy, float* dx) { const float scale_h = (float)H / out_h; const float scale_w = (float)W / out_w; math::Set<float, CUDAContext>(N * C * H * W, 0, dx); if (data_format == "NCHW") { _NNResizeGrad_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else if(data_format == "NHWC") { _NNResizeGrad_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, out_h, out_w, scale_h, scale_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } /******************** vision.pooling ********************/ template<typename T> __global__ void _MAXPooling2d_NCHW(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int pw = idx % pool_w; const int ph = (idx / pool_w) % pool_h; const int pc = (idx / pool_w / pool_h) % C; const int pn = idx / pool_w / pool_h / C; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; const int end_h = min(start_h + kernel_h, H); const int end_w = min(start_w + kernel_w, W); start_h = max(start_h, 0); start_w = max(start_w, 0); T max_val = -FLT_MAX; int max_idx = -1; const T* x_ptr = x + (pn * C + pc) * H * W; for (int h = start_h; h < end_h; ++h) { for (int w = start_w; w < end_w; ++w) { if (x_ptr[h * W + w] > max_val) { max_idx = h * W + w; max_val = x_ptr[max_idx]; } } } y[idx] = max_val; mask[idx] = max_idx; } } template<typename T> __global__ void _MAXPooling2d_NHWC(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, int* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int pc = idx % C; const int pw = (idx / C) % pool_w; const int ph = (idx / C / pool_w) % pool_h; const int pn = idx / C / pool_w / pool_h; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; const int end_h = min(start_h + kernel_h, H); const int end_w = min(start_w + kernel_w, W); start_h = max(start_h, 0); start_w = max(start_w, 0); T max_val = -FLT_MAX; int max_idx = -1; for (int h = start_h; h < end_h; ++h) { for (int w = 
start_w; w < end_w; ++w) { const int x_idx = ((pn * H + h) * W + w) * C + pc; if (x[x_idx] > max_val) { max_idx = x_idx; max_val = x[max_idx]; } } } y[idx] = max_val; mask[idx] = max_idx; } } template<> void MAXPooling2d<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* x, int* mask, float* y) { if (data_format == "NCHW") { _MAXPooling2d_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, mask, y); } else if (data_format == "NHWC") { _MAXPooling2d_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, mask, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _AVGPooling2d_NCHW(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int pw = idx % pool_w; const int ph = (idx / pool_w) % pool_h; const int pc = (idx / pool_w / pool_h) % C; const int pn = idx / pool_w / pool_h / C; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); start_h = max(start_h, 0); start_w = max(start_w, 0); end_h = min(end_h, H); end_w = min(end_w, W); const T* x_ptr = x + (pn * C + pc) * H * W; const int pool_area = (end_h - start_h) * (end_w - start_w); T avg_val = 0; for (int h = start_h; h < end_h; ++h) { for (int w = start_w; w < end_w; ++w) { avg_val += x_ptr[h * W + w]; } } y[idx] = avg_val / pool_area; } } template<typename T> __global__ void _AVGPooling2d_NHWC(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* x, T* y) { CUDA_KERNEL_LOOP(idx, count) { const int pc = idx % C; const int pw = (idx / C) % pool_w; const int ph = (idx / C / pool_w) % pool_h; const int pn = idx / C / pool_w / pool_h; int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); start_h = max(start_h, 0); start_w = max(start_w, 0); end_h = min(end_h, H); end_w = min(end_w, W); const int pool_area = (end_h - start_h) * (end_w - start_w); T avg_val = 0; for (int h = start_h; h < end_h; ++h) for (int w = start_w; w < end_w; ++w) avg_val += x[((pn * H + h) * W + w) * C + pc]; y[idx] = avg_val / pool_area; } } template<> void AVGPooling2d<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* x, float* y) { if (data_format == "NCHW") { _AVGPooling2d_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, y); } else if 
(data_format == "NHWC") { _AVGPooling2d_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, x, y); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _MAXPooling2dGrad_NCHW(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; // allow overlapping const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; // allow clip const int end_ph = min((h + pad_h) / stride_h + 1, pool_h); const int end_pw = min((w + pad_w) / stride_w + 1, pool_w); T grad = 0; const int offset = (n * C + c) * pool_h * pool_w; const T* dy_ptr = dy + offset; const int* mask_ptr = mask + offset; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { if (mask_ptr[ph * pool_w + pw] == (h * W + w)) { grad += dy_ptr[ph * pool_w + pw]; } } } dx[idx] = grad; } } template<typename T> __global__ void _MAXPooling2dGrad_NHWC(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, const int* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % W; const int h = (idx / C / W) % H; const int n = idx / C / W / H; // allow overlapping const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; // allow clip const int end_ph = min((h + pad_h) / stride_h + 1, pool_h); const int end_pw = min((w + pad_w) / stride_w + 1, pool_w); T grad = 0; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { const int x_idx = ((n * H + h) * W + w) * C + c; const int y_idx = ((n * pool_h + ph) * pool_w + pw) * C + c; if (mask[y_idx] == x_idx) grad += dy[y_idx]; } } dx[idx] = grad; } } template<> void MAXPooling2dGrad<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* dy, const int* mask, float* dx) { if (data_format == "NCHW") { _MAXPooling2dGrad_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, mask, dx); } else if (data_format == "NHWC") { _MAXPooling2dGrad_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, mask, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } template<typename T> __global__ void _AVGPooling2dGrad_NCHW(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int w = idx % W; const int h = (idx / W) % H; const int c = (idx / W / H) % C; const int n = idx / W / H / C; const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int end_ph = min(h / stride_h + 1, pool_h); const int end_pw = min(w / stride_w + 1, pool_w); T grad = 0; const T* dy_ptr = dy + (n * C + c) * pool_h * pool_w; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); int pool_area = (end_h - start_h) * (end_w - start_w); grad += (dy_ptr[ph * pool_w + pw] / pool_area); } } dx[idx] = grad; } } template<typename T> __global__ void _AVGPooling2dGrad_NHWC(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const T* dy, T* dx) { CUDA_KERNEL_LOOP(idx, count) { const int c = idx % C; const int w = (idx / C) % W; const int h = (idx / C / W) % H; const int n = idx / C / W / H; const int start_ph = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int start_pw = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int end_ph = min(h / stride_h + 1, pool_h); const int end_pw = min(w / stride_w + 1, pool_w); T grad = 0; for (int ph = start_ph; ph < end_ph; ++ph) { for (int pw = start_pw; pw < end_pw; ++pw) { int start_h = ph * stride_h - pad_h; int start_w = pw * stride_w - pad_w; int end_h = min(start_h + kernel_h, H + pad_h); int end_w = min(start_w + kernel_w, W + pad_w); int pool_area = (end_h - start_h) * (end_w - start_w); const int y_idx = ((n * pool_h + ph) * pool_w + pw) * C + c; grad += (dy[y_idx] / pool_area); } } dx[idx] = grad; } } template<> void AVGPooling2dGrad<float, CUDAContext>(const int count, const int N, const int C, const int H, const int W, const int pool_h, const int pool_w, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const string& data_format, const float* dy, float* dx) { if (data_format == "NCHW") { _AVGPooling2dGrad_NCHW<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, dx); } else if (data_format == "NHWC") { _AVGPooling2dGrad_NHWC<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, N, C, H, W, pool_h, pool_w, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dy, dx); } else LOG(FATAL) << "Unknown data format: " << data_format; CUDA_POST_KERNEL_CHECK; } /******************** vision.roi_pooling ********************/ template <typename T> __global__ void _ROIPooling(const int count, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const T* x, const T* rois, int* mask, T* y) { CUDA_KERNEL_LOOP(idx, count) { int pw = idx % pool_w; int ph = (idx / pool_w) % pool_h; int c = (idx / pool_w / pool_h) % channels; int n = idx / pool_w / pool_h / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; if (roi_batch_ind < 0) { y[idx] = 0; mask[idx] = 0; continue; } int roi_start_w = round(offset_rois[1] * spatial_scale); int roi_start_h = round(offset_rois[2] * spatial_scale); int roi_end_w = round(offset_rois[3] * spatial_scale); int roi_end_h = round(offset_rois[4] * spatial_scale); int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); const T bin_size_h = (T)roi_height / (T)pool_h; const T bin_size_w = (T)roi_width / (T)pool_w; int hstart = floor(bin_size_h * ph); int wstart = floor(bin_size_w * pw); int hend = ceil(bin_size_h * (ph + 1)); int wend = ceil(bin_size_w * (pw + 1)); hstart = min(max(hstart + roi_start_h, 0), height); hend = min(max(hend + roi_start_h, 0), height); wstart = min(max(wstart + roi_start_w, 0), width); wend = min(max(wend + roi_start_w, 0), width); bool is_empty = (hend <= hstart) || (wend <= wstart); float max_val = is_empty ? 
0 : -FLT_MAX; int max_idx = -1; x += ((roi_batch_ind * channels + c) * height * width); for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { const int x_idx = h * width + w; if (x[x_idx] > max_val) { max_val = x[x_idx]; max_idx = x_idx; } } } y[idx] = max_val; mask[idx] = max_idx; } } template<> void ROIPooling<float, CUDAContext>(const float spatial_scale, const int pool_h, const int pool_w, Tensor* x, Tensor* rois, Tensor* mask, Tensor* y) { auto* Xdata = x->data<float, CUDAContext>(); auto* Rdata = rois->data<float, CUDAContext>(); auto* Ydata = y->mutable_data<float, CUDAContext>(); auto* Mdata = mask->mutable_data<int, CUDAContext>(); TIndex channels = x->dim(1), count = y->count(); TIndex height = x->dim(2), width = x->dim(3); _ROIPooling<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, spatial_scale, channels, height, width, pool_h, pool_w, Xdata, Rdata, Mdata, Ydata); CUDA_POST_KERNEL_CHECK; } template <typename T> __global__ void _ROIPoolingGrad(const int count, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const T* dy, const T* rois, const int* mask, T* dx) { CUDA_KERNEL_LOOP(idx, count) { int w = idx % width; int h = (idx / width) % height; int c = (idx / width / height) % channels; int n = idx / width / height / channels; T gradient = 0; for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const T* offset_rois = rois + roi_n * 5; int roi_batch_ind = offset_rois[0]; if (n != roi_batch_ind) continue; int roi_start_w = round(offset_rois[1] * spatial_scale); int roi_start_h = round(offset_rois[2] * spatial_scale); int roi_end_w = round(offset_rois[3] * spatial_scale); int roi_end_h = round(offset_rois[4] * spatial_scale); const bool in_roi = (w >= roi_start_w && w <= roi_end_w && h >= roi_start_h && h <= roi_end_h); if (!in_roi) continue; int y_offset = (roi_n * channels + c) * pool_h * pool_w; const T* offset_dy = dy + y_offset; const int* offset_mask = mask + y_offset; int roi_width = max(roi_end_w - roi_start_w + 1, 1); int roi_height = max(roi_end_h - roi_start_h + 1, 1); const T bin_size_h = (T)roi_height / (T)pool_h; const T bin_size_w = (T)roi_width / (T)pool_w; int phstart = floor(static_cast<T>(h - roi_start_h) / bin_size_h); int phend = ceil(static_cast<T>(h - roi_start_h + 1) / bin_size_h); int pwstart = floor(static_cast<T>(w - roi_start_w) / bin_size_w); int pwend = ceil(static_cast<T>(w - roi_start_w + 1) / bin_size_w); phstart = min(max(phstart, 0), pool_h); phend = min(max(phend, 0), pool_h); pwstart = min(max(pwstart, 0), pool_w); pwend = min(max(pwend, 0), pool_w); for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { int pool_idx = ph * pool_w + pw; if (offset_mask[pool_idx] == (h * width + w)) { gradient += offset_dy[pool_idx]; } } } } dx[idx] = gradient; } } template<> void ROIPoolingGrad<float, CUDAContext>(const float spatial_scale, const int pool_h, const int pool_w, Tensor* dy, Tensor* rois, Tensor* mask, Tensor* dx) { auto* dYdata = dy->data<float, CUDAContext>(); auto* Rdata = rois->data<float, CUDAContext>(); auto* Mdata = mask->data<int, CUDAContext>(); auto* dXdata = dx->mutable_data<float, CUDAContext>(); TIndex channels = dx->dim(1), count = dx->count(); TIndex height = dx->dim(2), width = dx->dim(3); _ROIPoolingGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, rois->dim(0), spatial_scale, channels, height, width, pool_h, pool_w, dYdata, Rdata, Mdata, dXdata); CUDA_POST_KERNEL_CHECK; } 
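/************************************************************************
 * Illustrative note (editor's sketch, not part of the original kernels):
 * _ROIPooling above maps each output element (n, c, ph, pw) to one
 * spatial bin of its ROI, takes the max over that bin, and records the
 * winning input offset in `mask` so that _ROIPoolingGrad can route dy
 * back to exactly that cell. The host-side reference below mirrors the
 * same binning arithmetic for a single valid ROI; the name
 * ROIPoolingCPUReference is hypothetical, and the code assumes <cfloat>,
 * <cmath> and <algorithm> are already available via the framework
 * headers included by this translation unit. It is intended only for
 * sanity-checking the CUDA path against a straightforward CPU loop.
 ************************************************************************/
inline void ROIPoolingCPUReference(const float spatial_scale,
                                   const int channels, const int height, const int width,
                                   const int pool_h, const int pool_w,
                                   const float* x,     /* (N, C, H, W) feature map */
                                   const float* roi,   /* [batch_ind, x1, y1, x2, y2] */
                                   int* mask, float* y /* (C, pool_h, pool_w) outputs */) {
    const int roi_batch_ind = (int)roi[0];
    const int roi_start_w = (int)roundf(roi[1] * spatial_scale);
    const int roi_start_h = (int)roundf(roi[2] * spatial_scale);
    const int roi_end_w = (int)roundf(roi[3] * spatial_scale);
    const int roi_end_h = (int)roundf(roi[4] * spatial_scale);
    const int roi_width = std::max(roi_end_w - roi_start_w + 1, 1);
    const int roi_height = std::max(roi_end_h - roi_start_h + 1, 1);
    const float bin_size_h = (float)roi_height / (float)pool_h;
    const float bin_size_w = (float)roi_width / (float)pool_w;
    for (int c = 0; c < channels; ++c) {
        const float* x_ptr = x + (roi_batch_ind * channels + c) * height * width;
        for (int ph = 0; ph < pool_h; ++ph) {
            for (int pw = 0; pw < pool_w; ++pw) {
                /* same clipping as the kernel: bin [hstart, hend) x [wstart, wend) */
                int hstart = (int)floorf(bin_size_h * ph);
                int wstart = (int)floorf(bin_size_w * pw);
                int hend = (int)ceilf(bin_size_h * (ph + 1));
                int wend = (int)ceilf(bin_size_w * (pw + 1));
                hstart = std::min(std::max(hstart + roi_start_h, 0), height);
                hend = std::min(std::max(hend + roi_start_h, 0), height);
                wstart = std::min(std::max(wstart + roi_start_w, 0), width);
                wend = std::min(std::max(wend + roi_start_w, 0), width);
                const bool is_empty = (hend <= hstart) || (wend <= wstart);
                float max_val = is_empty ? 0 : -FLT_MAX;
                int max_idx = -1;
                for (int h = hstart; h < hend; ++h) {
                    for (int w = wstart; w < wend; ++w) {
                        const int x_idx = h * width + w;
                        if (x_ptr[x_idx] > max_val) { max_val = x_ptr[x_idx]; max_idx = x_idx; }
                    }
                }
                const int y_idx = (c * pool_h + ph) * pool_w + pw;
                y[y_idx] = max_val; mask[y_idx] = max_idx;
            }
        }
    }
}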
/******************** vision.roi_align ********************/ template <typename T> __device__ T _ROIAlignInterpolate(const T* Xdata, const int height, const int width, T y, T x) { if (y < -1.0 || y > height || x < -1.0 || x > width) return 0; if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; T v1 = Xdata[y_low * width + x_low]; T v2 = Xdata[y_low * width + x_high]; T v3 = Xdata[y_high * width + x_low]; T v4 = Xdata[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void _ROIAlign(const int count, const float spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const int sampling_ratio, const T* Xdata, const T* rois, T* Ydata) { CUDA_KERNEL_LOOP(idx, count) { int pw = idx % pool_w; int ph = (idx / pool_w) % pool_h; int c = (idx / pool_w / pool_h) % channels; int n = idx / pool_w / pool_h / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; if (roi_batch_ind < 0) { Ydata[idx] = 0; continue; } T roi_start_w = offset_rois[1] * spatial_scale; T roi_start_h = offset_rois[2] * spatial_scale; T roi_end_w = offset_rois[3] * spatial_scale; T roi_end_h = offset_rois[4] * spatial_scale; T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pool_h); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pool_w); const T* offset_Xdata = Xdata + (roi_batch_ind * channels + c) * height * width; int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pool_h); int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pool_w); const T num_bin_grids = roi_bin_grid_h * roi_bin_grid_w; T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T val = _ROIAlignInterpolate(offset_Xdata, height, width, y, x); output_val += val; } } output_val /= num_bin_grids; Ydata[idx] = output_val; } } template<> void ROIAlign<float, CUDAContext>(const float spatial_scale, const int pool_h, const int pool_w, const int sampling_ratio, Tensor* x, Tensor* rois, Tensor* y) { auto* Xdata = x->data<float, CUDAContext>(); auto* Rdata = rois->data<float, CUDAContext>(); auto* Ydata = y->mutable_data<float, CUDAContext>(); TIndex channels = x->dim(1), count = y->count(); TIndex height = x->dim(2), width = x->dim(3); _ROIAlign<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, spatial_scale, channels, height, width, pool_h, pool_w, sampling_ratio, Xdata, Rdata, Ydata); CUDA_POST_KERNEL_CHECK; } template <typename T> __device__ void _ROIAlignInterpolateGrad(const int height, const int width, T y, T x, T & w1, T & w2, T & w3, T & w4, int & x_low, int & x_high, int & y_low, int & y_high) { if (y < -1.0 || y > height || x < -1.0 || x > width) { w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T)x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void _ROIAlignGrad(const int count, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pool_h, const int pool_w, const int sampling_ratio, const T* dYdata, const T* rois, T* dXdata) { CUDA_KERNEL_LOOP(idx, count) { int pw = idx % pool_w; int ph = (idx / pool_w) % pool_h; int c = (idx / pool_w / pool_h) % channels; int n = idx / pool_w / pool_h / channels; const T* offset_rois = rois + n * 5; int roi_batch_ind = offset_rois[0]; if (roi_batch_ind < 0) continue; T roi_start_w = offset_rois[1] * spatial_scale; T roi_start_h = offset_rois[2] * spatial_scale; T roi_end_w = offset_rois[3] * spatial_scale; T roi_end_h = offset_rois[4] * spatial_scale; T roi_width = max(roi_end_w - roi_start_w, (T)1.); T roi_height = max(roi_end_h - roi_start_h, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pool_h); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pool_w); T* offset_dXdata = dXdata + (roi_batch_ind * channels + c) * height * width; int y_offset = (n * channels + c) * pool_h * pool_w; const T* offset_dYdata = dYdata + y_offset; const T dYdata_this_bin = offset_dYdata[ph * pool_w + pw]; int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pool_h); int roi_bin_grid_w = (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pool_w); const T num_bin_grids = roi_bin_grid_h * roi_bin_grid_w; for (int iy = 0; iy < roi_bin_grid_h; iy++) { const T y = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; _ROIAlignInterpolateGrad(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); T g1 = dYdata_this_bin * w1 / num_bin_grids; T g2 = dYdata_this_bin * w2 / num_bin_grids; T g3 = dYdata_this_bin * w3 / num_bin_grids; T g4 = dYdata_this_bin * w4 / num_bin_grids; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_dXdata + y_low * width + x_low, static_cast<T>(g1)); atomicAdd(offset_dXdata + y_low * width + x_high, static_cast<T>(g2)); atomicAdd(offset_dXdata + y_high * width + x_low, static_cast<T>(g3)); atomicAdd(offset_dXdata + y_high * width + x_high, static_cast<T>(g4)); } } } } } template<> void ROIAlignGrad<float, CUDAContext>(const float spatial_scale, const int pool_h, const int pool_w, const int sampling_ratio, Tensor* dy, Tensor* rois, Tensor* dx) { auto* dYdata = dy->data<float, CUDAContext>(); auto* Rdata = rois->data<float, CUDAContext>(); auto* dXdata = dx->mutable_data<float, CUDAContext>(); TIndex channels = dx->dim(1), count = dy->count(); TIndex height = dx->dim(2), width = dx->dim(3); math::Set<float, CUDAContext>(dx->count(), 0, dXdata); _ROIAlignGrad<float> << <GET_BLOCKS(count), CUDA_NUM_THREADS >> >(count, rois->dim(0), spatial_scale, channels, height, width, pool_h, pool_w, sampling_ratio, dYdata, Rdata, dXdata); CUDA_POST_KERNEL_CHECK; } } // namespace kernel } // namespace dragon #endif // WITH_CUDA
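// --------------------------------------------------------------------------
// Illustrative host-side sketch (not part of this library): the bilinear
// corner weights used by _ROIAlignInterpolate and scattered back by
// _ROIAlignGrad. The names BilinearWeightsCPU / bilinear_weights_cpu are
// hypothetical; the clamping mirrors the device code above.

struct BilinearWeightsCPU {
  int x_low, x_high, y_low, y_high;  // corner indices, -1 when the sample is ignored
  float w1, w2, w3, w4;              // weights for (lo,lo), (lo,hi), (hi,lo), (hi,hi)
};

static BilinearWeightsCPU bilinear_weights_cpu(int height, int width, float y, float x) {
  BilinearWeightsCPU r = {};
  if (y < -1.f || y > height || x < -1.f || x > width) {
    r.x_low = r.x_high = r.y_low = r.y_high = -1;
    return r;  // out-of-range sample contributes nothing
  }
  if (y <= 0.f) y = 0.f;
  if (x <= 0.f) x = 0.f;
  r.y_low = (int)y;
  r.x_low = (int)x;
  if (r.y_low >= height - 1) { r.y_high = r.y_low = height - 1; y = (float)r.y_low; }
  else { r.y_high = r.y_low + 1; }
  if (r.x_low >= width - 1) { r.x_high = r.x_low = width - 1; x = (float)r.x_low; }
  else { r.x_high = r.x_low + 1; }
  const float ly = y - r.y_low, lx = x - r.x_low;
  const float hy = 1.f - ly, hx = 1.f - lx;
  r.w1 = hy * hx; r.w2 = hy * lx; r.w3 = ly * hx; r.w4 = ly * lx;  // weights sum to 1
  return r;
}
// The forward pass averages w1*v1 + w2*v2 + w3*v3 + w4*v4 over each bin's
// sampling grid; the backward pass atomically adds dY * w_i / num_bin_grids
// to the four corner locations.
// --------------------------------------------------------------------------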
namespace caffe2 { #define CUDA_FUNCTOR(name, op, input_type, output_type) \ template <int b_is_scalar, typename T, typename R> \ __global__ void name##Kernel(const T* a, const T* b, R* out, int n) { \ CUDA_1D_KERNEL_LOOP(i, n) { \ out[i] = op(a[i], b[b_is_scalar ? 0 : i]); \ } \ } \ template <typename T, typename R> \ __global__ void name##BroadcastKernel( \ const T* a, const T* b, R* out, int pre, int n) { \ CUDA_1D_KERNEL_LOOP(i, pre * n) { \ out[i] = op(a[i], b[i % n]); \ } \ } \ template <typename T, typename R> \ __global__ void name##Broadcast2Kernel( \ const T* a, const T* b, R* out, int pre, int n, int post) { \ CUDA_1D_KERNEL_LOOP(i, pre * n * post) { \ out[i] = op(a[i], b[(i / post) % n]); \ } \ } \ \ struct Cuda##name##Functor { \ template <bool b_is_scalar, typename T, typename R> \ inline void Run( \ size_t n, const T* a, const T* b, R* out, CUDAContext* context) { \ name##Kernel<b_is_scalar, T, R><<<CAFFE_GET_BLOCKS(n), \ CAFFE_CUDA_NUM_THREADS, \ 0, context->cuda_stream()>>>( \ a, b, out, n); \ } \ template <typename T, typename R> \ void RunWithBroadcast( \ const T* a, const T* b, R* out, size_t pre, size_t n, \ CUDAContext* context) { \ name##BroadcastKernel<T, R><<<CAFFE_GET_BLOCKS(pre * n), \ CAFFE_CUDA_NUM_THREADS, \ 0, context->cuda_stream()>>>( \ a, b, out, pre, n); \ } \ template <typename T, typename R> \ void RunWithBroadcast2( \ const T* a, const T* b, R* out, size_t pre, size_t n, size_t post, \ CUDAContext* context) { \ name##Broadcast2Kernel<T, R><<<CAFFE_GET_BLOCKS(pre * n * post), \ CAFFE_CUDA_NUM_THREADS, \ 0, context->cuda_stream()>>>( \ a, b, out, pre, n, post); \ } \ }; \ REGISTER_CUDA_OPERATOR( \ name, BinaryElementwiseOp< \ input_type, CUDAContext, Cuda##name##Functor, output_type>) #define CUDA_SUB(x, y) ((x) - (y)) CUDA_FUNCTOR(Sub, CUDA_SUB, NumericTypes, SameTypeAsInput); #undef CUDA_SUB #define CUDA_MUL(x, y) ((x) * (y)) CUDA_FUNCTOR(Mul, CUDA_MUL, NumericTypes, SameTypeAsInput); #undef CUDA_MUL #define CUDA_DIV(x, y) ((x) / (y)) CUDA_FUNCTOR(Div, CUDA_DIV, NumericTypes, SameTypeAsInput); #undef CUDA_DIV #define CUDA_LT(x, y) ((x) < (y)) CUDA_FUNCTOR(LT, CUDA_LT, NumericTypes, FixedType<bool>); #undef CUDA_LT #define CUDA_LE(x, y) ((x) <= (y)) CUDA_FUNCTOR(LE, CUDA_LE, NumericTypes, FixedType<bool>); #undef CUDA_LE #define CUDA_GT(x, y) ((x) > (y)) CUDA_FUNCTOR(GT, CUDA_GT, NumericTypes, FixedType<bool>); #undef CUDA_GT #define CUDA_GE(x, y) ((x) >= (y)) CUDA_FUNCTOR(GE, CUDA_GE, NumericTypes, FixedType<bool>); #undef CUDA_GE #define CUDA_EQ(x, y) ((x) == (y)) CUDA_FUNCTOR(EQ, CUDA_EQ, IntTypes, FixedType<bool>); #undef CUDA_EQ #define CUDA_AND(x, y) ((x) & (y)) CUDA_FUNCTOR(And, CUDA_AND, BoolTypes, FixedType<bool>); #undef CUDA_AND #define CUDA_OR(x, y) ((x) | (y)) CUDA_FUNCTOR(Or, CUDA_OR, BoolTypes, FixedType<bool>); #undef CUDA_OR #define CUDA_XOR(x, y) ((x) ^ (y)) CUDA_FUNCTOR(Xor, CUDA_XOR, BoolTypes, FixedType<bool>); #undef CUDA_XOR __global__ void NotKernel(const int n, const bool* x, bool* y) { CUDA_1D_KERNEL_LOOP(i, n) { y[i] = !x[i]; } } struct CudaNotFunctor { inline void operator()( const int n, const bool* x, bool* y, CUDAContext* context) { NotKernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(n, x, y); } }; REGISTER_CUDA_OPERATOR( Not, UnaryElementwiseOp<BoolTypes, CUDAContext, CudaNotFunctor>); __global__ void DivKernel(const int n, float *dXdata, float *dYdata, const float *dZdata, const float *Ydata, const float *Zdata) { CUDA_1D_KERNEL_LOOP(i, n) { dXdata[i] = dZdata[i] / Ydata[i]; dYdata[i] = 
- (dZdata[i] * Zdata[i]) / Ydata[i]; } } void ElementWiseDivide( CUDAContext& context, const int n, float* dXdata, float* dYdata, const float* dZdata, const float* Ydata, const float* Zdata) { DivKernel<<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context.cuda_stream()>>>(n, dXdata, dYdata, dZdata, Ydata, Zdata); } REGISTER_CUDA_OPERATOR(DivGradient, DivGradientOp<CUDAContext>); namespace { template <typename T> __global__ void reduce_sum_like_post1(const T* g_idata, T* g_odata, int pre, int N) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n >= N) { return; } float sum = 0.0; for (int i = 0; i < pre; ++i) { sum += convert::To<T, float>(g_idata[i * N + n]); } g_odata[n] = convert::To<float, T>(sum); } template <typename T> void device_reduce( const T* d_in, T* d_out, int N, Tensor<CUDAContext>* buffer, CUDAContext* context) { // Determine temporary device storage requirements size_t temp_storage_bytes = 0; cub::DeviceReduce::Sum( NULL, temp_storage_bytes, d_in, d_out, N, context->cuda_stream()); auto buffer_size = temp_storage_bytes / sizeof(T); buffer_size += temp_storage_bytes % sizeof(T) != 0 ? 1 : 0; buffer->Resize(buffer_size); void* d_temp_storage = static_cast<void*>(buffer->template mutable_data<T>()); // Run sum-reduction cub::DeviceReduce::Sum( d_temp_storage, temp_storage_bytes, d_in, d_out, N, context->cuda_stream()); } template <> void device_reduce<float16>( const float16* in, float16* out, int N, Tensor<CUDAContext>* buffer, CUDAContext* context) { auto buffer_size = 1; if (buffer->size() != buffer_size) { buffer->Resize(buffer_size); math::Set<float16, CUDAContext>( N, convert::To<float,float16>(1.), buffer->mutable_data<float16>(), context); } CUBLAS_ENFORCE(cublasDotEx( context->cublas_handle(), N, in, CUDA_R_16F, 1, buffer->data<float16>(), CUDA_R_16F, 0, out, CUDA_R_16F, CUDA_R_32F)); } template <typename T, int BLOCK_THREADS> __global__ void reduce_sum_like(const T* g_idata, T* g_odata, int pre, int N, int post) { int n = blockIdx.x; float sum = 0.0; int limit = pre * post; for (int i = threadIdx.x; i < limit; i += blockDim.x) { int curPre = i / post; int curPost = i % post; sum += convert::To<T, float>(g_idata[curPre * N * post + n * post + curPost]); } // uses a shared memory reduction within block typedef cub::BlockReduce<float, BLOCK_THREADS> BlockReduceT; // Shared memory __shared__ typename BlockReduceT::TempStorage temp_storage; float aggregate = BlockReduceT(temp_storage).Sum(sum); if (threadIdx.x == 0) { g_odata[n] = convert::To<float, T>(aggregate); } } } // namespace template <> template <typename T> bool SumReduceLikeOp<CUDAContext>::DoRunWithType() { const auto& A = Input(0); const auto& B = Input(1); auto* C = Output(0); auto count = A.size(); CAFFE_ENFORCE(&B != C, "In-place is not allowed."); C->ResizeLike(B); const T* Adata = A.template data<T>(); auto* Cdata = C->template mutable_data<T>(); if (B.size() == 1) { device_reduce<T>(Adata, Cdata, count, &sum_buffer_, &context_); } else { size_t pre, n, post; std::tie(pre, n, post) = calculate_broadcast_sizes(A, B, axis_); // because we check shape(B) \in shape(A) before, // post and pre cannot be 1 at same time if (post == 1) { reduce_sum_like_post1<T> <<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(Adata, Cdata, pre, n); } else { if (post >= 128) { reduce_sum_like<T, 512> <<<n, 512, 0, context_.cuda_stream()>>>(Adata, Cdata, pre, n, post); } else if (post >= 64) { reduce_sum_like<T, 128> <<<n, 128, 0, context_.cuda_stream()>>>(Adata, Cdata, pre, n, post); } else if 
(post >= 32) { reduce_sum_like<T, 64> <<<n, 64, 0, context_.cuda_stream()>>>(Adata, Cdata, pre, n, post); } else { reduce_sum_like<T, 32> <<<n, 32, 0, context_.cuda_stream()>>>(Adata, Cdata, pre, n, post); } } } return true; } template <> bool SumReduceLikeOp<CUDAContext>::RunOnDevice() { return DispatchHelper<TensorTypes<float, float16>>::call(this, Input(0)); } REGISTER_CUDA_OPERATOR(SumReduceLike, SumReduceLikeOp<CUDAContext>); namespace { template <bool is_scaler, typename T, typename M> __global__ void binary_add_kernel(const int N, const T* a, const T* b, T* r) { CUDA_1D_KERNEL_LOOP(idx, N) { r[idx] = convert::To<M, T>( convert::To<T, M>(a[idx]) + convert::To<T, M>(is_scaler ? b[0] : b[idx])); } } template <bool no_post, typename T, typename M> __global__ void binary_add_kernel_broadcast( const T* a, const T* b, T* r, const int pre, const int post, const int n) { CUDA_1D_KERNEL_LOOP(idx, no_post ? pre * n : pre * post * n) { r[idx] = convert::To<M, T>( convert::To<T, M>(a[idx]) + convert::To<T, M>(no_post ? b[idx % n] : b[(idx / post) % n])); } } } // namespace // Actual Add operator, because the above macros are read-only. class CUDAAddOp final : public Operator<CUDAContext> { public: CUDAAddOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws), OP_SINGLE_ARG(bool, "broadcast", enable_broadcast_, 0), OP_SINGLE_ARG(int, "axis", axis_, -1), OP_SINGLE_ARG(string, "axis_str", axis_str_, ""), OP_SINGLE_ARG(string, "order", order_, "NCHW") { // Figure out the correct axis to use. if (enable_broadcast_) { if (axis_ != -1) { // Get axis from an explicit axis argument. CAFFE_ENFORCE_EQ( axis_str_.size(), 0, "Args axis and axis_str cannot be used simultaneously."); } else if (axis_str_.size()) { // Get the axis index semantically. 
CAFFE_ENFORCE_EQ( axis_str_.size(), 1, "Unsupported axis string", axis_str_); size_t semantic_axis_ = order_.find(axis_str_); CAFFE_ENFORCE_NE( semantic_axis_, string::npos, "Unrecognizable axis string ", axis_str_, " from order string ", order_); axis_ = semantic_axis_; } } else { CAFFE_ENFORCE( axis_ == -1 && axis_str_.size() == 0, "Do not specify axis or axis_str if broadcast is not enabled."); } } ~CUDAAddOp() {} template <typename T, typename M> bool DoRunWithType() { auto& X0 = Input(0); auto& X1 = Input(1); auto* output = Output(0); output->ResizeLike(X0); const T* X0data = X0.template data<T>(); const T* X1data = X1.template data<T>(); T* outputData = output->template mutable_data<T>(); if (!enable_broadcast_) { CAFFE_ENFORCE_EQ( X0.dims(), X1.dims(), "Dimension mismatch - did you forget to set broadcast=1?"); binary_add_kernel<false, T, M><<< CAFFE_GET_BLOCKS(X0.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(X0.size(), X0data, X1data, outputData); } else if (X1.size() == 1) { binary_add_kernel<true, T, M><<< CAFFE_GET_BLOCKS(X0.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(X0.size(), X0data, X1data, outputData); } else { size_t pre, n, post; std::tie(pre, n, post) = calculate_broadcast_sizes(X0, X1, axis_); if (post == 1) { binary_add_kernel_broadcast<true, T, M><<< CAFFE_GET_BLOCKS(pre * n), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(X0data, X1data, outputData, pre, post, n); } else { binary_add_kernel_broadcast<false, T, M><<< CAFFE_GET_BLOCKS(pre * post * n), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(X0data, X1data, outputData, pre, post, n); } } return true; } bool RunOnDevice() override { if (Input(0).IsType<float>()) { return DoRunWithType<float, float>(); } else if (Input(0).IsType<float16>()) { return DoRunWithType<float16, float>(); } else if (Input(0).IsType<int32_t>()) { return DoRunWithType<int32_t, int32_t>(); } else if (Input(0).IsType<int64_t>()) { return DoRunWithType<int64_t, int64_t>(); } else { return false; } } private: bool enable_broadcast_; int axis_; string axis_str_; string order_; }; REGISTER_CUDA_OPERATOR(Add, CUDAAddOp); } // namespace caffe2
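// --------------------------------------------------------------------------
// Illustrative sketch (not part of Caffe2): the pre/n/post index mapping used
// by the Broadcast2 kernels and binary_add_kernel_broadcast above. For an
// output viewed as shape [pre, n, post] and a second operand of shape [n],
// flat element i of A is combined with element (i / post) % n of B.
// broadcast_add_cpu is a hypothetical host-side reference.
#include <vector>

static std::vector<float> broadcast_add_cpu(const std::vector<float>& a,
                                            const std::vector<float>& b,
                                            int pre, int n, int post) {
  std::vector<float> out(a.size());  // expects a.size() == pre * n * post, b.size() == n
  for (size_t i = 0; i < a.size(); ++i) {
    out[i] = a[i] + b[(i / post) % n];  // same rule as name##Broadcast2Kernel
  }
  return out;
}
// Example: pre=2, n=3, post=4 broadcasts a length-3 B along the middle axis
// of a (2, 3, 4) tensor; the post == 1 case reduces to b[i % n], which is the
// separate BroadcastKernel path.
// --------------------------------------------------------------------------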
//#include "thrust/detail/device_ptr.inl" __global__ void findAdjacencySizesKernel(int size, int *adjIndexes, int *output) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { output[idx] = adjIndexes[idx + 1] - adjIndexes[idx]; } } __global__ void allocateNodesKernel(int size, int *adjIndexes, int *adjacency, int *partIn, int *partOut, int *aggregated) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { if(aggregated[idx] == 0) { int start = adjIndexes[idx]; int end = adjIndexes[idx + 1]; // Storage for possible aggregations. int candidates[10]; int candidateCounts[10]; for(int i = 0; i < 10; i++) { candidates[i] = -1; candidateCounts[i] = 0; } // Going through neighbors to aggregate: for(int i = start; i < end; i++) { int candidate = partIn[ adjacency[i] ]; if(candidate != -1) { for(int j = 0; j < 10 && candidate != -1; j++) { if(candidates[j] == -1) { candidates[j] = candidate; candidateCounts[j] = 1; } else { if(candidates[j] == candidate) { candidateCounts[j] += 1; candidate = -1; } } } } } // Finding the most adjacent aggregate and adding node to it: int addTo = candidates[0]; int count = candidateCounts[0]; for(int i = 1; i < 10; i++) { if(candidateCounts[i] > count) { count = candidateCounts[i]; addTo = candidates[i]; } } partOut[idx] = addTo; if(addTo != -1) { aggregated[idx] = 1; } } } } __global__ void findPartIndicesKernel(int size, int *array, int *partIndices) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { int value = array[idx]; int nextValue = (idx != size - 1) ? array[idx + 1] : -1; if (value != nextValue) { partIndices[value + 1] = idx + 1; } } } __global__ void findPartIndicesNegStartKernel(int size, int *array, int *partIndices) { int idx = blockIdx.x * blockDim.x + threadIdx.x + 1; if(idx < size) { int value = array[idx]; int nextValue = array[idx + 1]; if(value != nextValue) partIndices[value + 1] = idx; } } __global__ void fillWithIndexKernel(int size, int *array) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { array[idx] = idx; } } __global__ void getInversePermutationKernel(int size, int *original, int *inverse) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { inverse[original[idx]] = idx; } } __global__ void permuteInitialAdjacencyKernel(int size, int *adjIndexesIn, int *adjacencyIn, int *permutedAdjIndexesIn, int *permutedAdjacencyIn, int *ipermutation, int *fineAggregate) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { int oldBegin = adjIndexesIn[ipermutation[idx]]; int oldEnd = adjIndexesIn[ipermutation[idx] + 1]; int runSize = oldEnd - oldBegin; int newBegin = permutedAdjIndexesIn[idx]; //int newEnd = permutedAdjIndexesIn[idx + 1]; //int newRunSize = newEnd - newBegin; //printf("Thread %d is copying from %d through %d into %d through %d\n", idx, oldBegin, oldEnd, newBegin, newEnd); // Transfer old adjacency into new, while changing node id's with partition id's for(int i = 0; i < runSize; i++) { permutedAdjacencyIn[newBegin + i] = fineAggregate[ adjacencyIn[oldBegin + i] ]; } } } __global__ void getInducedGraphNeighborCountsKernel(int size, int *aggregateIdx, int *adjIndexesOut, int *permutedAdjIndexes, int *permutedAdjacencyIn) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { int Begin = permutedAdjIndexes[ aggregateIdx[idx] ]; int End = permutedAdjIndexes[ aggregateIdx[idx + 1] ]; // Sort each section of the adjacency: for(int i = Begin; i < End - 1; i++) { for(int ii = i + 1; ii < End; ii++) { if(permutedAdjacencyIn[i] < 
permutedAdjacencyIn[ii]) { int temp = permutedAdjacencyIn[i]; permutedAdjacencyIn[i] = permutedAdjacencyIn[ii]; permutedAdjacencyIn[ii] = temp; } } } // Scan through the sorted adjacency to get the condensed adjacency: int neighborCount = 1; if(permutedAdjacencyIn[Begin] == idx) neighborCount = 0; for(int i = Begin + 1; i < End; i++) { if(permutedAdjacencyIn[i] != permutedAdjacencyIn[i - 1] && permutedAdjacencyIn[i] != idx) { permutedAdjacencyIn[neighborCount + Begin] = permutedAdjacencyIn[i]; neighborCount++; } } // Store the size adjIndexesOut[idx] = neighborCount; } } __global__ void fillCondensedAdjacencyKernel(int size, int *aggregateIdx, int *adjIndexesOut, int *adjacencyOut, int *permutedAdjIndexesIn, int *permutedAdjacencyIn) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { int oldBegin = permutedAdjIndexesIn[ aggregateIdx[idx] ]; int newBegin = adjIndexesOut[idx]; int runSize = adjIndexesOut[idx + 1] - newBegin; // Copy adjacency over for(int i = 0; i < runSize; i++) { adjacencyOut[newBegin + i] = permutedAdjacencyIn[oldBegin + i]; } } } __global__ void fillPartitionLabelKernel(int size, int *coarseAggregate, int *fineAggregateSort, int *partitionLabel) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { partitionLabel[idx] = coarseAggregate[ fineAggregateSort[idx] ]; } } __global__ void getAggregateStartIndicesKernel(int size, int *fineAggregateSort, int *aggregateRemapIndex) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { if(idx == 0 || fineAggregateSort[idx] != fineAggregateSort[idx - 1]) { aggregateRemapIndex[fineAggregateSort[idx]] = idx; } } } __global__ void remapAggregateIdxKernel(int size, int *fineAggregateSort, int *aggregateRemapId) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { fineAggregateSort[idx] = aggregateRemapId[fineAggregateSort[idx]]; } } __global__ void mapAdjacencyToBlockKernel(int size, int *adjIndexes, int *adjacency, int *adjacencyBlockLabel, int *blockMappedAdjacency, int *fineAggregate) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { int begin = adjIndexes[idx]; int end = adjIndexes[idx + 1]; int thisBlock = fineAggregate[idx]; // Fill block labeled adjacency and block mapped adjacency vectors for(int i = begin; i < end; i++) { int neighbor = fineAggregate[adjacency[i]]; if(thisBlock == neighbor) { adjacencyBlockLabel[i] = -1; blockMappedAdjacency[i] = -1; } else { adjacencyBlockLabel[i] = thisBlock; blockMappedAdjacency[i] = neighbor; } } } } __global__ void removeRuntyPartsKernel(int size, int *partition, int *removeStencil, int *subtractions) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { int currentNode = partition[idx]; if(removeStencil[currentNode] == 1) partition[idx] = -1; else partition[idx] -= subtractions[currentNode]; } } __global__ void accumulatedPartSizesKernel(int size, int *part, int *weights, int *accumulatedSize) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx == size - 1) accumulatedSize[part[idx]] = weights[idx]; if(idx < size - 1) { int thisPart = part[idx]; if(thisPart != part[idx + 1]) accumulatedSize[thisPart] = weights[idx]; } } __global__ void unaccumulatedPartSizesKernel(int size, int *accumulatedSize, int *sizes) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx == 0) sizes[idx] = accumulatedSize[0]; else if(idx < size) { sizes[idx] = accumulatedSize[idx] - accumulatedSize[idx - 1]; } } __global__ void findDesirabilityKernel(int size, int optimalSize, int *adjIndexes, int *adjacency, int 
*partition, int *partSizes, int *nodeWeights, int *swap_to, int *swap_from, int *swap_index, float *desirability) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < size) { int currentPart = partition[idx]; int currentPartSize = partSizes[currentPart]; int nodeSize = nodeWeights[idx]; int selfAdjacency = 0; int addTo = -1; float bestDesirability = 0; // The currentWeightFactor is higher the farther the count is from average float currentWeightFactor = (float)abs(currentPartSize - optimalSize) / optimalSize; // The self improvement is a measure of how much better this partitions size will be if the node is gone. float selfImprovement = (abs(currentPartSize - optimalSize) - abs((currentPartSize - nodeSize) - optimalSize)) * currentWeightFactor; if(selfImprovement > 0) { int start = adjIndexes[idx]; int end = adjIndexes[idx + 1]; // Arrays to store info about neighboring aggregates int candidates[10]; int candidateCounts[10]; for(int i = 0; i < 10; i++) { candidates[i] = -1; candidateCounts[i] = 0; } // Going through the neighbors: for(int i = start; i < end; i++) { int candidate = partition[ adjacency[i] ]; if(candidate == currentPart) selfAdjacency++; else for(int j = 0; j < 10; j++) { if(candidate != -1 && candidates[j] == -1) { candidates[j] = candidate; candidateCounts[j] = 1; candidate = -1; } else if(candidates[j] == candidate) { candidateCounts[j] += 1; candidate = -1; } } } // Finding the best possible swap: for(int i = 1; i < 10; i++) { if(candidates[i] != -1) { int neighborPart = candidates[i]; int neighborPartSize = partSizes[neighborPart]; float neighborWeightFactor = (float)abs(neighborPartSize - optimalSize) / optimalSize; float neighborImprovement = ((float)(abs(neighborPartSize - optimalSize) - abs((neighborPartSize + nodeSize) - optimalSize))) * neighborWeightFactor; // Combining with self improvement to get net neighborImprovement += selfImprovement; // Multiplying by adjacency factor neighborImprovement *= (float)candidateCounts[i] / selfAdjacency; if(neighborImprovement > bestDesirability) { addTo = neighborPart; bestDesirability = neighborImprovement; } } } } swap_from[idx] = currentPart; swap_index[idx] = idx; swap_to[idx] = addTo; desirability[idx] = bestDesirability; } } __global__ void makeSwapsKernel(int size, int *partition, int *partSizes, int *nodeWeights, int *swap_to, int *swap_from, int *swap_index, float *desirability) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx == size - 1) { if(desirability[idx] > .1) { int swapTo = swap_to[idx]; int swapFrom = swap_from[idx]; int swapIndex = swap_index[idx]; int nodeWeight = nodeWeights[swapIndex]; partition[swapIndex] = swapTo; atomicAdd(&partSizes[swapTo], nodeWeight); atomicAdd(&partSizes[swapFrom], -nodeWeight); //printf("Swapping node: %d, %d from part: %d, %d to part: %d, %d desirability: %f\n", swapIndex, nodeWeight, swapFrom, partSizes[swapFrom], swapTo, partSizes[swapTo], desirability[idx]); } } else if(idx < size - 1) { if(desirability[idx] > .1 && swap_from[idx] != swap_from[idx + 1]) { int swapTo = swap_to[idx]; int swapFrom = swap_from[idx]; int swapIndex = swap_index[idx]; int nodeWeight = nodeWeights[swapIndex]; partition[swapIndex] = swapTo; atomicAdd(&partSizes[swapTo], nodeWeight); atomicAdd(&partSizes[swapFrom], -nodeWeight); //printf("Swapping node: %d, %d from part: %d, %d to part: %d, %d desirability: %f\n", swapIndex, nodeWeight, swapFrom, partSizes[swapFrom], swapTo, partSizes[swapTo], desirability[idx]); } } } void misHelpers::getMIS(IdxVector_d &adjIndexes, IdxVector_d 
&adjacency, IdxVector_d &misStencil, int depth) { IdxVector_d mtxValues_d(adjacency.size(), 1); int vSize = adjIndexes.size() - 1; // IdxVector_h tmp = misStencil; // for(int i=0; i<10; i++) // { // printf("%d\n", tmp[i]); // } // Creating a matrix with the vectors supplied: // devMtx graphy(vSize, vSize, adjacency.size()); // graphy.column_indices = adjacency; // cusp::detail::offsets_to_indices(adjIndexes , graphy.row_indices); // graphy.values = mtxValues_d; // cusp::print(graphy); cusp::csr_matrix<int, int, cusp::device_memory> graphy(vSize, vSize, adjacency.size()); graphy.column_indices = adjacency; graphy.row_offsets = adjIndexes; cusp::graph::maximal_independent_set(graphy, misStencil, depth); // tmp = misStencil; // for(int i=0; i<50; i++) // { // printf("%d\n", tmp[i]); // } graphy.resize(0, 0, 0); } void misHelpers::getAdjacency(TriMesh *meshPtr, IdxVector_d &adjIndexes, IdxVector_d &adjacency) { int vSize = meshPtr->vertices.size(); meshPtr->need_neighbors(); // Finding total size of adjacency list: int adjacencySize = 0; for(int i = 0; i < vSize; i++) { adjacencySize += meshPtr->neighbors[i].size(); } // Vectors to hold the adjacency: IdxVector_h adjIndexes_h(vSize + 1); IdxVector_h adjacency_h(adjacencySize); // Populating adjacency adjIndexes_h[0] = 0; ; int nextOffset = 0; for(int i = 0; i < vSize; i++) { for(int j = 0; j < meshPtr->neighbors[i].size(); j++) adjacency_h[nextOffset + j] = meshPtr->neighbors[i][j]; nextOffset += meshPtr->neighbors[i].size(); adjIndexes_h[i + 1] = nextOffset; } // Copying to device vectors adjIndexes = adjIndexes_h; adjacency = adjacency_h; } void misHelpers::getAdjacency(TetMesh *meshPtr, IdxVector_d &adjIndexes, IdxVector_d &adjacency) { int vSize = meshPtr->vertices.size(); meshPtr->need_neighbors(); // Finding total size of adjacency list: int adjacencySize = 0; for(int i = 0; i < vSize; i++) { adjacencySize += meshPtr->neighbors[i].size(); } // Vectors to hold the adjacency: IdxVector_h adjIndexes_h(vSize + 1); IdxVector_h adjacency_h(adjacencySize); // Populating adjacency adjIndexes_h[0] = 0; ; int nextOffset = 0; for(int i = 0; i < vSize; i++) { for(int j = 0; j < meshPtr->neighbors[i].size(); j++) adjacency_h[nextOffset + j] = meshPtr->neighbors[i][j]; nextOffset += meshPtr->neighbors[i].size(); adjIndexes_h[i + 1] = nextOffset; } // Copying to device vectors adjIndexes = adjIndexes_h; adjacency = adjacency_h; } void misHelpers::aggregateGraph(int minSize, int depth, IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &partIn, bool verbose) { int size = adjIndexes.size() - 1; // Get an MIS for the graph: // getMIS(adjIndexes, adjacency, partIn, depth); randomizedMIS(adjIndexes, adjacency, partIn, depth); IdxVector_d aggregated = partIn; IdxVector_d partOut; // Prefix sum to number aggregate roots: thrust::inclusive_scan(partIn.begin(), partIn.end(), partIn.begin()); if( verbose ) std::cout << "Finished aggregateGraph inclusive_scan." << std::endl; int misCount = partIn.back(); // DataRecorder::Add("Fine MIS Count", misCount); // Transform non root nodes to -1 thrust::transform(partIn.begin(), partIn.end(), aggregated.begin(), partIn.begin(), ifLabelOne()); partOut = partIn; if( verbose ) std::cout << "Finished aggregateGraph thrust::transform." 
<< std::endl; // Preparing to call aggregate kernel: int *partIn_d; // Pointer to partIn vector int *partOut_d; // Pointer to partOut vector int *adjIndexes_d; // Pointer to adjacency indexes int *adjacency_d; // Pointer to adjacency int *aggregated_d; // Pointer to aggregated bool complete = false; // Indicates whether all nodes are aggregated partIn_d = thrust::raw_pointer_cast(&partIn[0]); partOut_d = thrust::raw_pointer_cast(&partOut[0]); adjIndexes_d = thrust::raw_pointer_cast(&adjIndexes[0]); adjacency_d = thrust::raw_pointer_cast(&adjacency[0]); aggregated_d = thrust::raw_pointer_cast(&aggregated[0]); // Figuring out block sizes for kernel call: int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); int loopCounter = 0; if( verbose ) std::cout << "Starting aggregateGraph loop." << std::endl; while(!complete) { // Allocating nodes allocateNodesKernel <<< nBlocks, blockSize >>> (size, adjIndexes_d, adjacency_d, partIn_d, partOut_d, aggregated_d); // Copying partOut to partIn partIn = partOut; // Checking if done int unallocatedNodes = thrust::count(aggregated.begin(), aggregated.end(), 0); if(unallocatedNodes == 0) { // Trying to remove parts below minSize complete = removeRuntyParts(minSize, partIn); // If stuff was removed get the aggregated labeling again if(!complete) { thrust::transform(partIn.begin(), partIn.end(), aggregated.begin(), findAggregated()); partOut = partIn; } } if( verbose ) { bool doPrint = false; if( loopCounter < 10 ) { doPrint = true; } else if( loopCounter < 50 ) { if( loopCounter % 5 == 0 ) doPrint = true; } else if( loopCounter < 250 ) { if( loopCounter % 10 == 0 ) doPrint = true; } else { if( loopCounter % 100 == 0 ) doPrint = true; } if( doPrint ) std::cout << "Finished loop " << loopCounter << " in aggregateGraph loop with " << unallocatedNodes << " unallocated nodes." << std::endl; loopCounter++; } } } void misHelpers::aggregateWeightedGraph(int maxSize, int fullSize, int depth, IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &partIn, IdxVector_d &nodeWeights, bool verbose) { int size = adjIndexes.size() - 1; // Get an MIS for the graph: // getMIS(adjIndexes, adjacency, partIn, depth); randomizedMIS(adjIndexes, adjacency, partIn, depth); IdxVector_d aggregated = partIn; IdxVector_d partOut; // Prefix sum to number aggregate roots: thrust::inclusive_scan(partIn.begin(), partIn.end(), partIn.begin()); if( verbose ) std::cout << "Finished inclusive_scan." << std::endl; int misCount = partIn.back(); // DataRecorder::Add("Coarse MIS Count", misCount); // Transform non root nodes to -1 thrust::transform(partIn.begin(), partIn.end(), aggregated.begin(), partIn.begin(), ifLabelOne()); partOut = partIn; if( verbose ) std::cout << "Finished thrust::transform." << std::endl; // Preparing to call aggregate kernel: int *partIn_d; // Pointer to partIn vector int *partOut_d; // Pointer to partOut vector int *adjIndexes_d; // Pointer to adjacency indexes int *adjacency_d; // Pointer to adjacency int *aggregated_d; // Pointer to aggregated bool complete = false; // Indicates whether all nodes are aggregated partIn_d = thrust::raw_pointer_cast(&partIn[0]); partOut_d = thrust::raw_pointer_cast(&partOut[0]); adjIndexes_d = thrust::raw_pointer_cast(&adjIndexes[0]); adjacency_d = thrust::raw_pointer_cast(&adjacency[0]); aggregated_d = thrust::raw_pointer_cast(&aggregated[0]); // Figuring out block sizes for kernel call: int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 
0 : 1); bool firstTime = true; int counter = 0; while(!complete) { counter++; // Allocating nodes allocateNodesKernel << < nBlocks, blockSize >> > (size, adjIndexes_d, adjacency_d, partIn_d, partOut_d, aggregated_d); // Copying partOut to partIn partIn = partOut; // Checking if done int unallocatedNodes = thrust::count(aggregated.begin(), aggregated.end(), 0); if (verbose) printf("unallocatedNodes = %d\n", unallocatedNodes); if(unallocatedNodes == 0) { // Removing small partitions: if(!firstTime || misCount < 10) { // Making sure there are no oversized partitions restrictPartitionSize(maxSize, fullSize, adjIndexes, adjacency, partIn, nodeWeights); complete = true; } else { firstTime = false; removeRuntyPartitions(fullSize, partIn, nodeWeights, verbose); thrust::transform(partIn.begin(), partIn.end(), aggregated.begin(), findAggregated()); partOut = partIn; } } } cudaThreadSynchronize(); } void misHelpers::restrictPartitionSize(int maxSize, int fullSize, IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &partition, IdxVector_d &nodeWeights, bool verbose) { int size = partition.size(); IntVector_d partSizes, swap_to(size), swap_from(size), swap_index(size); FloatVector_d desirability(size); // Finding the weighted sizes of each partition getWeightedPartSizes(partition, nodeWeights, partSizes); // Finding the average size: int averageSize = fullSize / partSizes.size(); // Finding largest part size: int largestPart = thrust::reduce(partSizes.begin(), partSizes.end(), (int)0, thrust::maximum<int>()); while(largestPart > maxSize) { if (verbose) printf("largestPart = %d\n", largestPart); // Calculating the desirability of the nodes: int *adjIndexes_d = thrust::raw_pointer_cast(&adjIndexes[0]); int *adjacency_d = thrust::raw_pointer_cast(&adjacency[0]); int *partition_d = thrust::raw_pointer_cast(&partition[0]); int *partSizes_d = thrust::raw_pointer_cast(&partSizes[0]); int *swap_to_d = thrust::raw_pointer_cast(&swap_to[0]); int *swap_from_d = thrust::raw_pointer_cast(&swap_from[0]); int *nodeWeights_d = thrust::raw_pointer_cast(&nodeWeights[0]); int *swap_index_d = thrust::raw_pointer_cast(&swap_index[0]); float *desirability_d = thrust::raw_pointer_cast(&desirability[0]); int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); findDesirabilityKernel << < nBlocks, blockSize >> >(size, averageSize, adjIndexes_d, adjacency_d, partition_d, partSizes_d, nodeWeights_d, swap_to_d, swap_from_d, swap_index_d, desirability_d); // Sort the results with (swap_from, desirability) as the key thrust::sort_by_key(thrust::make_zip_iterator( thrust::make_tuple(swap_from.begin(), desirability.begin())), thrust::make_zip_iterator( thrust::make_tuple(swap_from.end(), desirability.end())), thrust::make_zip_iterator( thrust::make_tuple(swap_to.begin(), swap_index.begin()))); // Perform good swaps makeSwapsKernel << < nBlocks, blockSize >> >(size, partition_d, partSizes_d, nodeWeights_d, swap_to_d, swap_from_d, swap_index_d, desirability_d); // Repeat until no overlarge aggregates are found largestPart = thrust::reduce(partSizes.begin(), partSizes.end(), (int)0, thrust::maximum<int>()); } } void misHelpers::getSizes(IdxVector_d &adjIndexes, IdxVector_d &sizes) { int size = adjIndexes.size() - 1; sizes.resize(size, 0); int *adjIndexes_d = thrust::raw_pointer_cast(&adjIndexes[0]); int *sizes_d = thrust::raw_pointer_cast(&sizes[0]); // Figuring out block sizes for kernel call: int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 
0 : 1); // Calling kernel to find sizes: findAdjacencySizesKernel << < nBlocks, blockSize >> > (size, adjIndexes_d, sizes_d); } bool misHelpers::removeRuntyParts(int minSize, IdxVector_d &partition) { // Getting the part sizes: IdxVector_d partSizes; getPartSizes(partition, partSizes); // Converting part sizes to a removeStencil thrust::device_vector<int> removeStencil(partSizes.size()); thrust::transform(partSizes.begin(), partSizes.end(), removeStencil.begin(), labelLessThan(minSize)); // Checking if anything will be removed: int removed = thrust::count(removeStencil.begin(), removeStencil.end(), 1); // DataRecorder::Add("Runty parts Removed", removed); // If nothing to remove, just return. if(removed == 0) return true; // Getting a vector with how much to subtract from non-removed aggregates thrust::device_vector<int> subtractions(partSizes.size()); thrust::inclusive_scan(removeStencil.begin(), removeStencil.end(), subtractions.begin()); // Figuring out block sizes for kernel call: int size = partition.size(); int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); // Getting pointers for the call: int *partition_d = thrust::raw_pointer_cast(&partition[0]); int *removeStencil_d = thrust::raw_pointer_cast(&removeStencil[0]); int *subtractions_d = thrust::raw_pointer_cast(&subtractions[0]); // Calling kernel to find sizes: removeRuntyPartsKernel <<< nBlocks, blockSize >>> (size, partition_d, removeStencil_d, subtractions_d); return false; } bool misHelpers::removeRuntyPartitions(int fullSize, IdxVector_d &partition, IdxVector_d &nodeWeights, bool verbose) { // Getting the part sizes: IntVector_d partSizes; getWeightedPartSizes(partition, nodeWeights, partSizes); // Figuring out the appropriate removal size double averageSize = (double)fullSize / partSizes.size(); if (verbose) printf("Partition average size is %f\n", averageSize); int threshold = (int)(averageSize * .7); // Converting part sizes to a removeStencil thrust::device_vector<int> removeStencil(partSizes.size()); thrust::transform(partSizes.begin(), partSizes.end(), removeStencil.begin(), labelLessThan(threshold)); // Checking if anything will be removed: int removed = thrust::count(removeStencil.begin(), removeStencil.end(), 1); // Getting a vector with how much to subtract from non-removed aggregates thrust::device_vector<int> subtractions(partSizes.size()); thrust::inclusive_scan(removeStencil.begin(), removeStencil.end(), subtractions.begin()); // Figuring out block sizes for kernel call: int size = partition.size(); int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); // Getting pointers for the call: int *partition_d = thrust::raw_pointer_cast(&partition[0]); int *removeStencil_d = thrust::raw_pointer_cast(&removeStencil[0]); int *subtractions_d = thrust::raw_pointer_cast(&subtractions[0]); // Calling kernel to find sizes: removeRuntyPartsKernel << < nBlocks, blockSize >> > (size, partition_d, removeStencil_d, subtractions_d); return false; } void misHelpers::getPartSizes(IdxVector_d &partition, IdxVector_d &partSizes) { // Make a copy of the partition vector to mess with: IdxVector_d temp = partition; // Sort the copy and find largest element thrust::sort(temp.begin(), temp.end()); int maxPart = temp[temp.size() - 1]; // Creating a new array size IdxVector_d partIndices(maxPart + 2, 0); // Figuring out block sizes for kernel call: int size = partition.size(); int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 
0 : 1); // Getting pointers int *temp_d = thrust::raw_pointer_cast(&temp[0]); int *partIndices_d = thrust::raw_pointer_cast(&partIndices[0]); // Calling kernel to find indices for each part: findPartIndicesKernel << < nBlocks, blockSize >> > (size, temp_d, partIndices_d); // Getting the sizes: getSizes(partIndices, partSizes); } void misHelpers::getPartSizes(IdxVector_d &partition, IdxVector_d &partSizes, IdxVector_d &partIndices) { // Make a copy of the partition vector to mess with: IdxVector_d temp = partition; // Sort the copy and find largest element thrust::sort(temp.begin(), temp.end()); int maxPart = temp[temp.size() - 1]; // Creating a new array size partIndices.resize(maxPart + 2, 0); // Figuring out block sizes for kernel call: int size = partition.size(); int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); // Getting pointers int *temp_d = thrust::raw_pointer_cast(&temp[0]); int *partIndices_d = thrust::raw_pointer_cast(&partIndices[0]); // Calling kernel to find indices for each part: findPartIndicesKernel << < nBlocks, blockSize >> > (size, temp_d, partIndices_d); // Getting the sizes: getSizes(partIndices, partSizes); } void misHelpers::getPartIndices(IdxVector_d& sortedPartition, IdxVector_d& partIndices) { // Sizing the array: int maxPart = sortedPartition[sortedPartition.size() - 1]; partIndices.resize(maxPart + 2); thrust::fill(partIndices.begin(), partIndices.end(), 0); // Figuring out block sizes for kernel call: int size = sortedPartition.size(); int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); // Getting pointers int *sortedPartition_d = thrust::raw_pointer_cast(&sortedPartition[0]); int *partIndices_d = thrust::raw_pointer_cast(&partIndices[0]); // Calling kernel to find indices for each part: findPartIndicesKernel << < nBlocks, blockSize >> > (size, sortedPartition_d, partIndices_d); partIndices[partIndices.size() - 1] = size; } void misHelpers::getPartIndicesNegStart(IdxVector_d& sortedPartition, IdxVector_d& partIndices) { // Sizing the array: int maxPart = sortedPartition[sortedPartition.size() - 1]; partIndices.resize(maxPart + 2, 0); // Figuring out block sizes for kernel call: int size = sortedPartition.size(); int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); // Getting pointers int *sortedPartition_d = thrust::raw_pointer_cast(&sortedPartition[0]); int *partIndices_d = thrust::raw_pointer_cast(&partIndices[0]); // Calling kernel to find indices for each part: findPartIndicesNegStartKernel << < nBlocks, blockSize >> > (size, sortedPartition_d, partIndices_d); partIndices[partIndices.size() - 1] = size - 1; } void misHelpers::fillWithIndex(IdxVector_d &tofill) { // Figuring out block sizes for kernel call: int size = tofill.size(); int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); int *tofill_d = thrust::raw_pointer_cast(&tofill[0]); fillWithIndexKernel << < nBlocks, blockSize >> > (size, tofill_d); } void misHelpers::getInversePermutation(IdxVector_d &original, IdxVector_d &inverse) { int size = original.size(); inverse.resize(size, -1); // Get pointers: int *original_d = thrust::raw_pointer_cast(&original[0]); int *inverse_d = thrust::raw_pointer_cast(&inverse[0]); // Figuring out block sizes for kernel call: int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 
0 : 1); // Calling kernel: getInversePermutationKernel << < nBlocks, blockSize >> > (size, original_d, inverse_d); } void misHelpers::permuteInitialAdjacency(IdxVector_d &adjIndexesIn, IdxVector_d &adjacencyIn, IdxVector_d &permutedAdjIndexesIn, IdxVector_d &permutedAdjacencyIn, IdxVector_d &ipermutation, IdxVector_d &fineAggregate) { int size = adjIndexesIn.size() - 1; // Get pointers:adjacencyIn int *adjIndexesIn_d = thrust::raw_pointer_cast(&adjIndexesIn[0]); int *adjacencyIn_d = thrust::raw_pointer_cast(&adjacencyIn[0]); int *permutedAdjIndexesIn_d = thrust::raw_pointer_cast(&permutedAdjIndexesIn[0]); int *permutedAdjacencyIn_d = thrust::raw_pointer_cast(&permutedAdjacencyIn[0]); int *ipermutation_d = thrust::raw_pointer_cast(&ipermutation[0]); int *fineAggregate_d = thrust::raw_pointer_cast(&fineAggregate[0]); // Figuring out block sizes for kernel call: int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); // Calling kernel: permuteInitialAdjacencyKernel << < nBlocks, blockSize >> > (size, adjIndexesIn_d, adjacencyIn_d, permutedAdjIndexesIn_d, permutedAdjacencyIn_d, ipermutation_d, fineAggregate_d); } void misHelpers::getInducedGraphNeighborCounts(IdxVector_d &aggregateIdx, IdxVector_d &adjIndexesOut, IdxVector_d &permutedAdjIndexesIn, IdxVector_d &permutedAdjacencyIn) { int size = aggregateIdx.size() - 1; // Get pointers:adjacencyIn int *aggregateIdx_d = thrust::raw_pointer_cast(&aggregateIdx[0]); int *adjIndexesOut_d = thrust::raw_pointer_cast(&adjIndexesOut[0]); int *permutedAdjIndexesIn_d = thrust::raw_pointer_cast(&permutedAdjIndexesIn[0]); int *permutedAdjacencyIn_d = thrust::raw_pointer_cast(&permutedAdjacencyIn[0]); // Figuring out block sizes for kernel call: int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); // Calling kernel: getInducedGraphNeighborCountsKernel << < nBlocks, blockSize >> > (size, aggregateIdx_d, adjIndexesOut_d, permutedAdjIndexesIn_d, permutedAdjacencyIn_d); } void misHelpers::fillCondensedAdjacency(IdxVector_d& aggregateIdx, IdxVector_d& adjIndexesOut, IdxVector_d& adjacencyOut, IdxVector_d& permutedAdjIndexesIn, IdxVector_d& permutedAdjacencyIn) { int size = adjIndexesOut.size() - 1; // Get pointers:adjacencyIn int *aggregateIdx_d = thrust::raw_pointer_cast(&aggregateIdx[0]); int *adjIndexesOut_d = thrust::raw_pointer_cast(&adjIndexesOut[0]); int *adjacencyOut_d = thrust::raw_pointer_cast(&adjacencyOut[0]); int *permutedAdjIndexesIn_d = thrust::raw_pointer_cast(&permutedAdjIndexesIn[0]); int *permutedAdjacencyIn_d = thrust::raw_pointer_cast(&permutedAdjacencyIn[0]); // Figuring out block sizes for kernel call: int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); // Calling kernel: fillCondensedAdjacencyKernel << < nBlocks, blockSize >> > (size, aggregateIdx_d, adjIndexesOut_d, adjacencyOut_d, permutedAdjIndexesIn_d, permutedAdjacencyIn_d); } void misHelpers::fillPartitionLabel(IdxVector_d& coarseAggregate, IdxVector_d& fineAggregateSort, IdxVector_d& partitionLabel) { int size = partitionLabel.size(); // Get pointers:adjacencyIn int *coarseAggregate_d = thrust::raw_pointer_cast(&coarseAggregate[0]); int *fineAggregateSort_d = thrust::raw_pointer_cast(&fineAggregateSort[0]); int *partitionLabel_d = thrust::raw_pointer_cast(&partitionLabel[0]); // Figuring out block sizes for kernel call: int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 
0 : 1); // Calling kernel: fillPartitionLabelKernel << < nBlocks, blockSize >> > (size, coarseAggregate_d, fineAggregateSort_d, partitionLabel_d); } void misHelpers::getAggregateStartIndices(IdxVector_d& fineAggregateSort, IdxVector_d& aggregateRemapIndex) { int size = fineAggregateSort.size(); // Get pointers:adjacencyIn int *fineAggregateSort_d = thrust::raw_pointer_cast(&fineAggregateSort[0]); int *aggregateRemapIndex_d = thrust::raw_pointer_cast(&aggregateRemapIndex[0]); // Figuring out block sizes for kernel call: int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); // Calling kernel: getAggregateStartIndicesKernel << < nBlocks, blockSize >> > (size, fineAggregateSort_d, aggregateRemapIndex_d); } void misHelpers::remapAggregateIdx(IdxVector_d& fineAggregateSort, IdxVector_d& aggregateRemapId) { int size = fineAggregateSort.size(); // Get pointers:adjacencyIn int *fineAggregateSort_d = thrust::raw_pointer_cast(&fineAggregateSort[0]); int *aggregateRemapId_d = thrust::raw_pointer_cast(&aggregateRemapId[0]); // Figuring out block sizes for kernel call: int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); // Calling kernel: remapAggregateIdxKernel << < nBlocks, blockSize >> > (size, fineAggregateSort_d, aggregateRemapId_d); } void misHelpers::mapAdjacencyToBlock(IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &adjacencyBlockLabel, IdxVector_d &blockMappedAdjacency, IdxVector_d &fineAggregate) { int size = adjIndexes.size() - 1; // Get pointers:adjacencyIn int *adjIndexes_d = thrust::raw_pointer_cast(&adjIndexes[0]); int *adjacency_d = thrust::raw_pointer_cast(&adjacency[0]); int *adjacencyBlockLabel_d = thrust::raw_pointer_cast(&adjacencyBlockLabel[0]); int *blockMappedAdjacency_d = thrust::raw_pointer_cast(&blockMappedAdjacency[0]); int *fineAggregate_d = thrust::raw_pointer_cast(&fineAggregate[0]); // Figuring out block sizes for kernel call: int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 
0 : 1); // Calling kernel: mapAdjacencyToBlockKernel << < nBlocks, blockSize >> > (size, adjIndexes_d, adjacency_d, adjacencyBlockLabel_d, blockMappedAdjacency_d, fineAggregate_d); } void misHelpers::getInducedGraph(IdxVector_d &adjIndexesIn, IdxVector_d &adjacencyIn, IdxVector_d &partitionLabel, IdxVector_d &adjIndexesOut, IdxVector_d &adjacencyOut) { // Declaring temporary vectors: IdxVector_d adjacencyBlockLabel, blockMappedAdjacency; adjacencyBlockLabel.resize(adjacencyIn.size(), 0); blockMappedAdjacency.resize(adjacencyIn.size(), 0); // Get the blocklabeled adjacency: misHelpers::mapAdjacencyToBlock(adjIndexesIn, adjacencyIn, adjacencyBlockLabel, blockMappedAdjacency, partitionLabel); // Zip up the block label and block mapped vectors and sort: thrust::sort(thrust::make_zip_iterator( thrust::make_tuple(adjacencyBlockLabel.begin(), blockMappedAdjacency.begin())), thrust::make_zip_iterator( thrust::make_tuple(adjacencyBlockLabel.end(), blockMappedAdjacency.end()))); // Remove Duplicates and resize: int newSize = thrust::unique( thrust::make_zip_iterator( thrust::make_tuple(adjacencyBlockLabel.begin(), blockMappedAdjacency.begin())), thrust::make_zip_iterator( thrust::make_tuple(adjacencyBlockLabel.end(), blockMappedAdjacency.end()))) - thrust::make_zip_iterator(thrust::make_tuple(adjacencyBlockLabel.begin(), blockMappedAdjacency.begin())); adjacencyBlockLabel.resize(newSize); blockMappedAdjacency.resize(newSize); misHelpers::getPartIndicesNegStart(adjacencyBlockLabel, adjIndexesOut); adjacencyOut.resize(blockMappedAdjacency.size() - 1); thrust::copy(blockMappedAdjacency.begin() + 1, blockMappedAdjacency.end(), adjacencyOut.begin()); } void misHelpers::getWeightedPartSizes(IdxVector_d &partition, IdxVector_d &nodeWeights, IntVector_d &partSizes) { // Make copies to mess with IntVector_d part(partition.begin(), partition.end()); IntVector_d weights(nodeWeights.begin(), nodeWeights.end()); // Sorting temp vectors together thrust::sort_by_key(part.begin(), part.end(), weights.begin()); // Getting prefix sum of values thrust::inclusive_scan(weights.begin(), weights.end(), weights.begin()); // Another temp vector for accumulated size at last nodes IntVector_d accumulatedSize(part[part.size() - 1] + 1); // Preparing to call kernel to fill accumulated size vector int size = part.size(); int *part_d = thrust::raw_pointer_cast(&part[0]); int *weights_d = thrust::raw_pointer_cast(&weights[0]); int *accumulatedSize_d = thrust::raw_pointer_cast(&accumulatedSize[0]); // Figuring out block sizes for kernel call: int blockSize = 256; int nBlocks = size / blockSize + (size % blockSize == 0 ? 0 : 1); accumulatedPartSizesKernel << < nBlocks, blockSize >> > (size, part_d, weights_d, accumulatedSize_d); // Calling kernel to get the unaccumulated part sizes: size = accumulatedSize.size(); nBlocks = size / blockSize + (size % blockSize == 0 ? 
0 : 1); partSizes.resize(size); int *sizes_d = thrust::raw_pointer_cast(&partSizes[0]); unaccumulatedPartSizesKernel << < nBlocks, blockSize >> > (size, accumulatedSize_d, sizes_d); } void misHelpers::checkPartConnectivity(int partCount, IdxVector_d partition, IdxVector_d adjIndexes, IdxVector_d adjacency, char *message) { // Debugging check on part connectivity: std::cout << message << "\n"; std::vector<int> nodesToExplore, exploredNodes; for(int i = 0; i < partCount; i++) { nodesToExplore.clear(); exploredNodes.clear(); // Find a node in the part int rootId = -1; for(int j = 0; j < partition.size(); j++) { if(partition[j] == i) { rootId = j; break; } } // Explore out from the part int start = adjIndexes[rootId], end = adjIndexes[rootId + 1]; for(int n = start; n < end; n++) { int neighbor = adjacency[n]; if(partition[neighbor] == i) nodesToExplore.push_back(neighbor); } exploredNodes.push_back(rootId); // Iterating through everything: while(nodesToExplore.size() > 0) { // Popping off the last node to explore and checking if it's done int node = nodesToExplore.back(); nodesToExplore.pop_back(); // Checking if the node has been explored: bool exploredAlready = false; for(int q = 0; q < exploredNodes.size(); q++) if(exploredNodes[q] == node) exploredAlready = true; if(!exploredAlready) { int start = adjIndexes[node], end = adjIndexes[node + 1]; for(int n = start; n < end; n++) { int neighbor = adjacency[n]; if(partition[neighbor] == i) { nodesToExplore.push_back(neighbor); //printf("\tAdded %d a neighbor of %d to explore list for part %d", neighbor, node, i); } } exploredNodes.push_back(node); //printf("\tAdded %d to explored for part %d\n", node, i); } } // Now checking to see if there were any unreachable nodes. for(int j = 0; j < partition.size(); j++) { if(partition[j] == i) { bool found = false; for(int q = 0; q < exploredNodes.size(); q++) if(exploredNodes[q] == j) { found = true; break; } if(!found) { printf("Could not reach node %d in part %d from root %d\n", j, i, rootId); printf("\tExplored nodes:"); for(int g = 0; g < exploredNodes.size(); g++) printf(" %3d", exploredNodes[g]); printf("\n"); } } } } // Pausing int dummy = 0; std::cin >> dummy; if(dummy == 1) { int partToCheck; std::cin >> partToCheck; for(int i = 0; i < partition.size(); i++) { if(partition[i] == partToCheck) { int start = adjIndexes[i], end = adjIndexes[i + 1]; printf("Node %d is in partition %d\n\t", i, partToCheck); for(int j = start; j < end; j++) { int neighbor = adjacency[j]; printf(" %4d ", neighbor); } printf("\n"); } } } } void misHelpers::remapInducedGraph(IdxVector_d &adjIndexes, IdxVector_d &adjacency, IdxVector_d &partition) { IdxVector_d tempCoarseAggregate = partition; IdxVector_d aggregateLabel = adjacency; IdxVector_d permutedAdjacency = adjacency; IdxVector_d coarsePermutation = partition; IdxVector_d coarseIPermutation; // Get the inverse permutation for the re-mapping misHelpers::fillWithIndex(coarsePermutation); thrust::stable_sort_by_key(tempCoarseAggregate.begin(), tempCoarseAggregate.end(), coarsePermutation.begin()); misHelpers::getInversePermutation(coarsePermutation, coarseIPermutation); // Map the adjacency according to the inverse permutation misHelpers::mapAdjacencyToBlock(adjIndexes, adjacency, aggregateLabel, permutedAdjacency, coarseIPermutation); thrust::sort_by_key(aggregateLabel.begin(), aggregateLabel.end(), permutedAdjacency.begin()); // Copy from the temp to the real adjacency thrust::copy(permutedAdjacency.begin(), permutedAdjacency.end(), adjacency.begin()); // Find the 
adjIndexes for the new adjacency misHelpers::getPartIndices(aggregateLabel, adjIndexes); }
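// --------------------------------------------------------------------------
// Illustrative host-side sketch (not part of this code base): the segmented
// sum that getWeightedPartSizes computes on the GPU via sort_by_key,
// inclusive_scan and the accumulated/unaccumulated part-size kernels above.
// weighted_part_sizes_cpu is a hypothetical reference for checking results.
#include <algorithm>
#include <vector>

static std::vector<int> weighted_part_sizes_cpu(const std::vector<int>& part,
                                                const std::vector<int>& weight) {
  if (part.empty()) return {};
  const int numParts = *std::max_element(part.begin(), part.end()) + 1;
  std::vector<int> sizes(numParts, 0);
  for (size_t i = 0; i < part.size(); ++i) {
    sizes[part[i]] += weight[i];  // total node weight assigned to each part
  }
  return sizes;
}
// The GPU path reaches the same per-part totals by sorting weights by part
// label, prefix-summing them, reading the running sum at the last node of
// each part (accumulatedPartSizesKernel) and differencing adjacent entries
// (unaccumulatedPartSizesKernel).
// --------------------------------------------------------------------------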
#include "opticalFlowUtils.hpp" #include "backend/common/vectorOps.hpp" #include "backend/cuda/deviceBuffer.hpp" #include "backend/cuda/deviceStream.hpp" #include "gpu/image/sampling.hpp" #include "gpu/image/imageOps.hpp" #include "gpu/image/blur.hpp" #include "gpu/stream.hpp" #include "cuda/error.hpp" #include "cuda/util.hpp" #include "util/imageProcessingGPUUtils.hpp" #include "backend/cuda/core1/kernels/samplingKernel.cu" #include "parallax/gpu/cuda/kernels/patchDifferenceFunction.cu" #define REGULARIZATION_TILE_WIDTH 16 #define KERNEL_SIZE 25 #define AREA_SIZE (REGULARIZATION_TILE_WIDTH + 2 * KERNEL_SIZE) #define TILE_WIDTH 16 #define CUDABLOCKSIZE 512 namespace VideoStitch { namespace Util { __global__ void backwardCoordLookupKernel(const int2 inputOffset, int2 inputSize, const float2* g_iCoord, const int2 outputOffset, int2 outputSize, float2* g_oCoord) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < inputSize.x && y < inputSize.y) { const float2 iCoord = g_iCoord[y * inputSize.x + x]; if (iCoord.x != INVALID_FLOW_VALUE) { const float2 inputCoord = make_float2(inputOffset.x + x, inputOffset.y + y); const float2 outputCoord = inputCoord + iCoord; const float2 relativeOutputCoord = outputCoord - make_float2(outputOffset.x, outputOffset.y); if (inRange(relativeOutputCoord, outputSize)) { g_oCoord[int(round(relativeOutputCoord.y)) * outputSize.x + int(round(relativeOutputCoord.x))] = make_float2(0, 0) - iCoord; } } } } Status OpticalFlow::backwardCoordLookup(const int2 inputOffset, const int2 inputSize, const GPU::Buffer<const float2> inputCoordBuffer, const int2 outputOffset, const int2 outputSize, GPU::Buffer<float2> outputCoordBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(inputSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(inputSize.y, dimBlock.y), 1); backwardCoordLookupKernel<<<dimGrid, dimBlock, 0, stream>>>(inputOffset, inputSize, inputCoordBuffer.get(), outputOffset, outputSize, outputCoordBuffer.get()); return CUDA_STATUS; } struct BilinearFlowInterpolation { typedef float2 Type; static inline __device__ float2 interpolate(float2 a, float2 b, float2 c, float2 d) { if (a.x == INVALID_FLOW_VALUE || b.x == INVALID_FLOW_VALUE || c.x == INVALID_FLOW_VALUE || d.x == INVALID_FLOW_VALUE) { return make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); } else { return (9.0f / 16.0f * a + 3.0f / 16.0f * (b + c) + 1.0f / 16.0f * d); } } }; Status OpticalFlow::upsampleFlow22(GPU::Buffer<float2> dst, GPU::Buffer<const float2> src, std::size_t dstWidth, std::size_t dstHeight, bool wrap, unsigned blockSize, GPU::Stream stream) { const unsigned srcWidth = ((unsigned)dstWidth + 1) / 2; const unsigned srcHeight = ((unsigned)dstHeight + 1) / 2; const dim3 dimBlock(blockSize, blockSize, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(srcWidth, dimBlock.x), (unsigned)Cuda::ceilDiv(srcHeight, dimBlock.y), 1); if (wrap) { Image::upsample22Kernel<Image::HWrapBoundary<float2>, BilinearFlowInterpolation> <<<dimGrid, dimBlock, (blockSize + 2) * (blockSize + 2) * sizeof(float2), stream.get()>>>( dst.get(), src.get(), (unsigned)dstWidth, (unsigned)dstHeight, srcWidth, srcHeight); } else { Image::upsample22Kernel<Image::ExtendBoundary<float2>, BilinearFlowInterpolation> <<<dimGrid, dimBlock, (blockSize + 2) * (blockSize + 2) * sizeof(float2), stream.get()>>>( dst.get(), src.get(), (unsigned)dstWidth, (unsigned)dstHeight, srcWidth, srcHeight); } 
return CUDA_STATUS;
}

__global__ void outwardCoordLookupKernel(const int2 offset1, int2 size1, const float2* g_iCoord, const int2 offset0,
                                         const int2 size0, const uint32_t* g_iBuffer, uint32_t* g_oBuffer) {
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < size1.x && y < size1.y) {
    g_oBuffer[y * size1.x + x] = 0;
    const float2 iCoord = g_iCoord[y * size1.x + x];
    if (iCoord.x != INVALID_FLOW_VALUE) {
      const float2 outputCoord =
          make_float2(offset1.x - offset0.x + x + iCoord.x, offset1.y - offset0.y + y + iCoord.y);
      if (inRange(outputCoord, size0)) {
        g_oBuffer[y * size1.x + x] = g_iBuffer[y * size0.x + x];
      }
    }
  }
}

Status OpticalFlow::outwardCoordLookup(const int2 offset1, const int2 size1,
                                       const GPU::Buffer<const float2> coordBuffer, const int2 offset0,
                                       const int2 size0, const GPU::Buffer<const uint32_t> inputBuffer,
                                       GPU::Buffer<uint32_t> outputBuffer, GPU::Stream gpuStream) {
  cudaStream_t stream = gpuStream.get();
  dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
  dim3 dimGrid((unsigned)Cuda::ceilDiv(size1.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size1.y, dimBlock.y), 1);
  outwardCoordLookupKernel<<<dimGrid, dimBlock, 0, stream>>>(offset1, size1, coordBuffer.get(), offset0, size0,
                                                             inputBuffer.get(), outputBuffer.get());
  return CUDA_STATUS;
}

__global__ void forwardCoordLookupKernel(const int2 inputOffset, int2 inputSize, const float2* g_iCoord,
                                         const int2 originalOffset, const int2 originalSize,
                                         const float2* g_originalCoord, const int2 outputOffset, int2 outputSize,
                                         float2* g_oCoord) {
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < inputSize.x && y < inputSize.y) {
    const float2 iCoord = g_iCoord[y * inputSize.x + x];
    if (iCoord.x != INVALID_FLOW_VALUE) {
      const float2 inputCoord = make_float2(inputOffset.x + x, inputOffset.y + y);
      const float2 outputCoord = inputCoord + iCoord;
      const float2 relativeOutputCoord = outputCoord - make_float2(outputOffset.x, outputOffset.y);
      const float2 originalOutputcoord = outputCoord - make_float2(originalOffset.x, originalOffset.y);
      // Check whether an original flow value exists at all
      float2 originalFlow = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE);
      if (inRange(originalOutputcoord, originalSize)) {
        originalFlow = g_originalCoord[int(originalOutputcoord.y) * originalSize.x + int(originalOutputcoord.x)];
      }
      if (inRange(relativeOutputCoord, outputSize)) {
        if (originalFlow.x == INVALID_FLOW_VALUE) {
          g_oCoord[int(round(relativeOutputCoord.y)) * outputSize.x + int(round(relativeOutputCoord.x))] =
              make_float2(0, 0) - iCoord;
        } else {
          g_oCoord[int(round(relativeOutputCoord.y)) * outputSize.x + int(round(relativeOutputCoord.x))] =
              originalFlow;
        }
      }
    }
  }
}

Status OpticalFlow::forwardCoordLookup(const int2 inputOffset, const int2 inputSize,
                                       const GPU::Buffer<const float2> inputCoordBuffer, const int2 originalOffset,
                                       const int2 originalSize, const GPU::Buffer<const float2> originalCoordBuffer,
                                       const int2 outputOffset, const int2 outputSize,
                                       GPU::Buffer<float2> outputCoordBuffer, GPU::Stream gpuStream) {
  cudaStream_t stream = gpuStream.get();
  dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
  dim3 dimGrid((unsigned)Cuda::ceilDiv(inputSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(inputSize.y, dimBlock.y), 1);
  forwardCoordLookupKernel<<<dimGrid, dimBlock, 0, stream>>>(inputOffset, inputSize, inputCoordBuffer.get(),
                                                             originalOffset, originalSize, originalCoordBuffer.get(),
                                                             outputOffset, outputSize, outputCoordBuffer.get());
  return CUDA_STATUS;
}

__global__
void putOverOriginalFlowKernel(const int2 inputOffset, const int2 inputSize, const float2* const inputFlow, const int2 outputOffset, const int2 outputSize, float2* outputFlow) { uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= inputSize.x || y >= inputSize.y) return; float2 input = inputFlow[y * inputSize.x + x]; if (input.x != INVALID_FLOW_VALUE) { int2 outputCoord = make_int2(x, y) + inputOffset - outputOffset; if (inRange(outputCoord, outputSize)) { outputFlow[outputCoord.y * outputSize.x + outputCoord.x] = input; } } } Status OpticalFlow::putOverOriginalFlow(const int2 inputOffset, const int2 inputSize, const GPU::Buffer<const float2> inputFlow, const int2 outputOffset, const int2 outputSize, GPU::Buffer<float2> outputFlow, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(inputSize.x, dimBlock.x), (unsigned)Cuda::ceilDiv(inputSize.y, dimBlock.y), 1); putOverOriginalFlowKernel<<<dimGrid, dimBlock, 0, stream>>>(inputOffset, inputSize, inputFlow.get(), outputOffset, outputSize, outputFlow.get()); return CUDA_STATUS; } __global__ void identityFlowKernel(const bool normalizedFlow, const int2 size, float2* coordBuffer) { uint32_t x = blockIdx.x * blockDim.x + threadIdx.x; uint32_t y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size.x || y >= size.y) return; if (normalizedFlow) { coordBuffer[y * size.x + x] = make_float2(float(x) / size.x, float(y) / size.y); } else { coordBuffer[y * size.x + x] = make_float2(x, y); } } Status OpticalFlow::generateIdentityFlow(const int2 size, GPU::Buffer<float2> coordBuffer, GPU::Stream gpuStream, const bool normalizedFlow) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, TILE_WIDTH), (unsigned)Cuda::ceilDiv(size.y, TILE_WIDTH), 1); identityFlowKernel<<<dimGrid, dimBlock, 0, stream>>>(normalizedFlow, size, coordBuffer.get()); return CUDA_STATUS; } __global__ void transformOffsetToFlowKernel(const int2 size0, const int2 offset0, const int2 offset1, const float2* const inputBuffer, float2* const outputBuffer) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size0.x || y >= size0.y) return; float2 offset = inputBuffer[y * size0.x + x]; if (offset.x != INVALID_FLOW_VALUE) { outputBuffer[y * size0.x + x] = offset + make_float2(x + offset0.x - offset1.x, y + offset0.y - offset1.y); } else { outputBuffer[y * size0.x + x] = offset; } } Status OpticalFlow::transformOffsetToFlow(const int2 size0, const int2 offset0, const int2 offset1, GPU::Buffer<float2> buffer, GPU::Stream gpuStream) { return transformOffsetToFlow(size0, offset0, offset1, buffer, buffer, gpuStream); } Status OpticalFlow::transformOffsetToFlow(const int2 size0, const int2 offset0, const int2 offset1, const GPU::Buffer<const float2> inputBuffer, GPU::Buffer<float2> outputBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size0.x, TILE_WIDTH), (unsigned)Cuda::ceilDiv(size0.y, TILE_WIDTH), 1); transformOffsetToFlowKernel<<<dimGrid, dimBlock, 0, stream>>>(size0, offset0, offset1, inputBuffer.get(), outputBuffer.get()); return CUDA_STATUS; } __global__ void transformFlowToOffsetKernel(const int2 size0, const int2 offset0, const int2 offset1, const float2* const inputBuffer, float2* const outputBuffer) { int x = 
blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= size0.x || y >= size0.y) return; float2 flow = inputBuffer[y * size0.x + x]; if (flow.x != INVALID_FLOW_VALUE) outputBuffer[y * size0.x + x] = flow - make_float2(x + offset0.x - offset1.x, y + offset0.y - offset1.y); else outputBuffer[y * size0.x + x] = flow; } Status OpticalFlow::transformFlowToOffset(const int2 size0, const int2 offset0, const int2 offset1, const GPU::Buffer<const float2> inputBuffer, GPU::Buffer<float2> outputBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size0.x, TILE_WIDTH), (unsigned)Cuda::ceilDiv(size0.y, TILE_WIDTH), 1); transformFlowToOffsetKernel<<<dimGrid, dimBlock, 0, stream>>>(size0, offset0, offset1, inputBuffer.get(), outputBuffer.get()); return CUDA_STATUS; } __global__ void coordLookupKernel(int outputWidth, int outputHeight, const float2* g_iCoord, int inputWidth, int inputHeight, const uint32_t* g_idata, uint32_t* g_odata) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < outputWidth && y < outputHeight) { float2 uv = g_iCoord[y * outputWidth + x]; int2 size = make_int2(inputWidth, inputHeight); g_odata[y * outputWidth + x] = Image::bilinearLookup<Image::BilinearLookupRGBAtoRGBA>(uv, size, g_idata); } } Status OpticalFlow::coordLookup(const int outputWidth, const int outputHeight, const GPU::Buffer<const float2> coordBuffer, const int inputWidth, const int inputHeight, const GPU::Buffer<const uint32_t> inputBuffer, GPU::Buffer<uint32_t> outputBuffer, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(outputWidth, dimBlock.x), (unsigned)Cuda::ceilDiv(outputHeight, dimBlock.y), 1); coordLookupKernel<<<dimGrid, dimBlock, 0, stream>>>(outputWidth, outputHeight, coordBuffer.get(), inputWidth, inputHeight, inputBuffer.get(), outputBuffer.get()); return CUDA_STATUS; } __global__ void mulFlowOperatorKernel(float2* dst, const float2 toMul, std::size_t size) { std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < size) { if (dst[i].x != INVALID_FLOW_VALUE) { dst[i] *= toMul; } } } Status OpticalFlow::mulFlowOperator(GPU::Buffer<float2> dst, const float2 toMul, std::size_t size, GPU::Stream stream) { dim3 dimBlock(CUDABLOCKSIZE); dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE)); mulFlowOperatorKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), toMul, size); return CUDA_STATUS; } __global__ void mulFlowOperatorKernel(float2* dst, const float2* src, const float2 toMul, std::size_t size) { std::size_t i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; if (i < size) { if (dst[i].x != INVALID_FLOW_VALUE) { dst[i] = make_float2(src[i].x * toMul.x, src[i].y * toMul.y); } } } Status OpticalFlow::mulFlowOperator(GPU::Buffer<float2> dst, const GPU::Buffer<const float2> src, const float2 toMul, std::size_t size, GPU::Stream stream) { dim3 dimBlock(CUDABLOCKSIZE); dim3 dimGrid(Cuda::compute2DGridForFlatBuffer(size, CUDABLOCKSIZE)); mulFlowOperatorKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(dst.get(), src.get(), toMul, size); return CUDA_STATUS; } __global__ void generateWeightKernel(const int kernelSize, const float sigmaDistance, float* kernelWeight) { int y = blockIdx.y * blockDim.x + threadIdx.y; 
int x = blockIdx.x * blockDim.y + threadIdx.x; if (x <= 2 * kernelSize && y <= 2 * kernelSize) { float maxDist = kernelSize * 1.4142; float distSpace = length(make_float2(x - kernelSize, y - kernelSize)) / maxDist; float weightDist = exp(-sigmaDistance * distSpace * distSpace); kernelWeight[y * (2 * kernelSize + 1) + x] = weightDist; } } __global__ void interCoordLookupKernel(const int warpWidth, const int interOffsetX, const int interOffsetY, const int interWidth, const int interHeight, const uint32_t* inputBuffer, const int coordWidth, const int coordHeight, const float2* coordBuffer, uint32_t* outputBuffer) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < coordWidth && y < coordHeight) { float2 coord = coordBuffer[y * coordWidth + x] - make_float2(interOffsetX, interOffsetY); if (coord.x >= 0 && coord.x < interWidth && coord.y >= 0 && coord.y < interHeight) { outputBuffer[y * coordWidth + x] = inputBuffer[int(coord.y) * interWidth + int(coord.x)]; } else { outputBuffer[y * coordWidth + x] = 0; } } } Status OpticalFlow::interCoordLookup(const int warpWidth, const int interOffsetX, const int interOffsetY, const int interWidth, const int interHeight, const GPU::Buffer<const uint32_t> inputBuffer, const int coordWidth, const int coordHeight, const GPU::Buffer<const float2> coordBuffer, GPU::Buffer<uint32_t> output, GPU::Stream gpuStream) { cudaStream_t stream = gpuStream.get(); dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(coordWidth, dimBlock.x), (unsigned)Cuda::ceilDiv(coordHeight, dimBlock.y), 1); interCoordLookupKernel<<<dimGrid, dimBlock, 0, stream>>>(warpWidth, interOffsetX, interOffsetY, interWidth, interHeight, inputBuffer.get(), coordWidth, coordHeight, coordBuffer.get(), output.get()); return CUDA_STATUS; } __global__ void flowToRGBAKernel(const int2 size, const float2* inputBuffer, const int2 maxFlowValue, uint32_t* outputBuffer) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < size.y) { float2 input = inputBuffer[y * size.x + x]; if (abs(input.x - INVALID_FLOW_VALUE) < 1e-5) { outputBuffer[y * size.x + x] = 0; } else { outputBuffer[y * size.x + x] = Image::RGBA::pack((float(input.x) / maxFlowValue.x) * 255, (float(input.y) / maxFlowValue.y) * 255, 0, 255); } } } Status OpticalFlow::convertFlowToRGBA(const int2 size, const GPU::Buffer<const float2> src, const int2 maxFlowValue, GPU::Buffer<uint32_t> dst, GPU::Stream stream) { dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1); const dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size.y, dimBlock.y), 1); flowToRGBAKernel<<<dimGrid, dimBlock, 0, stream.get()>>>(size, src.get(), maxFlowValue, dst.get()); return CUDA_STATUS; } __global__ void setAlphaToFlowBufferKernel(const int2 size, const uint32_t* colorBuffer, float2* flowBuffer) { // calculate normalized texture coordinates const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < size.x && y < size.y) { const unsigned index = y * size.x + x; if (Image::RGBA::a(colorBuffer[index]) == 0) { flowBuffer[index] = make_float2(INVALID_FLOW_VALUE, INVALID_FLOW_VALUE); } } } Status OpticalFlow::setAlphaToFlowBuffer(const int2 size, const GPU::Buffer<const uint32_t> colorBuffer, GPU::Buffer<float2> flowBuffer, GPU::Stream gpuStream) { dim3 
dimBlock(TILE_WIDTH, TILE_WIDTH, 1); dim3 dimGrid((unsigned)Cuda::ceilDiv(size.x, dimBlock.x), (unsigned)Cuda::ceilDiv(size.y, dimBlock.y), 1); cudaStream_t stream = gpuStream.get(); setAlphaToFlowBufferKernel<<<dimGrid, dimBlock, 0, stream>>>(size, colorBuffer.get(), flowBuffer.get()); return CUDA_STATUS; } } // namespace Util } // namespace VideoStitch
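The helpers above all share one convention: a float2 field holds either an absolute lookup coordinate ("flow") or a relative displacement ("offset"), with INVALID_FLOW_VALUE marking pixels that have no correspondence, and transformOffsetToFlow / transformFlowToOffset convert between the two representations. The following is a minimal, self-contained CUDA sketch of that offset-to-flow conversion, written without the VideoStitch GPU::Buffer / GPU::Stream wrappers; the kernel name, sentinel value, and sizes are illustrative assumptions, not part of the library.

// Standalone sketch of the offset-to-flow convention used by transformOffsetToFlowKernel:
// an offset stored at pixel (x, y) of image 0 becomes an absolute coordinate in image 1 by
// adding (x + offset0.x - offset1.x, y + offset0.y - offset1.y). Invalid pixels pass through.
#include <cuda_runtime.h>
#include <cstdio>

#define SKETCH_INVALID_FLOW 1e30f  // illustrative sentinel, stands in for INVALID_FLOW_VALUE

__global__ void offsetToFlowSketch(int2 size, int2 offset0, int2 offset1, const float2* in, float2* out) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= size.x || y >= size.y) return;
  float2 v = in[y * size.x + x];
  if (v.x != SKETCH_INVALID_FLOW) {
    out[y * size.x + x] = make_float2(v.x + x + offset0.x - offset1.x, v.y + y + offset0.y - offset1.y);
  } else {
    out[y * size.x + x] = v;  // propagate the invalid sentinel untouched
  }
}

int main() {
  const int2 size = make_int2(4, 4);
  const int2 offset0 = make_int2(10, 20), offset1 = make_int2(2, 3);
  const size_t bytes = size.x * size.y * sizeof(float2);
  float2 host[16];
  for (int i = 0; i < 16; ++i) host[i] = make_float2(0.5f, -0.5f);
  float2 *dIn, *dOut;
  cudaMalloc(&dIn, bytes);
  cudaMalloc(&dOut, bytes);
  cudaMemcpy(dIn, host, bytes, cudaMemcpyHostToDevice);
  dim3 block(16, 16), grid(1, 1);
  offsetToFlowSketch<<<grid, block>>>(size, offset0, offset1, dIn, dOut);
  cudaMemcpy(host, dOut, bytes, cudaMemcpyDeviceToHost);
  // Pixel (1, 1): expect (0.5 + 1 + 10 - 2, -0.5 + 1 + 20 - 3) = (9.5, 17.5).
  printf("flow(1,1) = (%f, %f)\n", host[size.x + 1].x, host[size.x + 1].y);
  cudaFree(dIn);
  cudaFree(dOut);
  return 0;
}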
#include <algorithm> #include <unordered_map> #include "gloo/cuda_private.h" namespace gloo { namespace nccl { // Allocate a set of per-device streams used to serialize NCCL op scheduling. // These ensure concurrent NCCL ops are not interleaved across devices (i.e., // through priority scheduling), resulting in deadlock. Use a function-scope // static to avoid SIOF with the CUDA runtime. static CudaDeviceStreams& getNcclStreams() { static CudaDeviceStreams ncclStreams; return ncclStreams; } template <typename T> class NCCLContext { public: NCCLContext(const std::vector<int>& devices) : devices(devices) { // Initialze comms. Synchronize with conflicting CUDA and NCCL operations. comms.resize(devices.size()); std::lock_guard<std::mutex> lock(CudaShared::getMutex()); NCCL_CHECK(ncclCommInitAll(comms.data(), devices.size(), devices.data())); } ~NCCLContext() { /* * TODO(T30279827) Temporarily disable calling ncclCommDestroy * Calling ncclCommDestroy while program exiting is undefined * according to nvidia, and lead to segfault in NCCL 2 * (whether it is called before or after the CUDA runtime destructor). * Temporarily disable it in destructor to avoid segfault. * Following up with Nvidia for long term solution. */ /* for (auto i = 0; i < devices.size(); ++i) { CudaDeviceScope scope(devices[i]); { // Synchronize memory allocation with NCCL operations std::lock_guard<std::mutex> lock(CudaShared::getMutex()); ncclCommDestroy(comms[i]); } } */ } // Instances cannot be copied or copy-assigned NCCLContext(const NCCLContext&) = delete; NCCLContext& operator=(const NCCLContext&) = delete; const std::vector<int> devices; std::vector<ncclComm_t> comms; }; // Initializing NCCL communications is expensive. Allocate context as needed per // unique device set and cache for reuse. template <typename T> static std::shared_ptr<NCCLContext<T>> getNcclContext( const NCCLExecution<T>& ex) { static std::unordered_map<std::string, std::shared_ptr<NCCLContext<T>>> contexts; const auto key = ex.getKey(); { static std::mutex m; std::lock_guard<std::mutex> lock(m); if (!contexts[key]) { contexts[key] = std::make_shared<NCCLContext<T>>(ex.getDevices()); } } const auto context = contexts[key]; GLOO_ENFORCE_NE(context.get(), (void*)nullptr); return context; } template <typename T> NCCLExecution<T>::NCCLExecution(std::vector<NCCLElement<T>>&& elements) : elements(std::move(elements)) { // Allocate events to synchronize source, destination, and NCCL streams ncclEvents.resize(this->elements.size()); for (auto i = 0; i < this->elements.size(); i++) { CudaDeviceScope scope(this->elements[i].device); CUDA_CHECK(cudaEventCreateWithFlags( &ncclEvents[i], cudaEventDefault | cudaEventDisableTiming)); } } template <typename T> NCCLExecution<T>::~NCCLExecution() noexcept(false) { for (auto i = 0; i < this->elements.size(); i++) { CudaDeviceScope scope(this->elements[i].device); CUDA_CHECK(cudaEventDestroy(ncclEvents[i])); } } template <typename T> std::vector<int> NCCLExecution<T>::getDevices() const { std::vector<int> result; result.reserve(elements.size()); for (const auto& el : elements) { GLOO_ENFORCE( // Performing a linear search given small set of devices std::find(result.begin(), result.end(), el.device) == result.end(), "NCCL elements must map to unique devices"); result.push_back(el.device); } return result; } template <typename T> std::string NCCLExecution<T>::getKey() const { // Construct a key representing the order-dependent devices in this NCCL // execution. 
This is used to index into the NCCL context map and allows an // implicit association between elements[i].device and NCCLContext::comms[i] std::string result; for (const auto& el : elements) { result += std::to_string(el.device) + ","; } return result; } template <typename T> class ncclTypeWrapper; template <> class ncclTypeWrapper<int8_t> { public: static const ncclDataType_t type = ncclChar; }; template <> class ncclTypeWrapper<uint8_t> { public: static const ncclDataType_t type = ncclChar; }; template <> class ncclTypeWrapper<int32_t> { public: static const ncclDataType_t type = ncclInt; }; template <> class ncclTypeWrapper<int64_t> { public: static const ncclDataType_t type = ncclInt64; }; template <> class ncclTypeWrapper<uint64_t> { public: static const ncclDataType_t type = ncclUint64; }; template <> class ncclTypeWrapper<float16> { public: static const ncclDataType_t type = ncclHalf; }; template <> class ncclTypeWrapper<float> { public: static const ncclDataType_t type = ncclFloat; }; template <> class ncclTypeWrapper<double> { public: static const ncclDataType_t type = ncclDouble; }; template <typename T> NCCLOp<T>::NCCLOp(NCCLExecution<T>&& execution) : execution_(std::move(execution)), context_(getNcclContext(execution_)) {} template <typename T> void NCCLOp<T>::wait() { auto& elements = execution_.elements; for (auto i = 0; i < elements.size(); ++i) { CudaDeviceScope scope(elements[i].device); elements[i].dstStream.wait(); } } template <typename T> template <typename F> void NCCLOp<T>::runNCCL(F&& f) { const auto& elements = execution_.elements; const auto& ncclEvents = execution_.ncclEvents; const auto& comms = context_->comms; // Synchronize memory allocation with NCCL operations std::lock_guard<std::mutex> lock(CudaShared::getMutex()); #if NCCL_VERSION_MIN(2,0,0) NCCL_CHECK(ncclGroupStart()); #endif // Kick off the NCCL operation on each device for (auto i = 0; i < elements.size(); i++) { const auto& element = elements[i]; const auto& srcStream = element.srcStream.getStream(); const auto& dstStream = element.dstStream.getStream(); const auto& ncclStream = getNcclStreams()[element.device]; const auto& srcEvent = element.srcStream.getEvent(); const auto& dstEvent = element.dstStream.getEvent(); CudaDeviceScope scope(element.device); // Synchronize the source and destination with the NCCL stream. Record // events in the source and destination streams, and wait on these in the // NCCL streams. CUDA_CHECK(cudaEventRecord(srcEvent, srcStream)); CUDA_CHECK(cudaStreamWaitEvent(ncclStream, srcEvent, 0)); if (srcStream != dstStream) { CUDA_CHECK(cudaEventRecord(dstEvent, dstStream)); CUDA_CHECK(cudaStreamWaitEvent(ncclStream, dstEvent, 0)); } // Run the operation f(element, comms[i], ncclStream); } #if NCCL_VERSION_MIN(2,0,0) NCCL_CHECK(ncclGroupEnd()); #endif for (auto i = 0; i < elements.size(); ++i) { const auto& element = elements[i]; const auto& ncclStream = getNcclStreams()[element.device]; const auto& dstStream = element.dstStream.getStream(); const auto& dstEvent = element.dstStream.getEvent(); CudaDeviceScope scope(element.device); // Record an event in the NCCL stream signaling the operation is complete. // Synchronize with the destination stream. 
CUDA_CHECK(cudaEventRecord(ncclEvents[i], ncclStream)); CUDA_CHECK(cudaStreamWaitEvent(dstStream, ncclEvents[i], 0)); CUDA_CHECK(cudaEventRecord(dstEvent, dstStream)); } } template <typename T> void ReduceOp<T>::runAsync() { const auto op = op_; const auto root = root_; this->runNCCL([op, root]( const NCCLElement<T>& element, ncclComm_t comm, cudaStream_t stream) { NCCL_CHECK(ncclReduce( *element.src, *element.dst, element.src.getCount(), ncclTypeWrapper<T>::type, op, root, comm, stream)); }); } template <typename T> void AllreduceOp<T>::runAsync() { const auto op = op_; this->runNCCL([op]( const NCCLElement<T>& element, ncclComm_t comm, cudaStream_t stream) { NCCL_CHECK(ncclAllReduce( *element.src, *element.dst, element.src.getCount(), ncclTypeWrapper<T>::type, op, comm, stream)); }); } template <typename T> void ReduceScatterOp<T>::runAsync() { const auto op = op_; this->runNCCL([op]( const NCCLElement<T>& element, ncclComm_t comm, cudaStream_t stream) { NCCL_CHECK(ncclReduceScatter( *element.src, *element.dst, element.dst.getCount(), ncclTypeWrapper<T>::type, op, comm, stream)); }); } template <typename T> void BroadcastOp<T>::runAsync() { const int root = root_; this->runNCCL([root]( const NCCLElement<T>& element, ncclComm_t comm, cudaStream_t stream) { NCCL_CHECK(ncclBcast( *element.dst, element.dst.getCount(), ncclTypeWrapper<T>::type, root, comm, stream)); }); } template <typename T> void AllgatherOp<T>::runAsync() { this->runNCCL([]( const NCCLElement<T>& element, ncclComm_t comm, cudaStream_t stream) { #if NCCL_VERSION_MIN(2,0,0) NCCL_CHECK(ncclAllGather( *element.src, *element.dst, element.src.getCount(), ncclTypeWrapper<T>::type, comm, stream)); #else NCCL_CHECK(ncclAllGather( *element.src, element.src.getCount(), ncclTypeWrapper<T>::type, *element.dst, comm, stream)); #endif }); } #define DEFINE_NCCL_TYPES_AND_OPS(T) \ template class NCCLExecution<T>; \ template class NCCLContext<T>; \ template class NCCLOp<T>; \ \ template class ReduceOp<T>; \ template class AllreduceOp<T>; \ template class ReduceScatterOp<T>; \ template class BroadcastOp<T>; \ template class AllgatherOp<T>; DEFINE_NCCL_TYPES_AND_OPS(int8_t); DEFINE_NCCL_TYPES_AND_OPS(uint8_t); DEFINE_NCCL_TYPES_AND_OPS(int32_t); DEFINE_NCCL_TYPES_AND_OPS(int64_t); DEFINE_NCCL_TYPES_AND_OPS(uint64_t); DEFINE_NCCL_TYPES_AND_OPS(float16); DEFINE_NCCL_TYPES_AND_OPS(float); DEFINE_NCCL_TYPES_AND_OPS(double); } // namespace nccl } // namespace gloo
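The core of runNCCL() above is the event ordering between each element's source stream, the per-device NCCL stream, and the destination stream: record an event on the producer stream, make the op stream wait on it, run the collective, then record again so the consumer stream waits on the op. The standalone CUDA sketch below reproduces that pattern with plain kernels instead of NCCL collectives; the kernel and stream names are illustrative assumptions and none of the Gloo types are used.

// Standalone illustration (no NCCL, no Gloo) of cudaEventRecord + cudaStreamWaitEvent ordering:
// work on a "source" stream is made visible to an "op" stream, and the op's completion is
// propagated to a "destination" stream, all without blocking the host.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void fill(float* p, int n, float v) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] = v;
}

__global__ void scale(float* p, int n, float s) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) p[i] *= s;
}

int main() {
  const int n = 1 << 20;
  float* buf = nullptr;
  cudaMalloc(&buf, n * sizeof(float));

  cudaStream_t src, op, dst;
  cudaStreamCreate(&src);
  cudaStreamCreate(&op);
  cudaStreamCreate(&dst);
  cudaEvent_t srcDone, opDone;
  cudaEventCreateWithFlags(&srcDone, cudaEventDisableTiming);
  cudaEventCreateWithFlags(&opDone, cudaEventDisableTiming);

  // Producer work on the source stream.
  fill<<<(n + 255) / 256, 256, 0, src>>>(buf, n, 2.0f);
  // The op stream waits for the producer before running the "collective".
  cudaEventRecord(srcDone, src);
  cudaStreamWaitEvent(op, srcDone, 0);
  scale<<<(n + 255) / 256, 256, 0, op>>>(buf, n, 3.0f);
  // The destination stream waits for the op before any consumer work.
  cudaEventRecord(opDone, op);
  cudaStreamWaitEvent(dst, opDone, 0);

  float first = 0.0f;
  cudaMemcpyAsync(&first, buf, sizeof(float), cudaMemcpyDeviceToHost, dst);
  cudaStreamSynchronize(dst);
  printf("buf[0] = %f (expected 6.0)\n", first);

  cudaEventDestroy(srcDone);
  cudaEventDestroy(opDone);
  cudaStreamDestroy(src);
  cudaStreamDestroy(op);
  cudaStreamDestroy(dst);
  cudaFree(buf);
  return 0;
}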
#include "typedef.h" #include "cuda_rys_sp.h" void my_cuda_safe(hipError_t err, std::string word) { if(err != hipSuccess) { fprintf(stderr, "Error during %s: ", word.c_str()); // check for error hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if(error != hipSuccess) { // print the CUDA error message and exit fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(error)); exit(-1); } } } __device__ int cuda_ij2intindex(int i, int j) { if (i < j) { int t = i; i = j; j = t; } return i * (i + 1) / 2 + j; } __device__ int cuda_fact(int n){ int result = 1; for (int i = 2; i <= n; i++) result *= i; return result; } __device__ int cuda_binomial(int a, int b){ return cuda_fact(a)/(cuda_fact(b)*cuda_fact(a-b)); } __device__ void cuda_Roots(int n, float X, float roots[], float weights[]){ if (n <= 3) cuda_Root123(n,X, roots,weights); else if (n==4) cuda_Root4(X, roots,weights); else if (n==5) cuda_Root5(X, roots,weights); else cuda_Root6(n,X, roots,weights); return; } __device__ void cuda_Root123(int n, float X, float roots[], float weights[]){ float R12, PIE4, R22, W22, R13, R23, W23, R33, W33; float RT1=0,RT2=0,RT3=0,WW1=0,WW2=0,WW3=0; float F1,F2,E,T1,T2,T3,A1,A2,Y; R12 = 2.75255128608411E-01f; PIE4 = 7.85398163397448E-01f; R22 = 2.72474487139158E+00f; W22 = 9.17517095361369E-02f; R13 = 1.90163509193487E-01f; R23 = 1.78449274854325E+00f; W23 = 1.77231492083829E-01f; R33 = 5.52534374226326E+00f; W33 = 5.11156880411248E-03f; if (X < 3.e-7f){ if (n == 1){ RT1 = 0.5E+00f -X/5.0E+00f; WW1 = 1.0E+00f -X/3.0E+00f; } else if (n == 2) { RT1 = 1.30693606237085E-01f -2.90430236082028E-02f *X; RT2 = 2.86930639376291E+00f -6.37623643058102E-01f *X; WW1 = 6.52145154862545E-01f -1.22713621927067E-01f *X; WW2 = 3.47854845137453E-01f -2.10619711404725E-01f *X; } else if (n == 3) { RT1 = 6.03769246832797E-02f -9.28875764357368E-03f *X; RT2 = 7.76823355931043E-01f -1.19511285527878E-01f *X; RT3 = 6.66279971938567E+00f -1.02504611068957E+00f *X; WW1 = 4.67913934572691E-01f -5.64876917232519E-02f *X; WW2 = 3.60761573048137E-01f -1.49077186455208E-01f *X; WW3 = 1.71324492379169E-01f -1.27768455150979E-01f *X; } } else if (X < 1.f) { if (n == 1){ F1 = ((((((((-8.36313918003957E-08f*X+1.21222603512827E-06f )*X- 1.15662609053481E-05f )*X+9.25197374512647E-05f )*X- 6.40994113129432E-04f )*X+3.78787044215009E-03f )*X- 1.85185172458485E-02f )*X+7.14285713298222E-02f )*X- 1.99999999997023E-01f )*X+3.33333333333318E-01f; WW1 = (X+X)*F1+expf(-X); RT1 = F1/(WW1-F1); } else if (n == 2) { F1 = ((((((((-8.36313918003957E-08f*X+1.21222603512827E-06f )*X- 1.15662609053481E-05f )*X+9.25197374512647E-05f )*X- 6.40994113129432E-04f )*X+3.78787044215009E-03f )*X- 1.85185172458485E-02f )*X+7.14285713298222E-02f )*X- 1.99999999997023E-01f )*X+3.33333333333318E-01f; WW1 = (X+X)*F1+expf(-X); RT1 = (((((((-2.35234358048491E-09f*X+2.49173650389842E-08f)*X- 4.558315364581E-08f)*X-2.447252174587E-06f)*X+ 4.743292959463E-05f)*X-5.33184749432408E-04f )*X+ 4.44654947116579E-03f )*X-2.90430236084697E-02f )*X+ 1.30693606237085E-01f; RT2 = (((((((-2.47404902329170E-08f*X+2.36809910635906E-07f)*X+ 1.835367736310E-06f)*X-2.066168802076E-05f)*X- 1.345693393936E-04f)*X-5.88154362858038E-05f )*X+ 5.32735082098139E-02f )*X-6.37623643056745E-01f )*X+ 2.86930639376289E+00f; WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1); WW1 = WW1-WW2; } else if (n==3){ RT1 = ((((((-5.10186691538870E-10f*X+2.40134415703450E-08f)*X- 5.01081057744427E-07f )*X+7.58291285499256E-06f )*X- 9.55085533670919E-05f )*X+1.02893039315878E-03f )*X- 9.28875764374337E-03f 
)*X+6.03769246832810E-02f; RT2 = ((((((-1.29646524960555E-08f*X+7.74602292865683E-08f)*X+ 1.56022811158727E-06f )*X-1.58051990661661E-05f )*X- 3.30447806384059E-04f )*X+9.74266885190267E-03f )*X- 1.19511285526388E-01f )*X+7.76823355931033E-01f; RT3 = ((((((-9.28536484109606E-09f*X-3.02786290067014E-07f)*X- 2.50734477064200E-06f )*X-7.32728109752881E-06f )*X+ 2.44217481700129E-04f )*X+4.94758452357327E-02f )*X- 1.02504611065774E+00f )*X+6.66279971938553E+00f; F2 = ((((((((-7.60911486098850E-08f*X+1.09552870123182E-06f )*X- 1.03463270693454E-05f )*X+8.16324851790106E-05f )*X- 5.55526624875562E-04f )*X+3.20512054753924E-03f )*X- 1.51515139838540E-02f )*X+5.55555554649585E-02f )*X- 1.42857142854412E-01f )*X+1.99999999999986E-01f; E = expf(-X); F1 = ((X+X)*F2+E)/3.0E+00f; WW1 = (X+X)*F1+E; T1 = RT1/(RT1+1.0E+00f); T2 = RT2/(RT2+1.0E+00f); T3 = RT3/(RT3+1.0E+00f); A2 = F2-T1*F1; A1 = F1-T1*WW1; WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1)); WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1)); WW1 = WW1-WW2-WW3; } } else if (X < 3.f) { Y = X-2.0E+00f; if (n == 1) { F1 = ((((((((((-1.61702782425558E-10f*Y+1.96215250865776E-09f )*Y- 2.14234468198419E-08f )*Y+2.17216556336318E-07f )*Y- 1.98850171329371E-06f )*Y+1.62429321438911E-05f )*Y- 1.16740298039895E-04f )*Y+7.24888732052332E-04f )*Y- 3.79490003707156E-03f )*Y+1.61723488664661E-02f )*Y- 5.29428148329736E-02f )*Y+1.15702180856167E-01f; WW1 = (X+X)*F1+expf(-X); RT1 = F1/(WW1-F1); } else if (n == 2) { F1 = ((((((((((-1.61702782425558E-10f*Y+1.96215250865776E-09f )*Y- 2.14234468198419E-08f )*Y+2.17216556336318E-07f )*Y- 1.98850171329371E-06f )*Y+1.62429321438911E-05f )*Y- 1.16740298039895E-04f )*Y+7.24888732052332E-04f )*Y- 3.79490003707156E-03f )*Y+1.61723488664661E-02f )*Y- 5.29428148329736E-02f )*Y+1.15702180856167E-01f; WW1 = (X+X)*F1+expf(-X); RT1 = (((((((((-6.36859636616415E-12f*Y+8.47417064776270E-11f)*Y- 5.152207846962E-10f)*Y-3.846389873308E-10f)*Y+ 8.472253388380E-08f)*Y-1.85306035634293E-06f )*Y+ 2.47191693238413E-05f )*Y-2.49018321709815E-04f )*Y+ 2.19173220020161E-03f )*Y-1.63329339286794E-02f )*Y+ 8.68085688285261E-02f; RT2 = ((((((((( 1.45331350488343E-10f*Y+2.07111465297976E-09f)*Y- 1.878920917404E-08f)*Y-1.725838516261E-07f)*Y+ 2.247389642339E-06f)*Y+9.76783813082564E-06f )*Y- 1.93160765581969E-04f )*Y-1.58064140671893E-03f )*Y+ 4.85928174507904E-02f )*Y-4.30761584997596E-01f )*Y+ 1.80400974537950E+00f; WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1); WW1 = WW1-WW2; } else if (n == 3) { RT1 = (((((((( 1.44687969563318E-12f*Y+4.85300143926755E-12f)*Y- 6.55098264095516E-10f )*Y+1.56592951656828E-08f )*Y- 2.60122498274734E-07f )*Y+3.86118485517386E-06f )*Y- 5.13430986707889E-05f )*Y+6.03194524398109E-04f )*Y- 6.11219349825090E-03f )*Y+4.52578254679079E-02f; RT2 = ((((((( 6.95964248788138E-10f*Y-5.35281831445517E-09f)*Y- 6.745205954533E-08f)*Y+1.502366784525E-06f)*Y+ 9.923326947376E-07f)*Y-3.89147469249594E-04f )*Y+ 7.51549330892401E-03f )*Y-8.48778120363400E-02f )*Y+ 5.73928229597613E-01f; RT3 = ((((((((-2.81496588401439E-10f*Y+3.61058041895031E-09f)*Y+ 4.53631789436255E-08f )*Y-1.40971837780847E-07f )*Y- 6.05865557561067E-06f )*Y-5.15964042227127E-05f )*Y+ 3.34761560498171E-05f )*Y+5.04871005319119E-02f )*Y- 8.24708946991557E-01f )*Y+4.81234667357205E+00f; F2 = ((((((((((-1.48044231072140E-10f*Y+1.78157031325097E-09f )*Y- 1.92514145088973E-08f )*Y+1.92804632038796E-07f )*Y- 1.73806555021045E-06f )*Y+1.39195169625425E-05f )*Y- 9.74574633246452E-05f )*Y+5.83701488646511E-04f )*Y- 2.89955494844975E-03f )*Y+1.13847001113810E-02f )*Y- 3.23446977320647E-02f 
)*Y+5.29428148329709E-02f; E = expf(-X); F1 = ((X+X)*F2+E)/3.0E+00f; WW1 = (X+X)*F1+E; T1 = RT1/(RT1+1.0E+00f); T2 = RT2/(RT2+1.0E+00f); T3 = RT3/(RT3+1.0E+00f); A2 = F2-T1*F1; A1 = F1-T1*WW1; WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1)); WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1)); WW1 = WW1-WW2-WW3; } } else if (X < 5.f){ Y = X-4.0E+00f; if (n == 1){ F1 = ((((((((((-2.62453564772299E-11f*Y+3.24031041623823E-10f )*Y- 3.614965656163E-09f)*Y+3.760256799971E-08f)*Y- 3.553558319675E-07f)*Y+3.022556449731E-06f)*Y- 2.290098979647E-05f)*Y+1.526537461148E-04f)*Y- 8.81947375894379E-04f )*Y+4.33207949514611E-03f )*Y- 1.75257821619926E-02f )*Y+5.28406320615584E-02f; WW1 = (X+X)*F1+expf(-X); RT1 = F1/(WW1-F1); } else if (n == 2) { F1 = ((((((((((-2.62453564772299E-11f*Y+3.24031041623823E-10f )*Y- 3.614965656163E-09f)*Y+3.760256799971E-08f)*Y- 3.553558319675E-07f)*Y+3.022556449731E-06f)*Y- 2.290098979647E-05f)*Y+1.526537461148E-04f)*Y- 8.81947375894379E-04f )*Y+4.33207949514611E-03f )*Y- 1.75257821619926E-02f )*Y+5.28406320615584E-02f; WW1 = (X+X)*F1+expf(-X); RT1 = ((((((((-4.11560117487296E-12f*Y+7.10910223886747E-11f)*Y- 1.73508862390291E-09f )*Y+5.93066856324744E-08f )*Y- 9.76085576741771E-07f )*Y+1.08484384385679E-05f )*Y- 1.12608004981982E-04f )*Y+1.16210907653515E-03f )*Y- 9.89572595720351E-03f )*Y+6.12589701086408E-02f; RT2 = (((((((((-1.80555625241001E-10f*Y+5.44072475994123E-10f)*Y+ 1.603498045240E-08f)*Y-1.497986283037E-07f)*Y- 7.017002532106E-07f)*Y+1.85882653064034E-05f )*Y- 2.04685420150802E-05f )*Y-2.49327728643089E-03f )*Y+ 3.56550690684281E-02f )*Y-2.60417417692375E-01f )*Y+ 1.12155283108289E+00f; WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1); WW1 = WW1-WW2; } else if (n == 3) { RT1 = ((((((( 1.44265709189601E-11f*Y-4.66622033006074E-10f)*Y+ 7.649155832025E-09f)*Y-1.229940017368E-07f)*Y+ 2.026002142457E-06f)*Y-2.87048671521677E-05f )*Y+ 3.70326938096287E-04f )*Y-4.21006346373634E-03f )*Y+ 3.50898470729044E-02f; RT2 = ((((((((-2.65526039155651E-11f*Y+1.97549041402552E-10f)*Y+ 2.15971131403034E-09f )*Y-7.95045680685193E-08f )*Y+ 5.15021914287057E-07f )*Y+1.11788717230514E-05f )*Y- 3.33739312603632E-04f )*Y+5.30601428208358E-03f )*Y- 5.93483267268959E-02f )*Y+4.31180523260239E-01f; RT3 = ((((((((-3.92833750584041E-10f*Y-4.16423229782280E-09f)*Y+ 4.42413039572867E-08f )*Y+6.40574545989551E-07f )*Y- 3.05512456576552E-06f )*Y-1.05296443527943E-04f )*Y- 6.14120969315617E-04f )*Y+4.89665802767005E-02f )*Y- 6.24498381002855E-01f )*Y+3.36412312243724E+00f; F2 = ((((((((((-2.36788772599074E-11f*Y+2.89147476459092E-10f )*Y- 3.18111322308846E-09f )*Y+3.25336816562485E-08f )*Y- 3.00873821471489E-07f )*Y+2.48749160874431E-06f )*Y- 1.81353179793672E-05f )*Y+1.14504948737066E-04f )*Y- 6.10614987696677E-04f )*Y+2.64584212770942E-03f )*Y- 8.66415899015349E-03f )*Y+1.75257821619922E-02f; E = expf(-X); F1 = ((X+X)*F2+E)/3.0E+00f; WW1 = (X+X)*F1+E; T1 = RT1/(RT1+1.0E+00f); T2 = RT2/(RT2+1.0E+00f); T3 = RT3/(RT3+1.0E+00f); A2 = F2-T1*F1; A1 = F1-T1*WW1; WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1)); WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1)); WW1 = WW1-WW2-WW3; } } else if (X < 10.f) { E = expf(-X); WW1 = (((((( 4.6897511375022E-01f/X-6.9955602298985E-01f)/X + 5.3689283271887E-01f)/X-3.2883030418398E-01f)/X + 2.4645596956002E-01f)/X-4.9984072848436E-01f)/X - 3.1501078774085E-06f)*E + sqrtf(PIE4/X); F1 = (WW1-E)/(X+X); if (n == 1) RT1 = F1/(WW1-F1); else if (n == 2){ Y = X-7.5E+00f; RT1 = (((((((((((((-1.43632730148572E-16f*Y+2.38198922570405E-16f)* Y+1.358319618800E-14f)*Y-7.064522786879E-14f)*Y- 
7.719300212748E-13f)*Y+7.802544789997E-12f)*Y+ 6.628721099436E-11f)*Y-1.775564159743E-09f)*Y+ 1.713828823990E-08f)*Y-1.497500187053E-07f)*Y+ 2.283485114279E-06f)*Y-3.76953869614706E-05f )*Y+ 4.74791204651451E-04f )*Y-4.60448960876139E-03f )*Y+ 3.72458587837249E-02f; RT2 = (((((((((((( 2.48791622798900E-14f*Y-1.36113510175724E-13f)*Y- 2.224334349799E-12f)*Y+4.190559455515E-11f)*Y- 2.222722579924E-10f)*Y-2.624183464275E-09f)*Y+ 6.128153450169E-08f)*Y-4.383376014528E-07f)*Y- 2.49952200232910E-06f )*Y+1.03236647888320E-04f )*Y- 1.44614664924989E-03f )*Y+1.35094294917224E-02f )*Y- 9.53478510453887E-02f )*Y+5.44765245686790E-01f; WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1); WW1 = WW1-WW2; } else if (n == 3) { F2 = (F1+F1+F1-E)/(X+X); Y = X-7.5E+00f; RT1 = ((((((((((( 5.74429401360115E-16f*Y+7.11884203790984E-16f)*Y- 6.736701449826E-14f)*Y-6.264613873998E-13f)*Y+ 1.315418927040E-11f)*Y-4.23879635610964E-11f )*Y+ 1.39032379769474E-09f )*Y-4.65449552856856E-08f )*Y+ 7.34609900170759E-07f )*Y-1.08656008854077E-05f )*Y+ 1.77930381549953E-04f )*Y-2.39864911618015E-03f )*Y+ 2.39112249488821E-02f; RT2 = ((((((((((( 1.13464096209120E-14f*Y+6.99375313934242E-15f)*Y- 8.595618132088E-13f)*Y-5.293620408757E-12f)*Y- 2.492175211635E-11f)*Y+2.73681574882729E-09f )*Y- 1.06656985608482E-08f )*Y-4.40252529648056E-07f )*Y+ 9.68100917793911E-06f )*Y-1.68211091755327E-04f )*Y+ 2.69443611274173E-03f )*Y-3.23845035189063E-02f )*Y+ 2.75969447451882E-01f; RT3 = (((((((((((( 6.66339416996191E-15f*Y+1.84955640200794E-13f)*Y- 1.985141104444E-12f)*Y-2.309293727603E-11f)*Y+ 3.917984522103E-10f)*Y+1.663165279876E-09f)*Y- 6.205591993923E-08f)*Y+8.769581622041E-09f)*Y+ 8.97224398620038E-06f )*Y-3.14232666170796E-05f )*Y- 1.83917335649633E-03f )*Y+3.51246831672571E-02f )*Y- 3.22335051270860E-01f )*Y+1.73582831755430E+00f; T1 = RT1/(RT1+1.0E+00f); T2 = RT2/(RT2+1.0E+00f); T3 = RT3/(RT3+1.0E+00f); A2 = F2-T1*F1; A1 = F1-T1*WW1; WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1)); WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1)); WW1 = WW1-WW2-WW3; } } else if (X < 15.f) { E = expf(-X); WW1 = (((-1.8784686463512E-01f/X+2.2991849164985E-01f)/X - 4.9893752514047E-01f)/X-2.1916512131607E-05f)*E + sqrtf(PIE4/X); F1 = (WW1-E)/(X+X); if (n == 1) RT1 = F1/(WW1-F1); else if (n == 2) { RT1 = ((((-1.01041157064226E-05f*X+1.19483054115173E-03f)*X - 6.73760231824074E-02f)*X+1.25705571069895E+00f)*X + (((-8.57609422987199E+03f/X+5.91005939591842E+03f)/X - 1.70807677109425E+03f)/X+2.64536689959503E+02f)/X - 2.38570496490846E+01f)*E + R12/(X-R12); RT2 = ((( 3.39024225137123E-04f*X-9.34976436343509E-02f)*X - 4.22216483306320E+00f)*X + (((-2.08457050986847E+03f/X - 1.04999071905664E+03f)/X+3.39891508992661E+02f)/X - 1.56184800325063E+02f)/X+8.00839033297501E+00f)*E + R22/(X-R22); WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1); WW1 = WW1-WW2; } else if (n == 3) { F2 = (F1+F1+F1-E)/(X+X); Y = X-12.5E+00f; RT1 = ((((((((((( 4.42133001283090E-16f*Y-2.77189767070441E-15f)*Y- 4.084026087887E-14f)*Y+5.379885121517E-13f)*Y+ 1.882093066702E-12f)*Y-8.67286219861085E-11f )*Y+ 7.11372337079797E-10f )*Y-3.55578027040563E-09f )*Y+ 1.29454702851936E-07f )*Y-4.14222202791434E-06f )*Y+ 8.04427643593792E-05f )*Y-1.18587782909876E-03f )*Y+ 1.53435577063174E-02f; RT2 = ((((((((((( 6.85146742119357E-15f*Y-1.08257654410279E-14f)*Y- 8.579165965128E-13f)*Y+6.642452485783E-12f)*Y+ 4.798806828724E-11f)*Y-1.13413908163831E-09f )*Y+ 7.08558457182751E-09f )*Y-5.59678576054633E-08f )*Y+ 2.51020389884249E-06f )*Y-6.63678914608681E-05f )*Y+ 1.11888323089714E-03f )*Y-1.45361636398178E-02f )*Y+ 
1.65077877454402E-01f; RT3 = (((((((((((( 3.20622388697743E-15f*Y-2.73458804864628E-14f)*Y- 3.157134329361E-13f)*Y+8.654129268056E-12f)*Y- 5.625235879301E-11f)*Y-7.718080513708E-10f)*Y+ 2.064664199164E-08f)*Y-1.567725007761E-07f)*Y- 1.57938204115055E-06f )*Y+6.27436306915967E-05f )*Y- 1.01308723606946E-03f )*Y+1.13901881430697E-02f )*Y- 1.01449652899450E-01f )*Y+7.77203937334739E-01f; T1 = RT1/(RT1+1.0E+00f); T2 = RT2/(RT2+1.0E+00f); T3 = RT3/(RT3+1.0E+00f); A2 = F2-T1*F1; A1 = F1-T1*WW1; WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1)); WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1)); WW1 = WW1-WW2-WW3; } } else if (X < 33.f) { E = expf(-X); WW1 = (( 1.9623264149430E-01f/X-4.9695241464490E-01f)/X - 6.0156581186481E-05f)*E + sqrtf(PIE4/X); F1 = (WW1-E)/(X+X); if (n == 1) RT1 = F1/(WW1-F1); else if (n == 2){ RT1 = ((((-1.14906395546354E-06f*X+1.76003409708332E-04f)*X - 1.71984023644904E-02f)*X-1.37292644149838E-01f)*X + (-4.75742064274859E+01f/X+9.21005186542857E+00f)/X - 2.31080873898939E-02f)*E + R12/(X-R12); RT2 = ((( 3.64921633404158E-04f*X-9.71850973831558E-02f)*X - 4.02886174850252E+00f)*X + (-1.35831002139173E+02f/X - 8.66891724287962E+01f)/X+2.98011277766958E+00f)*E + R22/(X-R22); WW2 = ((F1-WW1)*RT1+F1)*(1.0E+00f+RT2)/(RT2-RT1); WW1 = WW1-WW2; } else if (n == 3) { F2 = (F1+F1+F1-E)/(X+X); if (X < 20.f) { RT1 = ((((((-2.43270989903742E-06f*X+3.57901398988359E-04f)*X - 2.34112415981143E-02f)*X+7.81425144913975E-01f)*X - 1.73209218219175E+01f)*X+2.43517435690398E+02f)*X + (-1.97611541576986E+04f/X+9.82441363463929E+03f)/X - 2.07970687843258E+03f)*E + R13/(X-R13); RT2 = (((((-2.62627010965435E-04f*X+3.49187925428138E-02f)*X - 3.09337618731880E+00f)*X+1.07037141010778E+02f)*X - 2.36659637247087E+03f)*X + ((-2.91669113681020E+06f/X + 1.41129505262758E+06f)/X-2.91532335433779E+05f)/X + 3.35202872835409E+04f)*E + R23/(X-R23); RT3 = ((((( 9.31856404738601E-05f*X-2.87029400759565E-02f)*X - 7.83503697918455E-01f)*X-1.84338896480695E+01f)*X + 4.04996712650414E+02f)*X + (-1.89829509315154E+05f/X + 5.11498390849158E+04f)/X-6.88145821789955E+03f)*E + R33/(X-R33); } else { RT1 = ((((-4.97561537069643E-04f*X-5.00929599665316E-02f)*X + 1.31099142238996E+00f)*X-1.88336409225481E+01f)*X - 6.60344754467191E+02f /X+1.64931462413877E+02f)*E + R13/(X-R13); RT2 = ((((-4.48218898474906E-03f*X-5.17373211334924E-01f)*X + 1.13691058739678E+01f)*X-1.65426392885291E+02f)*X - 6.30909125686731E+03f /X+1.52231757709236E+03f)*E + R23/(X-R23); RT3 = ((((-1.38368602394293E-02f*X-1.77293428863008E+00f)*X + 1.73639054044562E+01f)*X-3.57615122086961E+02f)*X - 1.45734701095912E+04f /X+2.69831813951849E+03f)*E + R33/(X-R33); } T1 = RT1/(RT1+1.0E+00f); T2 = RT2/(RT2+1.0E+00f); T3 = RT3/(RT3+1.0E+00f); A2 = F2-T1*F1; A1 = F1-T1*WW1; WW3 = (A2-T2*A1)/((T3-T2)*(T3-T1)); WW2 = (T3*A1-A2)/((T3-T2)*(T2-T1)); WW1 = WW1-WW2-WW3; } } else { WW1 = sqrtf(PIE4/X); if (n == 1) RT1 = 0.5E+00f/(X-0.5E+00f); else if (n == 2) { if (X < 40.f) { E = expf(-X); RT1 = (-8.78947307498880E-01f*X+1.09243702330261E+01f)*E + R12/(X-R12); RT2 = (-9.28903924275977E+00f*X+8.10642367843811E+01f)*E + R22/(X-R22); WW2 = ( 4.46857389308400E+00f*X-7.79250653461045E+01f)*E + W22*WW1; WW1 = WW1-WW2; } else { RT1 = R12/(X-R12); RT2 = R22/(X-R22); WW2 = W22*WW1; WW1 = WW1-WW2; } } else if (n == 3) { if (X < 47.f) { E = expf(-X); RT1 = ((-7.39058467995275E+00f*X+3.21318352526305E+02f)*X - 3.99433696473658E+03f)*E + R13/(X-R13); RT2 = ((-7.38726243906513E+01f*X+3.13569966333873E+03f)*X - 3.86862867311321E+04f)*E + R23/(X-R23); RT3 = ((-2.63750565461336E+02f*X+1.04412168692352E+04f)*X - 
1.28094577915394E+05f)*E + R33/(X-R33); WW3 = ((( 1.52258947224714E-01f*X-8.30661900042651E+00f)*X + 1.92977367967984E+02f)*X-1.67787926005344E+03f)*E + W33*WW1; WW2 = (( 6.15072615497811E+01f*X-2.91980647450269E+03f)*X + 3.80794303087338E+04f)*E + W23*WW1; WW1 = WW1-WW2-WW3; } else { RT1 = R13/(X-R13); RT2 = R23/(X-R23); RT3 = R33/(X-R33); WW2 = W23*WW1; WW3 = W33*WW1; WW1 = WW1-WW2-WW3; } } } roots[0] = RT1; weights[0] = WW1; if (n > 1){ roots[1] = RT2; weights[1] = WW2; } if (n > 2) { roots[2] = RT3; weights[2] = WW3; } return; } __device__ void cuda_Root4(float X, float roots[], float weights[]){ float R14,PIE4,R24,W24,R34,W34,R44,W44; float RT1=0,RT2=0,RT3=0,RT4=0,WW1=0,WW2=0,WW3=0,WW4=0; float Y,E; R14 = 1.45303521503316E-01f; PIE4 = 7.85398163397448E-01f; R24 = 1.33909728812636E+00f; W24 = 2.34479815323517E-01f; R34 = 3.92696350135829E+00f; W34 = 1.92704402415764E-02f; R44 = 8.58863568901199E+00f; W44 = 2.25229076750736E-04f; if (X <= 3.0E-7f) { RT1 = 3.48198973061471E-02f -4.09645850660395E-03f *X; RT2 = 3.81567185080042E-01f -4.48902570656719E-02f *X; RT3 = 1.73730726945891E+00f -2.04389090547327E-01f *X; RT4 = 1.18463056481549E+01f -1.39368301742312E+00f *X; WW1 = 3.62683783378362E-01f -3.13844305713928E-02f *X; WW2 = 3.13706645877886E-01f -8.98046242557724E-02f *X; WW3 = 2.22381034453372E-01f -1.29314370958973E-01f *X; WW4 = 1.01228536290376E-01f -8.28299075414321E-02f *X; } else if (X <= 1.f) { RT1 = ((((((-1.95309614628539E-10f*X+5.19765728707592E-09f)*X- 1.01756452250573E-07f )*X+1.72365935872131E-06f )*X- 2.61203523522184E-05f )*X+3.52921308769880E-04f )*X- 4.09645850658433E-03f )*X+3.48198973061469E-02f; RT2 = (((((-1.89554881382342E-08f*X+3.07583114342365E-07f)*X+ 1.270981734393E-06f)*X-1.417298563884E-04f)*X+ 3.226979163176E-03f)*X-4.48902570678178E-02f )*X+ 3.81567185080039E-01f; RT3 = (((((( 1.77280535300416E-09f*X+3.36524958870615E-08f)*X- 2.58341529013893E-07f )*X-1.13644895662320E-05f )*X- 7.91549618884063E-05f )*X+1.03825827346828E-02f )*X- 2.04389090525137E-01f )*X+1.73730726945889E+00f; RT4 = (((((-5.61188882415248E-08f*X-2.49480733072460E-07f)*X+ 3.428685057114E-06f)*X+1.679007454539E-04f)*X+ 4.722855585715E-02f)*X-1.39368301737828E+00f )*X+ 1.18463056481543E+01f; WW1 = ((((((-1.14649303201279E-08f*X+1.88015570196787E-07f)*X- 2.33305875372323E-06f )*X+2.68880044371597E-05f )*X- 2.94268428977387E-04f )*X+3.06548909776613E-03f )*X- 3.13844305680096E-02f )*X+3.62683783378335E-01f; WW2 = ((((((((-4.11720483772634E-09f*X+6.54963481852134E-08f)*X- 7.20045285129626E-07f )*X+6.93779646721723E-06f )*X- 6.05367572016373E-05f )*X+4.74241566251899E-04f )*X- 3.26956188125316E-03f )*X+1.91883866626681E-02f )*X- 8.98046242565811E-02f )*X+3.13706645877886E-01f; WW3 = ((((((((-3.41688436990215E-08f*X+5.07238960340773E-07f)*X- 5.01675628408220E-06f )*X+4.20363420922845E-05f )*X- 3.08040221166823E-04f )*X+1.94431864731239E-03f )*X- 1.02477820460278E-02f )*X+4.28670143840073E-02f )*X- 1.29314370962569E-01f )*X+2.22381034453369E-01f; WW4 = ((((((((( 4.99660550769508E-09f*X-7.94585963310120E-08f)*X+ 8.359072409485E-07f)*X-7.422369210610E-06f)*X+ 5.763374308160E-05f)*X-3.86645606718233E-04f )*X+ 2.18417516259781E-03f )*X-9.99791027771119E-03f )*X+ 3.48791097377370E-02f )*X-8.28299075413889E-02f )*X+ 1.01228536290376E-01f; } else if (X <= 5.f) { Y = X-3.0E+00f; RT1 = (((((((((-1.48570633747284E-15f*Y-1.33273068108777E-13f)*Y+ 4.068543696670E-12f)*Y-9.163164161821E-11f)*Y+ 2.046819017845E-09f)*Y-4.03076426299031E-08f )*Y+ 7.29407420660149E-07f )*Y-1.23118059980833E-05f )*Y+ 
1.88796581246938E-04f )*Y-2.53262912046853E-03f )*Y+ 2.51198234505021E-02f; RT2 = ((((((((( 1.35830583483312E-13f*Y-2.29772605964836E-12f)*Y- 3.821500128045E-12f)*Y+6.844424214735E-10f)*Y- 1.048063352259E-08f)*Y+1.50083186233363E-08f )*Y+ 3.48848942324454E-06f )*Y-1.08694174399193E-04f )*Y+ 2.08048885251999E-03f )*Y-2.91205805373793E-02f )*Y+ 2.72276489515713E-01f; RT3 = ((((((((( 5.02799392850289E-13f*Y+1.07461812944084E-11f)*Y- 1.482277886411E-10f)*Y-2.153585661215E-09f)*Y+ 3.654087802817E-08f)*Y+5.15929575830120E-07f )*Y- 9.52388379435709E-06f )*Y-2.16552440036426E-04f )*Y+ 9.03551469568320E-03f )*Y-1.45505469175613E-01f )*Y+ 1.21449092319186E+00f; RT4 = (((((((((-1.08510370291979E-12f*Y+6.41492397277798E-11f)*Y+ 7.542387436125E-10f)*Y-2.213111836647E-09f)*Y- 1.448228963549E-07f)*Y-1.95670833237101E-06f )*Y- 1.07481314670844E-05f )*Y+1.49335941252765E-04f )*Y+ 4.87791531990593E-02f )*Y-1.10559909038653E+00f )*Y+ 8.09502028611780E+00f; WW1 = ((((((((((-4.65801912689961E-14f*Y+7.58669507106800E-13f)*Y- 1.186387548048E-11f)*Y+1.862334710665E-10f)*Y- 2.799399389539E-09f)*Y+4.148972684255E-08f)*Y- 5.933568079600E-07f)*Y+8.168349266115E-06f)*Y- 1.08989176177409E-04f )*Y+1.41357961729531E-03f )*Y- 1.87588361833659E-02f )*Y+2.89898651436026E-01f; WW2 = ((((((((((((-1.46345073267549E-14f*Y+2.25644205432182E-13f)*Y- 3.116258693847E-12f)*Y+4.321908756610E-11f)*Y- 5.673270062669E-10f)*Y+7.006295962960E-09f)*Y- 8.120186517000E-08f)*Y+8.775294645770E-07f)*Y- 8.77829235749024E-06f )*Y+8.04372147732379E-05f )*Y- 6.64149238804153E-04f )*Y+4.81181506827225E-03f )*Y- 2.88982669486183E-02f )*Y+1.56247249979288E-01f; WW3 = ((((((((((((( 9.06812118895365E-15f*Y-1.40541322766087E-13f)* Y+1.919270015269E-12f)*Y-2.605135739010E-11f)*Y+ 3.299685839012E-10f)*Y-3.86354139348735E-09f )*Y+ 4.16265847927498E-08f )*Y-4.09462835471470E-07f )*Y+ 3.64018881086111E-06f )*Y-2.88665153269386E-05f )*Y+ 2.00515819789028E-04f )*Y-1.18791896897934E-03f )*Y+ 5.75223633388589E-03f )*Y-2.09400418772687E-02f )*Y+ 4.85368861938873E-02f; WW4 = ((((((((((((((-9.74835552342257E-16f*Y+1.57857099317175E-14f)* Y-2.249993780112E-13f)*Y+3.173422008953E-12f)*Y- 4.161159459680E-11f)*Y+5.021343560166E-10f)*Y- 5.545047534808E-09f)*Y+5.554146993491E-08f)*Y- 4.99048696190133E-07f )*Y+3.96650392371311E-06f )*Y- 2.73816413291214E-05f )*Y+1.60106988333186E-04f )*Y- 7.64560567879592E-04f )*Y+2.81330044426892E-03f )*Y- 7.16227030134947E-03f )*Y+9.66077262223353E-03f; } else if (X <= 10.f) { Y = X-7.5E+00f; RT1 = ((((((((( 4.64217329776215E-15f*Y-6.27892383644164E-15f)*Y+ 3.462236347446E-13f)*Y-2.927229355350E-11f)*Y+ 5.090355371676E-10f)*Y-9.97272656345253E-09f )*Y+ 2.37835295639281E-07f )*Y-4.60301761310921E-06f )*Y+ 8.42824204233222E-05f )*Y-1.37983082233081E-03f )*Y+ 1.66630865869375E-02f; RT2 = ((((((((( 2.93981127919047E-14f*Y+8.47635639065744E-13f)*Y- 1.446314544774E-11f)*Y-6.149155555753E-12f)*Y+ 8.484275604612E-10f)*Y-6.10898827887652E-08f )*Y+ 2.39156093611106E-06f )*Y-5.35837089462592E-05f )*Y+ 1.00967602595557E-03f )*Y-1.57769317127372E-02f )*Y+ 1.74853819464285E-01f; RT3 = (((((((((( 2.93523563363000E-14f*Y-6.40041776667020E-14f)*Y- 2.695740446312E-12f)*Y+1.027082960169E-10f)*Y- 5.822038656780E-10f)*Y-3.159991002539E-08f)*Y+ 4.327249251331E-07f)*Y+4.856768455119E-06f)*Y- 2.54617989427762E-04f )*Y+5.54843378106589E-03f )*Y- 7.95013029486684E-02f )*Y+7.20206142703162E-01f; RT4 = (((((((((((-1.62212382394553E-14f*Y+7.68943641360593E-13f)*Y+ 5.764015756615E-12f)*Y-1.380635298784E-10f)*Y- 1.476849808675E-09f)*Y+1.84347052385605E-08f )*Y+ 
3.34382940759405E-07f )*Y-1.39428366421645E-06f )*Y- 7.50249313713996E-05f )*Y-6.26495899187507E-04f )*Y+ 4.69716410901162E-02f )*Y-6.66871297428209E-01f )*Y+ 4.11207530217806E+00f; WW1 = ((((((((((-1.65995045235997E-15f*Y+6.91838935879598E-14f)*Y- 9.131223418888E-13f)*Y+1.403341829454E-11f)*Y- 3.672235069444E-10f)*Y+6.366962546990E-09f)*Y- 1.039220021671E-07f)*Y+1.959098751715E-06f)*Y- 3.33474893152939E-05f )*Y+5.72164211151013E-04f )*Y- 1.05583210553392E-02f )*Y+2.26696066029591E-01f; WW2 = ((((((((((((-3.57248951192047E-16f*Y+6.25708409149331E-15f)*Y- 9.657033089714E-14f)*Y+1.507864898748E-12f)*Y- 2.332522256110E-11f)*Y+3.428545616603E-10f)*Y- 4.698730937661E-09f)*Y+6.219977635130E-08f)*Y- 7.83008889613661E-07f )*Y+9.08621687041567E-06f )*Y- 9.86368311253873E-05f )*Y+9.69632496710088E-04f )*Y- 8.14594214284187E-03f )*Y+8.50218447733457E-02f; WW3 = ((((((((((((( 1.64742458534277E-16f*Y-2.68512265928410E-15f)* Y+3.788890667676E-14f)*Y-5.508918529823E-13f)*Y+ 7.555896810069E-12f)*Y-9.69039768312637E-11f )*Y+ 1.16034263529672E-09f )*Y-1.28771698573873E-08f )*Y+ 1.31949431805798E-07f )*Y-1.23673915616005E-06f )*Y+ 1.04189803544936E-05f )*Y-7.79566003744742E-05f )*Y+ 5.03162624754434E-04f )*Y-2.55138844587555E-03f )*Y+ 1.13250730954014E-02f; WW4 = ((((((((((((((-1.55714130075679E-17f*Y+2.57193722698891E-16f)* Y-3.626606654097E-15f)*Y+5.234734676175E-14f)*Y- 7.067105402134E-13f)*Y+8.793512664890E-12f)*Y- 1.006088923498E-10f)*Y+1.050565098393E-09f)*Y- 9.91517881772662E-09f )*Y+8.35835975882941E-08f )*Y- 6.19785782240693E-07f )*Y+3.95841149373135E-06f )*Y- 2.11366761402403E-05f )*Y+9.00474771229507E-05f )*Y- 2.78777909813289E-04f )*Y+5.26543779837487E-04f; } else if (X <= 15.f) { Y = X-12.5E+00f; RT1 = ((((((((((( 4.94869622744119E-17f*Y+8.03568805739160E-16f)*Y- 5.599125915431E-15f)*Y-1.378685560217E-13f)*Y+ 7.006511663249E-13f)*Y+1.30391406991118E-11f )*Y+ 8.06987313467541E-11f )*Y-5.20644072732933E-09f )*Y+ 7.72794187755457E-08f )*Y-1.61512612564194E-06f )*Y+ 4.15083811185831E-05f )*Y-7.87855975560199E-04f )*Y+ 1.14189319050009E-02f; RT2 = ((((((((((( 4.89224285522336E-16f*Y+1.06390248099712E-14f)*Y- 5.446260182933E-14f)*Y-1.613630106295E-12f)*Y+ 3.910179118937E-12f)*Y+1.90712434258806E-10f )*Y+ 8.78470199094761E-10f )*Y-5.97332993206797E-08f )*Y+ 9.25750831481589E-07f )*Y-2.02362185197088E-05f )*Y+ 4.92341968336776E-04f )*Y-8.68438439874703E-03f )*Y+ 1.15825965127958E-01f; RT3 = (((((((((( 6.12419396208408E-14f*Y+1.12328861406073E-13f)*Y- 9.051094103059E-12f)*Y-4.781797525341E-11f)*Y+ 1.660828868694E-09f)*Y+4.499058798868E-10f)*Y- 2.519549641933E-07f)*Y+4.977444040180E-06f)*Y- 1.25858350034589E-04f )*Y+2.70279176970044E-03f )*Y- 3.99327850801083E-02f )*Y+4.33467200855434E-01f; RT4 = ((((((((((( 4.63414725924048E-14f*Y-4.72757262693062E-14f)*Y- 1.001926833832E-11f)*Y+6.074107718414E-11f)*Y+ 1.576976911942E-09f)*Y-2.01186401974027E-08f )*Y- 1.84530195217118E-07f )*Y+5.02333087806827E-06f )*Y+ 9.66961790843006E-06f )*Y-1.58522208889528E-03f )*Y+ 2.80539673938339E-02f )*Y-2.78953904330072E-01f )*Y+ 1.82835655238235E+00f; WW4 = ((((((((((((( 2.90401781000996E-18f*Y-4.63389683098251E-17f)* Y+6.274018198326E-16f)*Y-8.936002188168E-15f)*Y+ 1.194719074934E-13f)*Y-1.45501321259466E-12f )*Y+ 1.64090830181013E-11f )*Y-1.71987745310181E-10f )*Y+ 1.63738403295718E-09f )*Y-1.39237504892842E-08f )*Y+ 1.06527318142151E-07f )*Y-7.27634957230524E-07f )*Y+ 4.12159381310339E-06f )*Y-1.74648169719173E-05f )*Y+ 8.50290130067818E-05f; WW3 = ((((((((((((-4.19569145459480E-17f*Y+5.94344180261644E-16f)*Y- 
1.148797566469E-14f)*Y+1.881303962576E-13f)*Y- 2.413554618391E-12f)*Y+3.372127423047E-11f)*Y- 4.933988617784E-10f)*Y+6.116545396281E-09f)*Y- 6.69965691739299E-08f )*Y+7.52380085447161E-07f )*Y- 8.08708393262321E-06f )*Y+6.88603417296672E-05f )*Y- 4.67067112993427E-04f )*Y+5.42313365864597E-03f; WW2 = ((((((((((-6.22272689880615E-15f*Y+1.04126809657554E-13f)*Y- 6.842418230913E-13f)*Y+1.576841731919E-11f)*Y- 4.203948834175E-10f)*Y+6.287255934781E-09f)*Y- 8.307159819228E-08f)*Y+1.356478091922E-06f)*Y- 2.08065576105639E-05f )*Y+2.52396730332340E-04f )*Y- 2.94484050194539E-03f )*Y+6.01396183129168E-02f; WW1 = (((-1.8784686463512E-01f/X+2.2991849164985E-01f)/X - 4.9893752514047E-01f)/X-2.1916512131607E-05f)*expf(-X) + sqrtf(PIE4/X)-WW4-WW3-WW2; } else if (X <= 20.f) { WW1 = sqrtf(PIE4/X); Y = X-17.5E+00f; RT1 = ((((((((((( 4.36701759531398E-17f*Y-1.12860600219889E-16f)*Y- 6.149849164164E-15f)*Y+5.820231579541E-14f)*Y+ 4.396602872143E-13f)*Y-1.24330365320172E-11f )*Y+ 6.71083474044549E-11f )*Y+2.43865205376067E-10f )*Y+ 1.67559587099969E-08f )*Y-9.32738632357572E-07f )*Y+ 2.39030487004977E-05f )*Y-4.68648206591515E-04f )*Y+ 8.34977776583956E-03f; RT2 = ((((((((((( 4.98913142288158E-16f*Y-2.60732537093612E-16f)*Y- 7.775156445127E-14f)*Y+5.766105220086E-13f)*Y+ 6.432696729600E-12f)*Y-1.39571683725792E-10f )*Y+ 5.95451479522191E-10f )*Y+2.42471442836205E-09f )*Y+ 2.47485710143120E-07f )*Y-1.14710398652091E-05f )*Y+ 2.71252453754519E-04f )*Y-4.96812745851408E-03f )*Y+ 8.26020602026780E-02f; RT3 = ((((((((((( 1.91498302509009E-15f*Y+1.48840394311115E-14f)*Y- 4.316925145767E-13f)*Y+1.186495793471E-12f)*Y+ 4.615806713055E-11f)*Y-5.54336148667141E-10f )*Y+ 3.48789978951367E-10f )*Y-2.79188977451042E-09f )*Y+ 2.09563208958551E-06f )*Y-6.76512715080324E-05f )*Y+ 1.32129867629062E-03f )*Y-2.05062147771513E-02f )*Y+ 2.88068671894324E-01f; RT4 = (((((((((((-5.43697691672942E-15f*Y-1.12483395714468E-13f)*Y+ 2.826607936174E-12f)*Y-1.266734493280E-11f)*Y- 4.258722866437E-10f)*Y+9.45486578503261E-09f )*Y- 5.86635622821309E-08f )*Y-1.28835028104639E-06f )*Y+ 4.41413815691885E-05f )*Y-7.61738385590776E-04f )*Y+ 9.66090902985550E-03f )*Y-1.01410568057649E-01f )*Y+ 9.54714798156712E-01f; WW4 = ((((((((((((-7.56882223582704E-19f*Y+7.53541779268175E-18f)*Y- 1.157318032236E-16f)*Y+2.411195002314E-15f)*Y- 3.601794386996E-14f)*Y+4.082150659615E-13f)*Y- 4.289542980767E-12f)*Y+5.086829642731E-11f)*Y- 6.35435561050807E-10f )*Y+6.82309323251123E-09f )*Y- 5.63374555753167E-08f )*Y+3.57005361100431E-07f )*Y- 2.40050045173721E-06f )*Y+4.94171300536397E-05f; WW3 = (((((((((((-5.54451040921657E-17f*Y+2.68748367250999E-16f)*Y+ 1.349020069254E-14f)*Y-2.507452792892E-13f)*Y+ 1.944339743818E-12f)*Y-1.29816917658823E-11f )*Y+ 3.49977768819641E-10f )*Y-8.67270669346398E-09f )*Y+ 1.31381116840118E-07f )*Y-1.36790720600822E-06f )*Y+ 1.19210697673160E-05f )*Y-1.42181943986587E-04f )*Y+ 4.12615396191829E-03f; WW2 = (((((((((((-1.86506057729700E-16f*Y+1.16661114435809E-15f)*Y+ 2.563712856363E-14f)*Y-4.498350984631E-13f)*Y+ 1.765194089338E-12f)*Y+9.04483676345625E-12f )*Y+ 4.98930345609785E-10f )*Y-2.11964170928181E-08f )*Y+ 3.98295476005614E-07f )*Y-5.49390160829409E-06f )*Y+ 7.74065155353262E-05f )*Y-1.48201933009105E-03f )*Y+ 4.97836392625268E-02f; WW1 = (( 1.9623264149430E-01f/X-4.9695241464490E-01f)/X - 6.0156581186481E-05f)*expf(-X)+WW1-WW2-WW3-WW4; } else if (X <= 35.f) { WW1 = sqrtf(PIE4/X); E = expf(-X); RT1 = ((((((-4.45711399441838E-05f*X+1.27267770241379E-03f)*X - 2.36954961381262E-01f)*X+1.54330657903756E+01f)*X - 
5.22799159267808E+02f)*X+1.05951216669313E+04f)*X + (-2.51177235556236E+06f/X+8.72975373557709E+05f)/X - 1.29194382386499E+05f)*E + R14/(X-R14); RT2 = (((((-7.85617372254488E-02f*X+6.35653573484868E+00f)*X - 3.38296938763990E+02f)*X+1.25120495802096E+04f)*X - 3.16847570511637E+05f)*X + ((-1.02427466127427E+09f/X + 3.70104713293016E+08f)/X-5.87119005093822E+07f)/X + 5.38614211391604E+06f)*E + R24/(X-R24); RT3 = (((((-2.37900485051067E-01f*X+1.84122184400896E+01f)*X - 1.00200731304146E+03f)*X+3.75151841595736E+04f)*X - 9.50626663390130E+05f)*X + ((-2.88139014651985E+09f/X + 1.06625915044526E+09f)/X-1.72465289687396E+08f)/X + 1.60419390230055E+07f)*E + R34/(X-R34); RT4 = ((((((-6.00691586407385E-04f*X-3.64479545338439E-01f)*X + 1.57496131755179E+01f)*X-6.54944248734901E+02f)*X + 1.70830039597097E+04f)*X-2.90517939780207E+05f)*X + (3.49059698304732E+07f/X-1.64944522586065E+07f)/X + 2.96817940164703E+06f)*E + R44/(X-R44); if (X <= 25.f) WW4 = ((((((( 2.33766206773151E-07f*X- 3.81542906607063E-05f)*X +3.51416601267000E-03f)*X- 1.66538571864728E-01f)*X +4.80006136831847E+00f)*X- 8.73165934223603E+01f)*X +9.77683627474638E+02f)*X + 1.66000945117640E+04f/X -6.14479071209961E+03f)*E + W44*WW1; else WW4 = (((((( 5.74245945342286E-06f*X- 7.58735928102351E-05f)*X +2.35072857922892E-04f)*X- 3.78812134013125E-03f)*X +3.09871652785805E-01f)*X- 7.11108633061306E+00f)*X +5.55297573149528E+01f)*E + W44*WW1; WW3 = (((((( 2.36392855180768E-04f*X-9.16785337967013E-03f)*X + 4.62186525041313E-01f)*X-1.96943786006540E+01f)*X + 4.99169195295559E+02f)*X-6.21419845845090E+03f)*X + ((+5.21445053212414E+07f/X-1.34113464389309E+07f)/X + 1.13673298305631E+06f)/X-2.81501182042707E+03f)*E + W34*WW1; WW2 = (((((( 7.29841848989391E-04f*X-3.53899555749875E-02f)*X + 2.07797425718513E+00f)*X-1.00464709786287E+02f)*X + 3.15206108877819E+03f)*X-6.27054715090012E+04f)*X + (+1.54721246264919E+07f/X-5.26074391316381E+06f)/X + 7.67135400969617E+05f)*E + W24*WW1; WW1 = (( 1.9623264149430E-01f/X-4.9695241464490E-01f)/X - 6.0156581186481E-05f)*E + WW1-WW2-WW3-WW4; } else if (X <= 53.f) { WW1 = sqrtf(PIE4/X); E = expf(-X)*powf(X,4.f); RT4 = ((-2.19135070169653E-03f*X-1.19108256987623E-01f)*X - 7.50238795695573E-01f)*E + R44/(X-R44); RT3 = ((-9.65842534508637E-04f*X-4.49822013469279E-02f)*X + 6.08784033347757E-01f)*E + R34/(X-R34); RT2 = ((-3.62569791162153E-04f*X-9.09231717268466E-03f)*X + 1.84336760556262E-01f)*E + R24/(X-R24); RT1 = ((-4.07557525914600E-05f*X-6.88846864931685E-04f)*X + 1.74725309199384E-02f)*E + R14/(X-R14); WW4 = (( 5.76631982000990E-06f*X-7.89187283804890E-05f)*X + 3.28297971853126E-04f)*E + W44*WW1; WW3 = (( 2.08294969857230E-04f*X-3.77489954837361E-03f)*X + 2.09857151617436E-02f)*E + W34*WW1; WW2 = (( 6.16374517326469E-04f*X-1.26711744680092E-02f)*X + 8.14504890732155E-02f)*E + W24*WW1; WW1 = WW1-WW2-WW3-WW4; } else { WW1 = sqrtf(PIE4/X); RT1 = R14/(X-R14); RT2 = R24/(X-R24); RT3 = R34/(X-R34); RT4 = R44/(X-R44); WW4 = W44*WW1; WW3 = W34*WW1; WW2 = W24*WW1; WW1 = WW1-WW2-WW3-WW4; } roots[0] = RT1; weights[0] = WW1; roots[1] = RT2; weights[1] = WW2; roots[2] = RT3; weights[2] = WW3; roots[3] = RT4; weights[3] = WW4; return; } __device__ void cuda_Root5(float X, float roots[], float weights[]){ float R15,PIE4,R25,W25,R35,W35,R45,W45,R55,W55; float RT1=0,RT2=0,RT3=0,RT4=0,RT5=0, WW1=0,WW2=0,WW3=0,WW4=0,WW5=0; float Y,E=0,XXX; R15 = 1.17581320211778E-01f; PIE4 = 7.85398163397448E-01f; R25 = 1.07456201243690E+00f; W25 = 2.70967405960535E-01f; R35 = 3.08593744371754E+00f; W35 = 3.82231610015404E-02f; R45 = 
6.41472973366203E+00f; W45 = 1.51614186862443E-03f; R55 = 1.18071894899717E+01f; W55 = 8.62130526143657E-06f; if (X < 3.e-7f){ RT1 = 2.26659266316985E-02f -2.15865967920897E-03f *X; RT2 = 2.31271692140903E-01f -2.20258754389745E-02f *X; RT3 = 8.57346024118836E-01f -8.16520023025515E-02f *X; RT4 = 2.97353038120346E+00f -2.83193369647137E-01f *X; RT5 = 1.84151859759051E+01f -1.75382723579439E+00f *X; WW1 = 2.95524224714752E-01f -1.96867576909777E-02f *X; WW2 = 2.69266719309995E-01f -5.61737590184721E-02f *X; WW3 = 2.19086362515981E-01f -9.71152726793658E-02f *X; WW4 = 1.49451349150580E-01f -1.02979262193565E-01f *X; WW5 = 6.66713443086877E-02f -5.73782817488315E-02f *X; } else if (X < 1.f){ RT1 = ((((((-4.46679165328413E-11f*X+1.21879111988031E-09f)*X- 2.62975022612104E-08f )*X+5.15106194905897E-07f )*X- 9.27933625824749E-06f )*X+1.51794097682482E-04f )*X- 2.15865967920301E-03f )*X+2.26659266316985E-02f; RT2 = (((((( 1.93117331714174E-10f*X-4.57267589660699E-09f)*X+ 2.48339908218932E-08f )*X+1.50716729438474E-06f )*X- 6.07268757707381E-05f )*X+1.37506939145643E-03f )*X- 2.20258754419939E-02f )*X+2.31271692140905E-01f; RT3 = ((((( 4.84989776180094E-09f*X+1.31538893944284E-07f)*X- 2.766753852879E-06f)*X-7.651163510626E-05f)*X+ 4.033058545972E-03f)*X-8.16520022916145E-02f )*X+ 8.57346024118779E-01f; RT4 = ((((-2.48581772214623E-07f*X-4.34482635782585E-06f)*X- 7.46018257987630E-07f )*X+1.01210776517279E-02f )*X- 2.83193369640005E-01f )*X+2.97353038120345E+00f; RT5 = (((((-8.92432153868554E-09f*X+1.77288899268988E-08f)*X+ 3.040754680666E-06f)*X+1.058229325071E-04f)*X+ 4.596379534985E-02f)*X-1.75382723579114E+00f )*X+ 1.84151859759049E+01f; WW1 = ((((((-2.03822632771791E-09f*X+3.89110229133810E-08f)*X- 5.84914787904823E-07f )*X+8.30316168666696E-06f )*X- 1.13218402310546E-04f )*X+1.49128888586790E-03f )*X- 1.96867576904816E-02f )*X+2.95524224714749E-01f; WW2 = ((((((( 8.62848118397570E-09f*X-1.38975551148989E-07f)*X+ 1.602894068228E-06f)*X-1.646364300836E-05f)*X+ 1.538445806778E-04f)*X-1.28848868034502E-03f )*X+ 9.38866933338584E-03f )*X-5.61737590178812E-02f )*X+ 2.69266719309991E-01f; WW3 = ((((((((-9.41953204205665E-09f*X+1.47452251067755E-07f)*X- 1.57456991199322E-06f )*X+1.45098401798393E-05f )*X- 1.18858834181513E-04f )*X+8.53697675984210E-04f )*X- 5.22877807397165E-03f )*X+2.60854524809786E-02f )*X- 9.71152726809059E-02f )*X+2.19086362515979E-01f; WW4 = ((((((((-3.84961617022042E-08f*X+5.66595396544470E-07f)*X- 5.52351805403748E-06f )*X+4.53160377546073E-05f )*X- 3.22542784865557E-04f )*X+1.95682017370967E-03f )*X- 9.77232537679229E-03f )*X+3.79455945268632E-02f )*X- 1.02979262192227E-01f )*X+1.49451349150573E-01f; WW5 = ((((((((( 4.09594812521430E-09f*X-6.47097874264417E-08f)*X+ 6.743541482689E-07f)*X-5.917993920224E-06f)*X+ 4.531969237381E-05f)*X-2.99102856679638E-04f )*X+ 1.65695765202643E-03f )*X-7.40671222520653E-03f )*X+ 2.50889946832192E-02f )*X-5.73782817487958E-02f )*X+ 6.66713443086877E-02f; } else if (X < 5.f) { Y = X-3.0E+00f; RT1 = ((((((((-2.58163897135138E-14f*Y+8.14127461488273E-13f)*Y- 2.11414838976129E-11f )*Y+5.09822003260014E-10f )*Y- 1.16002134438663E-08f )*Y+2.46810694414540E-07f )*Y- 4.92556826124502E-06f )*Y+9.02580687971053E-05f )*Y- 1.45190025120726E-03f )*Y+1.73416786387475E-02f; RT2 = ((((((((( 1.04525287289788E-14f*Y+5.44611782010773E-14f)*Y- 4.831059411392E-12f)*Y+1.136643908832E-10f)*Y- 1.104373076913E-09f)*Y-2.35346740649916E-08f )*Y+ 1.43772622028764E-06f )*Y-4.23405023015273E-05f )*Y+ 9.12034574793379E-04f )*Y-1.52479441718739E-02f )*Y+ 
1.76055265928744E-01f; RT3 = (((((((((-6.89693150857911E-14f*Y+5.92064260918861E-13f)*Y+ 1.847170956043E-11f)*Y-3.390752744265E-10f)*Y- 2.995532064116E-09f)*Y+1.57456141058535E-07f )*Y- 3.95859409711346E-07f )*Y-9.58924580919747E-05f )*Y+ 3.23551502557785E-03f )*Y-5.97587007636479E-02f )*Y+ 6.46432853383057E-01f; RT4 = ((((((((-3.61293809667763E-12f*Y-2.70803518291085E-11f)*Y+ 8.83758848468769E-10f )*Y+1.59166632851267E-08f )*Y- 1.32581997983422E-07f )*Y-7.60223407443995E-06f )*Y- 7.41019244900952E-05f )*Y+9.81432631743423E-03f )*Y- 2.23055570487771E-01f )*Y+2.21460798080643E+00f; RT5 = ((((((((( 7.12332088345321E-13f*Y+3.16578501501894E-12f)*Y- 8.776668218053E-11f)*Y-2.342817613343E-09f)*Y- 3.496962018025E-08f)*Y-3.03172870136802E-07f )*Y+ 1.50511293969805E-06f )*Y+1.37704919387696E-04f )*Y+ 4.70723869619745E-02f )*Y-1.47486623003693E+00f )*Y+ 1.35704792175847E+01f; WW1 = ((((((((( 1.04348658616398E-13f*Y-1.94147461891055E-12f)*Y+ 3.485512360993E-11f)*Y-6.277497362235E-10f)*Y+ 1.100758247388E-08f)*Y-1.88329804969573E-07f )*Y+ 3.12338120839468E-06f )*Y-5.04404167403568E-05f )*Y+ 8.00338056610995E-04f )*Y-1.30892406559521E-02f )*Y+ 2.47383140241103E-01f; WW2 = ((((((((((( 3.23496149760478E-14f*Y-5.24314473469311E-13f)*Y+ 7.743219385056E-12f)*Y-1.146022750992E-10f)*Y+ 1.615238462197E-09f)*Y-2.15479017572233E-08f )*Y+ 2.70933462557631E-07f )*Y-3.18750295288531E-06f )*Y+ 3.47425221210099E-05f )*Y-3.45558237388223E-04f )*Y+ 3.05779768191621E-03f )*Y-2.29118251223003E-02f )*Y+ 1.59834227924213E-01f; WW3 = ((((((((((((-3.42790561802876E-14f*Y+5.26475736681542E-13f)*Y- 7.184330797139E-12f)*Y+9.763932908544E-11f)*Y- 1.244014559219E-09f)*Y+1.472744068942E-08f)*Y- 1.611749975234E-07f)*Y+1.616487851917E-06f)*Y- 1.46852359124154E-05f )*Y+1.18900349101069E-04f )*Y- 8.37562373221756E-04f )*Y+4.93752683045845E-03f )*Y- 2.25514728915673E-02f )*Y+6.95211812453929E-02f; WW4 = ((((((((((((( 1.04072340345039E-14f*Y-1.60808044529211E-13f)* Y+2.183534866798E-12f)*Y-2.939403008391E-11f)*Y+ 3.679254029085E-10f)*Y-4.23775673047899E-09f )*Y+ 4.46559231067006E-08f )*Y-4.26488836563267E-07f )*Y+ 3.64721335274973E-06f )*Y-2.74868382777722E-05f )*Y+ 1.78586118867488E-04f )*Y-9.68428981886534E-04f )*Y+ 4.16002324339929E-03f )*Y-1.28290192663141E-02f )*Y+ 2.22353727685016E-02f; WW5 = ((((((((((((((-8.16770412525963E-16f*Y+1.31376515047977E-14f)* Y-1.856950818865E-13f)*Y+2.596836515749E-12f)*Y- 3.372639523006E-11f)*Y+4.025371849467E-10f)*Y- 4.389453269417E-09f)*Y+4.332753856271E-08f)*Y- 3.82673275931962E-07f )*Y+2.98006900751543E-06f )*Y- 2.00718990300052E-05f )*Y+1.13876001386361E-04f )*Y- 5.23627942443563E-04f )*Y+1.83524565118203E-03f )*Y- 4.37785737450783E-03f )*Y+5.36963805223095E-03f; } else if (X < 10.f) { Y = X-7.5E+00f; RT1 = ((((((((-1.13825201010775E-14f*Y+1.89737681670375E-13f)*Y- 4.81561201185876E-12f )*Y+1.56666512163407E-10f )*Y- 3.73782213255083E-09f )*Y+9.15858355075147E-08f )*Y- 2.13775073585629E-06f )*Y+4.56547356365536E-05f )*Y- 8.68003909323740E-04f )*Y+1.22703754069176E-02f; RT2 = (((((((((-3.67160504428358E-15f*Y+1.27876280158297E-14f)*Y- 1.296476623788E-12f)*Y+1.477175434354E-11f)*Y+ 5.464102147892E-10f)*Y-2.42538340602723E-08f )*Y+ 8.20460740637617E-07f )*Y-2.20379304598661E-05f )*Y+ 4.90295372978785E-04f )*Y-9.14294111576119E-03f )*Y+ 1.22590403403690E-01f; RT3 = ((((((((( 1.39017367502123E-14f*Y-6.96391385426890E-13f)*Y+ 1.176946020731E-12f)*Y+1.725627235645E-10f)*Y- 3.686383856300E-09f)*Y+2.87495324207095E-08f )*Y+ 1.71307311000282E-06f )*Y-7.94273603184629E-05f )*Y+ 2.00938064965897E-03f 
)*Y-3.63329491677178E-02f )*Y+ 4.34393683888443E-01f; RT4 = ((((((((((-1.27815158195209E-14f*Y+1.99910415869821E-14f)*Y+ 3.753542914426E-12f)*Y-2.708018219579E-11f)*Y- 1.190574776587E-09f)*Y+1.106696436509E-08f)*Y+ 3.954955671326E-07f)*Y-4.398596059588E-06f)*Y- 2.01087998907735E-04f )*Y+7.89092425542937E-03f )*Y- 1.42056749162695E-01f )*Y+1.39964149420683E+00f; RT5 = ((((((((((-1.19442341030461E-13f*Y-2.34074833275956E-12f)*Y+ 6.861649627426E-12f)*Y+6.082671496226E-10f)*Y+ 5.381160105420E-09f)*Y-6.253297138700E-08f)*Y- 2.135966835050E-06f)*Y-2.373394341886E-05f)*Y+ 2.88711171412814E-06f )*Y+4.85221195290753E-02f )*Y- 1.04346091985269E+00f )*Y+7.89901551676692E+00f; WW1 = ((((((((( 7.95526040108997E-15f*Y-2.48593096128045E-13f)*Y+ 4.761246208720E-12f)*Y-9.535763686605E-11f)*Y+ 2.225273630974E-09f)*Y-4.49796778054865E-08f )*Y+ 9.17812870287386E-07f )*Y-1.86764236490502E-05f )*Y+ 3.76807779068053E-04f )*Y-8.10456360143408E-03f )*Y+ 2.01097936411496E-01f; WW2 = ((((((((((( 1.25678686624734E-15f*Y-2.34266248891173E-14f)*Y+ 3.973252415832E-13f)*Y-6.830539401049E-12f)*Y+ 1.140771033372E-10f)*Y-1.82546185762009E-09f )*Y+ 2.77209637550134E-08f )*Y-4.01726946190383E-07f )*Y+ 5.48227244014763E-06f )*Y-6.95676245982121E-05f )*Y+ 8.05193921815776E-04f )*Y-8.15528438784469E-03f )*Y+ 9.71769901268114E-02f; WW3 = ((((((((((((-8.20929494859896E-16f*Y+1.37356038393016E-14f)*Y- 2.022863065220E-13f)*Y+3.058055403795E-12f)*Y- 4.387890955243E-11f)*Y+5.923946274445E-10f)*Y- 7.503659964159E-09f)*Y+8.851599803902E-08f)*Y- 9.65561998415038E-07f )*Y+9.60884622778092E-06f )*Y- 8.56551787594404E-05f )*Y+6.66057194311179E-04f )*Y- 4.17753183902198E-03f )*Y+2.25443826852447E-02f; WW4 = ((((((((((((((-1.08764612488790E-17f*Y+1.85299909689937E-16f)* Y-2.730195628655E-15f)*Y+4.127368817265E-14f)*Y- 5.881379088074E-13f)*Y+7.805245193391E-12f)*Y- 9.632707991704E-11f)*Y+1.099047050624E-09f)*Y- 1.15042731790748E-08f )*Y+1.09415155268932E-07f )*Y- 9.33687124875935E-07f )*Y+7.02338477986218E-06f )*Y- 4.53759748787756E-05f )*Y+2.41722511389146E-04f )*Y- 9.75935943447037E-04f )*Y+2.57520532789644E-03f; WW5 = ((((((((((((((( 7.28996979748849E-19f*Y-1.26518146195173E-17f) *Y+1.886145834486E-16f)*Y-2.876728287383E-15f)*Y+ 4.114588668138E-14f)*Y-5.44436631413933E-13f )*Y+ 6.64976446790959E-12f )*Y-7.44560069974940E-11f )*Y+ 7.57553198166848E-10f )*Y-6.92956101109829E-09f )*Y+ 5.62222859033624E-08f )*Y-3.97500114084351E-07f )*Y+ 2.39039126138140E-06f )*Y-1.18023950002105E-05f )*Y+ 4.52254031046244E-05f )*Y-1.21113782150370E-04f )*Y+ 1.75013126731224E-04f; } else if (X < 15.f) { Y = X-12.5E+00f; RT1 = ((((((((((-4.16387977337393E-17f*Y+7.20872997373860E-16f)*Y+ 1.395993802064E-14f)*Y+3.660484641252E-14f)*Y- 4.154857548139E-12f)*Y+2.301379846544E-11f)*Y- 1.033307012866E-09f)*Y+3.997777641049E-08f)*Y- 9.35118186333939E-07f )*Y+2.38589932752937E-05f )*Y- 5.35185183652937E-04f )*Y+8.85218988709735E-03f; RT2 = ((((((((((-4.56279214732217E-16f*Y+6.24941647247927E-15f)*Y+ 1.737896339191E-13f)*Y+8.964205979517E-14f)*Y- 3.538906780633E-11f)*Y+9.561341254948E-11f)*Y- 9.772831891310E-09f)*Y+4.240340194620E-07f)*Y- 1.02384302866534E-05f )*Y+2.57987709704822E-04f )*Y- 5.54735977651677E-03f )*Y+8.68245143991948E-02f; RT3 = ((((((((((-2.52879337929239E-15f*Y+2.13925810087833E-14f)*Y+ 7.884307667104E-13f)*Y-9.023398159510E-13f)*Y- 5.814101544957E-11f)*Y-1.333480437968E-09f)*Y- 2.217064940373E-08f)*Y+1.643290788086E-06f)*Y- 4.39602147345028E-05f )*Y+1.08648982748911E-03f )*Y- 2.13014521653498E-02f )*Y+2.94150684465425E-01f; RT4 = 
((((((((((-6.42391438038888E-15f*Y+5.37848223438815E-15f)*Y+ 8.960828117859E-13f)*Y+5.214153461337E-11f)*Y- 1.106601744067E-10f)*Y-2.007890743962E-08f)*Y+ 1.543764346501E-07f)*Y+4.520749076914E-06f)*Y- 1.88893338587047E-04f )*Y+4.73264487389288E-03f )*Y- 7.91197893350253E-02f )*Y+8.60057928514554E-01f; RT5 = (((((((((((-2.24366166957225E-14f*Y+4.87224967526081E-14f)*Y+ 5.587369053655E-12f)*Y-3.045253104617E-12f)*Y- 1.223983883080E-09f)*Y-2.05603889396319E-09f )*Y+ 2.58604071603561E-07f )*Y+1.34240904266268E-06f )*Y- 5.72877569731162E-05f )*Y-9.56275105032191E-04f )*Y+ 4.23367010370921E-02f )*Y-5.76800927133412E-01f )*Y+ 3.87328263873381E+00f; WW1 = ((((((((( 8.98007931950169E-15f*Y+7.25673623859497E-14f)*Y+ 5.851494250405E-14f)*Y-4.234204823846E-11f)*Y+ 3.911507312679E-10f)*Y-9.65094802088511E-09f )*Y+ 3.42197444235714E-07f )*Y-7.51821178144509E-06f )*Y+ 1.94218051498662E-04f )*Y-5.38533819142287E-03f )*Y+ 1.68122596736809E-01f; WW2 = ((((((((((-1.05490525395105E-15f*Y+1.96855386549388E-14f)*Y- 5.500330153548E-13f)*Y+1.003849567976E-11f)*Y- 1.720997242621E-10f)*Y+3.533277061402E-09f)*Y- 6.389171736029E-08f)*Y+1.046236652393E-06f)*Y- 1.73148206795827E-05f )*Y+2.57820531617185E-04f )*Y- 3.46188265338350E-03f )*Y+7.03302497508176E-02f; WW3 = ((((((((((( 3.60020423754545E-16f*Y-6.24245825017148E-15f)*Y+ 9.945311467434E-14f)*Y-1.749051512721E-12f)*Y+ 2.768503957853E-11f)*Y-4.08688551136506E-10f )*Y+ 6.04189063303610E-09f )*Y-8.23540111024147E-08f )*Y+ 1.01503783870262E-06f )*Y-1.20490761741576E-05f )*Y+ 1.26928442448148E-04f )*Y-1.05539461930597E-03f )*Y+ 1.15543698537013E-02f; WW4 = ((((((((((((( 2.51163533058925E-18f*Y-4.31723745510697E-17f)* Y+6.557620865832E-16f)*Y-1.016528519495E-14f)*Y+ 1.491302084832E-13f)*Y-2.06638666222265E-12f )*Y+ 2.67958697789258E-11f )*Y-3.23322654638336E-10f )*Y+ 3.63722952167779E-09f )*Y-3.75484943783021E-08f )*Y+ 3.49164261987184E-07f )*Y-2.92658670674908E-06f )*Y+ 2.12937256719543E-05f )*Y-1.19434130620929E-04f )*Y+ 6.45524336158384E-04f; WW5 = ((((((((((((((-1.29043630202811E-19f*Y+2.16234952241296E-18f)* Y-3.107631557965E-17f)*Y+4.570804313173E-16f)*Y- 6.301348858104E-15f)*Y+8.031304476153E-14f)*Y- 9.446196472547E-13f)*Y+1.018245804339E-11f)*Y- 9.96995451348129E-11f )*Y+8.77489010276305E-10f )*Y- 6.84655877575364E-09f )*Y+4.64460857084983E-08f )*Y- 2.66924538268397E-07f )*Y+1.24621276265907E-06f )*Y- 4.30868944351523E-06f )*Y+9.94307982432868E-06f; } else if (X < 20.f){ Y = X-17.5E+00f; RT1 = (((((((((( 1.91875764545740E-16f*Y+7.8357401095707E-16f)*Y- 3.260875931644E-14f)*Y-1.186752035569E-13f)*Y+ 4.275180095653E-12f)*Y+3.357056136731E-11f)*Y- 1.123776903884E-09f)*Y+1.231203269887E-08f)*Y- 3.99851421361031E-07f )*Y+1.45418822817771E-05f )*Y- 3.49912254976317E-04f )*Y+6.67768703938812E-03f; RT2 = (((((((((( 2.02778478673555E-15f*Y+1.01640716785099E-14f)*Y- 3.385363492036E-13f)*Y-1.615655871159E-12f)*Y+ 4.527419140333E-11f)*Y+3.853670706486E-10f)*Y- 1.184607130107E-08f)*Y+1.347873288827E-07f)*Y- 4.47788241748377E-06f )*Y+1.54942754358273E-04f )*Y- 3.55524254280266E-03f )*Y+6.44912219301603E-02f; RT3 = (((((((((( 7.79850771456444E-15f*Y+6.00464406395001E-14f)*Y- 1.249779730869E-12f)*Y-1.020720636353E-11f)*Y+ 1.814709816693E-10f)*Y+1.766397336977E-09f)*Y- 4.603559449010E-08f)*Y+5.863956443581E-07f)*Y- 2.03797212506691E-05f )*Y+6.31405161185185E-04f )*Y- 1.30102750145071E-02f )*Y+2.10244289044705E-01f; RT4 = (((((((((((-2.92397030777912E-15f*Y+1.94152129078465E-14f)*Y+ 4.859447665850E-13f)*Y-3.217227223463E-12f)*Y- 7.484522135512E-11f)*Y+7.19101516047753E-10f )*Y+ 
6.88409355245582E-09f )*Y-1.44374545515769E-07f )*Y+ 2.74941013315834E-06f )*Y-1.02790452049013E-04f )*Y+ 2.59924221372643E-03f )*Y-4.35712368303551E-02f )*Y+ 5.62170709585029E-01f; RT5 = ((((((((((( 1.17976126840060E-14f*Y+1.24156229350669E-13f)*Y- 3.892741622280E-12f)*Y-7.755793199043E-12f)*Y+ 9.492190032313E-10f)*Y-4.98680128123353E-09f )*Y- 1.81502268782664E-07f )*Y+2.69463269394888E-06f )*Y+ 2.50032154421640E-05f )*Y-1.33684303917681E-03f )*Y+ 2.29121951862538E-02f )*Y-2.45653725061323E-01f )*Y+ 1.89999883453047E+00f; WW1 = (((((((((( 1.74841995087592E-15f*Y-6.95671892641256E-16f)*Y- 3.000659497257E-13f)*Y+2.021279817961E-13f)*Y+ 3.853596935400E-11f)*Y+1.461418533652E-10f)*Y- 1.014517563435E-08f)*Y+1.132736008979E-07f)*Y- 2.86605475073259E-06f )*Y+1.21958354908768E-04f )*Y- 3.86293751153466E-03f )*Y+1.45298342081522E-01f; WW2 = ((((((((((-1.11199320525573E-15f*Y+1.85007587796671E-15f)*Y+ 1.220613939709E-13f)*Y+1.275068098526E-12f)*Y- 5.341838883262E-11f)*Y+6.161037256669E-10f)*Y- 1.009147879750E-08f)*Y+2.907862965346E-07f)*Y- 6.12300038720919E-06f )*Y+1.00104454489518E-04f )*Y- 1.80677298502757E-03f )*Y+5.78009914536630E-02f; WW3 = ((((((((((-9.49816486853687E-16f*Y+6.67922080354234E-15f)*Y+ 2.606163540537E-15f)*Y+1.983799950150E-12f)*Y- 5.400548574357E-11f)*Y+6.638043374114E-10f)*Y- 8.799518866802E-09f)*Y+1.791418482685E-07f)*Y- 2.96075397351101E-06f )*Y+3.38028206156144E-05f )*Y- 3.58426847857878E-04f )*Y+8.39213709428516E-03f; WW4 = ((((((((((( 1.33829971060180E-17f*Y-3.44841877844140E-16f)*Y+ 4.745009557656E-15f)*Y-6.033814209875E-14f)*Y+ 1.049256040808E-12f)*Y-1.70859789556117E-11f )*Y+ 2.15219425727959E-10f )*Y-2.52746574206884E-09f )*Y+ 3.27761714422960E-08f )*Y-3.90387662925193E-07f )*Y+ 3.46340204593870E-06f )*Y-2.43236345136782E-05f )*Y+ 3.54846978585226E-04f; WW5 = ((((((((((((( 2.69412277020887E-20f*Y-4.24837886165685E-19f)* Y+6.030500065438E-18f)*Y-9.069722758289E-17f)*Y+ 1.246599177672E-15f)*Y-1.56872999797549E-14f )*Y+ 1.87305099552692E-13f )*Y-2.09498886675861E-12f )*Y+ 2.11630022068394E-11f )*Y-1.92566242323525E-10f )*Y+ 1.62012436344069E-09f )*Y-1.23621614171556E-08f )*Y+ 7.72165684563049E-08f )*Y-3.59858901591047E-07f )*Y+ 2.43682618601000E-06f; } else if (X < 25.f) { Y = X-22.5E+00f; RT1 = (((((((((-1.13927848238726E-15f*Y+7.39404133595713E-15f)*Y+ 1.445982921243E-13f)*Y-2.676703245252E-12f)*Y+ 5.823521627177E-12f)*Y+2.17264723874381E-10f )*Y+ 3.56242145897468E-09f )*Y-3.03763737404491E-07f )*Y+ 9.46859114120901E-06f )*Y-2.30896753853196E-04f )*Y+ 5.24663913001114E-03f; RT2 = (((((((((( 2.89872355524581E-16f*Y-1.22296292045864E-14f)*Y+ 6.184065097200E-14f)*Y+1.649846591230E-12f)*Y- 2.729713905266E-11f)*Y+3.709913790650E-11f)*Y+ 2.216486288382E-09f)*Y+4.616160236414E-08f)*Y- 3.32380270861364E-06f )*Y+9.84635072633776E-05f )*Y- 2.30092118015697E-03f )*Y+5.00845183695073E-02f; RT3 = (((((((((( 1.97068646590923E-15f*Y-4.89419270626800E-14f)*Y+ 1.136466605916E-13f)*Y+7.546203883874E-12f)*Y- 9.635646767455E-11f)*Y-8.295965491209E-11f)*Y+ 7.534109114453E-09f)*Y+2.699970652707E-07f)*Y- 1.42982334217081E-05f )*Y+3.78290946669264E-04f )*Y- 8.03133015084373E-03f )*Y+1.58689469640791E-01f; RT4 = (((((((((( 1.33642069941389E-14f*Y-1.55850612605745E-13f)*Y- 7.522712577474E-13f)*Y+3.209520801187E-11f)*Y- 2.075594313618E-10f)*Y-2.070575894402E-09f)*Y+ 7.323046997451E-09f)*Y+1.851491550417E-06f)*Y- 6.37524802411383E-05f )*Y+1.36795464918785E-03f )*Y- 2.42051126993146E-02f )*Y+3.97847167557815E-01f; RT5 = ((((((((((-6.07053986130526E-14f*Y+1.04447493138843E-12f)*Y- 
4.286617818951E-13f)*Y-2.632066100073E-10f)*Y+ 4.804518986559E-09f)*Y-1.835675889421E-08f)*Y- 1.068175391334E-06f)*Y+3.292234974141E-05f)*Y- 5.94805357558251E-04f )*Y+8.29382168612791E-03f )*Y- 9.93122509049447E-02f )*Y+1.09857804755042E+00f; WW1 = (((((((((-9.10338640266542E-15f*Y+1.00438927627833E-13f)*Y+ 7.817349237071E-13f)*Y-2.547619474232E-11f)*Y+ 1.479321506529E-10f)*Y+1.52314028857627E-09f )*Y+ 9.20072040917242E-09f )*Y-2.19427111221848E-06f )*Y+ 8.65797782880311E-05f )*Y-2.82718629312875E-03f )*Y+ 1.28718310443295E-01f; WW2 = ((((((((( 5.52380927618760E-15f*Y-6.43424400204124E-14f)*Y- 2.358734508092E-13f)*Y+8.261326648131E-12f)*Y+ 9.229645304956E-11f)*Y-5.68108973828949E-09f )*Y+ 1.22477891136278E-07f )*Y-2.11919643127927E-06f )*Y+ 4.23605032368922E-05f )*Y-1.14423444576221E-03f )*Y+ 5.06607252890186E-02f; WW3 = ((((((((( 3.99457454087556E-15f*Y-5.11826702824182E-14f)*Y- 4.157593182747E-14f)*Y+4.214670817758E-12f)*Y+ 6.705582751532E-11f)*Y-3.36086411698418E-09f )*Y+ 6.07453633298986E-08f )*Y-7.40736211041247E-07f )*Y+ 8.84176371665149E-06f )*Y-1.72559275066834E-04f )*Y+ 7.16639814253567E-03f; WW4 = (((((((((((-2.14649508112234E-18f*Y-2.45525846412281E-18f)*Y+ 6.126212599772E-16f)*Y-8.526651626939E-15f)*Y+ 4.826636065733E-14f)*Y-3.39554163649740E-13f )*Y+ 1.67070784862985E-11f )*Y-4.42671979311163E-10f )*Y+ 6.77368055908400E-09f )*Y-7.03520999708859E-08f )*Y+ 6.04993294708874E-07f )*Y-7.80555094280483E-06f )*Y+ 2.85954806605017E-04f; WW5 = ((((((((((((-5.63938733073804E-21f*Y+6.92182516324628E-20f)*Y- 1.586937691507E-18f)*Y+3.357639744582E-17f)*Y- 4.810285046442E-16f)*Y+5.386312669975E-15f)*Y- 6.117895297439E-14f)*Y+8.441808227634E-13f)*Y- 1.18527596836592E-11f )*Y+1.36296870441445E-10f )*Y- 1.17842611094141E-09f )*Y+7.80430641995926E-09f )*Y- 5.97767417400540E-08f )*Y+1.65186146094969E-06f; } else if (X < 40.f) { WW1 = sqrtf(PIE4/X); E = expf(-X); RT1 = ((((((((-1.73363958895356E-06f*X+1.19921331441483E-04f)*X - 1.59437614121125E-02f)*X+1.13467897349442E+00f)*X - 4.47216460864586E+01f)*X+1.06251216612604E+03f)*X - 1.52073917378512E+04f)*X+1.20662887111273E+05f)*X - 4.07186366852475E+05f)*E + R15/(X-R15); RT2 = ((((((((-1.60102542621710E-05f*X+1.10331262112395E-03f)*X - 1.50043662589017E-01f)*X+1.05563640866077E+01f)*X - 4.10468817024806E+02f)*X+9.62604416506819E+03f)*X - 1.35888069838270E+05f)*X+1.06107577038340E+06f)*X - 3.51190792816119E+06f)*E + R25/(X-R25); RT3 = ((((((((-4.48880032128422E-05f*X+2.69025112122177E-03f)*X - 4.01048115525954E-01f)*X+2.78360021977405E+01f)*X - 1.04891729356965E+03f)*X+2.36985942687423E+04f)*X - 3.19504627257548E+05f)*X+2.34879693563358E+06f)*X - 7.16341568174085E+06f)*E + R35/(X-R35); RT4 = ((((((((-6.38526371092582E-05f*X-2.29263585792626E-03f)*X - 7.65735935499627E-02f)*X+9.12692349152792E+00f)*X - 2.32077034386717E+02f)*X+2.81839578728845E+02f)*X + 9.59529683876419E+04f)*X-1.77638956809518E+06f)*X + 1.02489759645410E+07f)*E + R45/(X-R45); RT5 = ((((((((-3.59049364231569E-05f*X-2.25963977930044E-02f)*X + 1.12594870794668E+00f)*X-4.56752462103909E+01f)*X + 1.05804526830637E+03f)*X-1.16003199605875E+04f)*X - 4.07297627297272E+04f)*X+2.22215528319857E+06f)*X - 1.61196455032613E+07f)*E + R55/(X-R55); WW5 = (((((((((-4.61100906133970E-10f*X+1.43069932644286E-07f)*X - 1.63960915431080E-05f)*X+1.15791154612838E-03f)*X - 5.30573476742071E-02f)*X+1.61156533367153E+00f)*X - 3.23248143316007E+01f)*X+4.12007318109157E+02f)*X - 3.02260070158372E+03f)*X+9.71575094154768E+03f)*E + W55*WW1; WW4 = (((((((((-2.40799435809950E-08f*X+8.12621667601546E-06f)*X - 
9.04491430884113E-04f)*X+6.37686375770059E-02f)*X - 2.96135703135647E+00f)*X+9.15142356996330E+01f)*X - 1.86971865249111E+03f)*X+2.42945528916947E+04f)*X - 1.81852473229081E+05f)*X+5.96854758661427E+05f)*E + W45*WW1; WW3 = (((((((( 1.83574464457207E-05f*X-1.54837969489927E-03f)*X + 1.18520453711586E-01f)*X-6.69649981309161E+00f)*X + 2.44789386487321E+02f)*X-5.68832664556359E+03f)*X + 8.14507604229357E+04f)*X-6.55181056671474E+05f)*X + 2.26410896607237E+06f)*E + W35*WW1; WW2 = (((((((( 2.77778345870650E-05f*X-2.22835017655890E-03f)*X + 1.61077633475573E-01f)*X-8.96743743396132E+00f)*X + 3.28062687293374E+02f)*X-7.65722701219557E+03f)*X + 1.10255055017664E+05f)*X-8.92528122219324E+05f)*X + 3.10638627744347E+06f)*E + W25*WW1; WW1 = WW1-0.01962E+00f*E-WW2-WW3-WW4-WW5; } else if (X < 59.f) { WW1 = sqrtf(PIE4/X); XXX = powf(X,3.f); E = XXX*expf(-X); RT1 = (((-2.43758528330205E-02f*X+2.07301567989771E+00f)*X - 6.45964225381113E+01f)*X+7.14160088655470E+02f)*E + R15/(X-R15); RT2 = (((-2.28861955413636E-01f*X+1.93190784733691E+01f)*X - 5.99774730340912E+02f)*X+6.61844165304871E+03f)*E + R25/(X-R25); RT3 = (((-6.95053039285586E-01f*X+5.76874090316016E+01f)*X - 1.77704143225520E+03f)*X+1.95366082947811E+04f)*E + R35/(X-R35); RT4 = (((-1.58072809087018E+00f*X+1.27050801091948E+02f)*X - 3.86687350914280E+03f)*X+4.23024828121420E+04f)*E + R45/(X-R45); RT5 = (((-3.33963830405396E+00f*X+2.51830424600204E+02f)*X - 7.57728527654961E+03f)*X+8.21966816595690E+04f)*E + R55/(X-R55); E = XXX*E; WW5 = (( 1.35482430510942E-08f*X-3.27722199212781E-07f)*X + 2.41522703684296E-06f)*E + W55*WW1; WW4 = (( 1.23464092261605E-06f*X-3.55224564275590E-05f)*X + 3.03274662192286E-04f)*E + W45*WW1; WW3 = (( 1.34547929260279E-05f*X-4.19389884772726E-04f)*X + 3.87706687610809E-03f)*E + W35*WW1; WW2 = (( 2.09539509123135E-05f*X-6.87646614786982E-04f)*X + 6.68743788585688E-03f)*E + W25*WW1; WW1 = WW1-WW2-WW3-WW4-WW5; } else { WW1 = sqrtf(PIE4/X); RT1 = R15/(X-R15); RT2 = R25/(X-R25); RT3 = R35/(X-R35); RT4 = R45/(X-R45); RT5 = R55/(X-R55); WW2 = W25*WW1; WW3 = W35*WW1; WW4 = W45*WW1; WW5 = W55*WW1; WW1 = WW1-WW2-WW3-WW4-WW5; } roots[0] = RT1; weights[0] = WW1; roots[1] = RT2; weights[1] = WW2; roots[2] = RT3; weights[2] = WW3; roots[3] = RT4; weights[3] = WW4; roots[4] = RT5; weights[4] = WW5; return; } __device__ void cuda_Root6(int n,float X, float roots[], float weights[]){ // Root6 not implemented yet return; } __device__ float cuda_Int1d(int i, int j, int k, int l, float xi, float xj, float xk, float xl, float alpha_ij_A, float alpha_kl_B, float sqrt_AB, float A, float B, float Px, float Qx, float inv_t1, float B00, float B1, float B1p, float G[][MAXROOTS]) { // Form G(n,m)=I(n,0,m,0) intermediate values for a Rys polynomial int n = i+j; int m = k+l; float xij = xi-xj; float xkl = xk-xl; // RecurFactorsGamess float C = (Px-xi) * inv_t1 + (B*(Qx-xi)+A*(Px-xi))*B00*2.0; float Cp = (Qx-xk) * inv_t1 + (B*(Qx-xk)+A*(Px-xk))*B00*2.0; // ABD eq 11. 
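// (The block below fills the table G[a][b] = I(a,0,b,0): G[0][0] is the 1-D
//  Gaussian prefactor, the first row and column are seeded with C and Cp, and
//  the interior follows the two-term Rys recurrences with coefficients B1,
//  B1p and the coupling term B00.  The final loops then shift angular
//  momentum onto centers j and l through the binomial expansion in (xi - xj)
//  and (xk - xl) to obtain I(i,j,k,l).)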
G[0][0] = (float)M_PI * expf(-alpha_ij_A*xij*xij -alpha_kl_B*xkl*xkl) / sqrt_AB; if (n > 0) { G[1][0] = C *G[0][0]; } // ABD eq 15 if (m > 0) { G[0][1] = Cp*G[0][0]; } // ABD eq 16 for (int a = 2; a < n+1; ++ a) { G[a][0] = B1 *(a-1)*G[a-2][0] + C *G[a-1][0]; } for (int b = 2; b < m+1; ++ b) { G[0][b] = B1p*(b-1)*G[0][b-2] + Cp*G[0][b-1]; } if ((m>0) && (n>0)){ for (int a=1; a<n+1; ++a){ G[a][1] = a*B00*G[a-1][0] + Cp*G[a][0]; for (int b=2; b<m+1; ++b) G[a][b] = B1p*(b-1)*G[a][b-2] + a*B00*G[a-1][b-1] + Cp*G[a][b-1]; } } // Compute and output I(i,j,k,l) from I(i+j,0,k+l,0) (G) float ijkl = 0.0; for (int m=0; m<l+1; ++m){ float ijm0 = 0.0; for (int n=0; n<j+1; ++n) // I(i,j,m,0)<-I(n,0,m,0) ijm0 += cuda_binomial(j,n)*powf(xij,(float)(j-n))*G[n+i][m+k]; ijkl += cuda_binomial(l,m)*powf(xkl,(float)(l-m))*ijm0; // I(i,j,k,l)<-I(i,j,m,0) } return ijkl; } // calculate ERI over 4 primitive basis functions __device__ float cuda_rys_pbf(const double *ptr_i, const double *ptr_j, const double *ptr_k, const double *ptr_l) { // download xyz, lmn, expon, and coef*norm float xa = (float)ptr_i[0]; float ya = (float)ptr_i[1]; float za = (float)ptr_i[2]; int la = (int)ptr_i[3]; int ma = (int)ptr_i[4]; int na = (int)ptr_i[5]; float alphaa = (float)ptr_i[6]; float norma = (float)ptr_i[7]; float xb = (float)ptr_j[0]; float yb = (float)ptr_j[1]; float zb = (float)ptr_j[2]; int lb = (int)ptr_j[3]; int mb = (int)ptr_j[4]; int nb = (int)ptr_j[5]; float alphab = (float)ptr_j[6]; float normb = (float)ptr_j[7]; float xc = (float)ptr_k[0]; float yc = (float)ptr_k[1]; float zc = (float)ptr_k[2]; int lc = (int)ptr_k[3]; int mc = (int)ptr_k[4]; int nc = (int)ptr_k[5]; float alphac = (float)ptr_k[6]; float normc = (float)ptr_k[7]; float xd = (float)ptr_l[0]; float yd = (float)ptr_l[1]; float zd = (float)ptr_l[2]; int ld = (int)ptr_l[3]; int md = (int)ptr_l[4]; int nd = (int)ptr_l[5]; float alphad = (float)ptr_l[6]; float normd = (float)ptr_l[7]; // calculate primitive integral [ij|kl] int norder,i; float A,B,xp,yp,zp,xq,yq,zq,X,rho,sum,t,Ix,Iy,Iz; norder = (la+ma+na+lb+nb+mb+lc+mc+nc+ld+md+nd)/2 + 1; A = alphaa+alphab; B = alphac+alphad; xp = (alphaa*xa+alphab*xb)/A; yp = (alphaa*ya+alphab*yb)/A; zp = (alphaa*za+alphab*zb)/A; xq = (alphac*xc+alphad*xd)/B; yq = (alphac*yc+alphad*yd)/B; zq = (alphac*zc+alphad*zd)/B; rho = A*B/(A+B); X = rho * ((xp-xq)*(xp-xq)+(yp-yq)*(yp-yq)+(zp-zq)*(zp-zq)); float alpha_ab_A = alphaa * alphab / A; float alpha_cd_B = alphac * alphad / B; float sqrt_AB = sqrtf(A * B); float roots[MAXROOTS],weights[MAXROOTS]; float G[MAXROOTS][MAXROOTS]; cuda_Roots(norder,X,roots,weights); // get currect roots/weights sum = 0.; for (i=0; i<norder; ++i){ t = roots[i]; float inv_t1, B00, B1, B1p; inv_t1 = 1.f / (1.f + t); B00 = 0.5f * t/(A+B) * inv_t1; B1 = 0.5f / A * inv_t1 + B00; B1p = 0.5f / B * inv_t1 + B00; Ix = cuda_Int1d(la,lb,lc,ld, xa,xb,xc,xd, alpha_ab_A,alpha_cd_B,sqrt_AB, A,B,xp,xq, inv_t1,B00,B1,B1p, G); Iy = cuda_Int1d(ma,mb,mc,md, ya,yb,yc,yd, alpha_ab_A,alpha_cd_B,sqrt_AB, A,B,yp,yq, inv_t1,B00,B1,B1p, G); Iz = cuda_Int1d(na,nb,nc,nd, za,zb,zc,zd, alpha_ab_A,alpha_cd_B,sqrt_AB, A,B,zp,zq, inv_t1,B00,B1,B1p, G); sum = sum + Ix*Iy*Iz*weights[i]; /* ABD eq 5 & 9 */ } // inv_sqrt_pi_2: 2.0*sqrtf(1.0/M_PI) = 1.12837916709551255856 return 1.12837916709551255856f * sqrtf(rho)*norma*normb*normc*normd*sum; /* ABD eq 5 & 9 */ } // calculate J matrix using 1-thread-1-primitive-integral scheme __global__ void cuda_mat_J_PI( const double *__restrict pbf_xlec, const int *__restrict pbf_to_cbf, int n_pbf, const 
double *__restrict mat_D, double *__restrict mat_J_PI, const double *__restrict mat_Q) { __shared__ double elem_J_PI[BLOCKSIZE * BLOCKSIZE]; // each block scans over [ij|??] and sum up to a primitive J matrix element int i = blockIdx.x; int j = blockIdx.y; // avoid accessing out of bounds elements and make use of i<=>j symmetry if (i >= n_pbf || j > i) { return; } int ij = cuda_ij2intindex(i,j); const double *ptr_i = &pbf_xlec[i * 8]; const double *ptr_j = &pbf_xlec[j * 8]; int a = pbf_to_cbf[i]; int b = pbf_to_cbf[j]; int ab = cuda_ij2intindex(a,b); // initialize shared array elem_J_PI[threadIdx.x * BLOCKSIZE + threadIdx.y] = 0.0; for (int k = threadIdx.x; k < n_pbf; k += BLOCKSIZE) { int c = pbf_to_cbf[k]; const double *ptr_k = &pbf_xlec[k * 8]; // NOTE: make use of k<=>l symmetry for (int l = threadIdx.y; l <= k; l += BLOCKSIZE) { int d = pbf_to_cbf[l]; int cd = cuda_ij2intindex(c,d); // Schwartz screening if (fabs(mat_Q[ab] * mat_Q[cd] * mat_D[cd]) < SCREEN_THR) { continue; } const double *ptr_l = &pbf_xlec[l * 8]; // calculate ERI double this_eri = cuda_rys_pbf(ptr_i, ptr_j, ptr_k, ptr_l); // NOTE: doubling for off-diagonal elements of D due to k<=>l symmetry elem_J_PI[threadIdx.x *BLOCKSIZE + threadIdx.y] += this_eri * mat_D[cd] * (k == l ? 1.0 : 2.0); } } __syncthreads(); // only update mat_J_PI on one thread of the block if (0 == threadIdx.x && 0 == threadIdx.y) { mat_J_PI[ij] = 0.0; for (int t1 = 0; t1 < BLOCKSIZE; ++ t1) { for (int t2 = 0; t2 < BLOCKSIZE; ++ t2) { mat_J_PI[ij] += elem_J_PI[t1 * BLOCKSIZE + t2]; } } } } // calculate K matrix using 1-thread-1-primitive-integral scheme __global__ void cuda_mat_K_PI( const double *__restrict pbf_xlec, const int *__restrict pbf_to_cbf, int n_pbf, const double *__restrict mat_D, double *__restrict mat_K_PI, const double *__restrict mat_Q) { __shared__ double elem_K_PI[BLOCKSIZE * BLOCKSIZE]; // each block scans over [i?|k?] and sum up to a primitive K matrix element int i = blockIdx.x; int k = blockIdx.y; // avoid accessing out of bounds elements and make use of ij<=>kl symmetry if (i >= n_pbf || k > i) { return; } int ik = cuda_ij2intindex(i,k); const double *ptr_i = &pbf_xlec[i * 8]; const double *ptr_k = &pbf_xlec[k * 8]; int a = pbf_to_cbf[i]; int c = pbf_to_cbf[k]; // initialize shared array elem_K_PI[threadIdx.x * BLOCKSIZE + threadIdx.y] = 0.0; for (int j = threadIdx.x; j < n_pbf; j += BLOCKSIZE) { int b = pbf_to_cbf[j]; int ab = cuda_ij2intindex(a,b); const double *ptr_j = &pbf_xlec[j * 8]; for (int l = threadIdx.y; l < n_pbf; l += BLOCKSIZE) { int d = pbf_to_cbf[l]; int cd = cuda_ij2intindex(c,d); int bd = cuda_ij2intindex(b,d); // Schwartz screening if (fabs(mat_Q[ab] * mat_Q[cd] * mat_D[bd]) < SCREEN_THR) { continue; } const double *ptr_l = &pbf_xlec[l * 8]; // calculate ERI double this_eri = cuda_rys_pbf(ptr_i, ptr_j, ptr_k, ptr_l); // NOTE: no doubling for off-diagonal elements of D elem_K_PI[threadIdx.x * BLOCKSIZE + threadIdx.y] += this_eri * mat_D[bd]; } } __syncthreads(); // only update mat_K_PI on one thread of the block if (0 == threadIdx.x && 0 == threadIdx.y) { mat_K_PI[ik] = 0.0; for (int t1 = 0; t1 < BLOCKSIZE; ++ t1) { for (int t2 = 0; t2 < BLOCKSIZE; ++ t2) { mat_K_PI[ik] += elem_K_PI[t1 * BLOCKSIZE + t2]; } } } }
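// Minimal host-side launch sketch for the two kernels above. The helper name
// is illustrative only and this is not part of the original code path: it
// assumes BLOCKSIZE is the same compile-time constant the kernels were built
// with, that all device buffers are already allocated and populated, and that
// error checking is handled by the caller. One block is launched per (i,j)
// resp. (i,k) primitive pair; blocks with j > i (or k > i) return immediately,
// matching the symmetry checks inside the kernels.
static void launch_mat_J_K_PI(const double *d_pbf_xlec,
                              const int *d_pbf_to_cbf, int n_pbf,
                              const double *d_mat_D, double *d_mat_J_PI,
                              double *d_mat_K_PI, const double *d_mat_Q) {
  dim3 block(BLOCKSIZE, BLOCKSIZE);
  dim3 grid(n_pbf, n_pbf);
  cuda_mat_J_PI<<<grid, block>>>(d_pbf_xlec, d_pbf_to_cbf, n_pbf,
                                 d_mat_D, d_mat_J_PI, d_mat_Q);
  cuda_mat_K_PI<<<grid, block>>>(d_pbf_xlec, d_pbf_to_cbf, n_pbf,
                                 d_mat_D, d_mat_K_PI, d_mat_Q);
  cudaDeviceSynchronize();
}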
Parallel reduction This sample shows how to perform a reduction operation on an array of values to produce a single value in a single kernel (as opposed to two or more kernel calls as shown in the "reduction" CUDA Sample). Single-pass reduction requires Cooperative Groups. Reductions are a very common computation in parallel algorithms. Any time an array of values needs to be reduced to a single value using a binary associative operator, a reduction can be used. Example applications include statistics computations such as mean and standard deviation, and image processing applications such as finding the total luminance of an image. This code performs sum reductions, but any associative operator such as min() or max() could also be used. It assumes the input size is a power of 2. COMMAND LINE ARGUMENTS "--n=<N>" :Specify the number of elements to reduce (default 33554432) "--threads=<N>" :Specify the number of threads per block (default 128) "--maxblocks=<N>" :Specify the maximum number of thread blocks to launch (kernel 6 only, default 64) */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <helper_functions.h> #include <helper_cuda.h> #include <cuda_runtime.h> const char *sSDKsample = "reductionMultiBlockCG"; #include <cuda_runtime_api.h> #include <cooperative_groups.h> #include <cooperative_groups/reduce.h> namespace cg = cooperative_groups; /* Parallel sum reduction using shared memory - takes log(n) steps for n input elements - uses n/2 threads - only works for power-of-2 arrays This version adds multiple elements per thread sequentially. This reduces the overall cost of the algorithm while keeping the work complexity O(n) and the step complexity O(log n). (Brent's Theorem optimization) See the CUDA SDK "reduction" sample for more information. */ __device__ void reduceBlock(double *sdata, const cg::thread_block &cta) { const unsigned int tid = cta.thread_rank(); cg::thread_block_tile<32> tile32 = cg::tiled_partition<32>(cta); sdata[tid] = cg::reduce(tile32, sdata[tid], cg::plus<double>()); cg::sync(cta); double beta = 0.0; if (cta.thread_rank() == 0) { beta = 0; for (int i = 0; i < blockDim.x; i += tile32.size()) { beta += sdata[i]; } sdata[0] = beta; } cg::sync(cta); } // This reduction kernel reduces an arbitrary size array in a single kernel // invocation // // For more details on the reduction algorithm (notably the multi-pass // approach), see the "reduction" sample in the CUDA SDK. 
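// In outline, the kernel below proceeds in three phases: (1) every thread
// accumulates a grid-strided partial sum into shared memory, (2) reduceBlock()
// combines the block's partial sums (cg::reduce over each 32-thread tile, then
// thread 0 of the block adds the per-tile results), and (3) after a grid-wide
// sync, thread 0 of the whole grid folds the per-block results stored in
// g_odata[] into g_odata[0].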
extern "C" __global__ void reduceSinglePassMultiBlockCG(const float *g_idata, float *g_odata, unsigned int n) { // Handle to thread block group cg::thread_block block = cg::this_thread_block(); cg::grid_group grid = cg::this_grid(); extern double __shared__ sdata[]; // Stride over grid and add the values to a shared memory buffer sdata[block.thread_rank()] = 0; for (int i = grid.thread_rank(); i < n; i += grid.size()) { sdata[block.thread_rank()] += g_idata[i]; } cg::sync(block); // Reduce each block (called once per block) reduceBlock(sdata, block); // Write out the result to global memory if (block.thread_rank() == 0) { g_odata[blockIdx.x] = sdata[0]; } cg::sync(grid); if (grid.thread_rank() == 0) { for (int block = 1; block < gridDim.x; block++) { g_odata[0] += g_odata[block]; } } } //////////////////////////////////////////////////////////////////////////////// // Wrapper function for kernel launch //////////////////////////////////////////////////////////////////////////////// void call_reduceSinglePassMultiBlockCG(int size, int threads, int numBlocks, float *d_idata, float *d_odata) { int smemSize = threads * sizeof(double); void *kernelArgs[] = { (void *)&d_idata, (void *)&d_odata, (void *)&size, }; dim3 dimBlock(threads, 1, 1); dim3 dimGrid(numBlocks, 1, 1); cudaLaunchCooperativeKernel((void *)reduceSinglePassMultiBlockCG, dimGrid, dimBlock, kernelArgs, smemSize, NULL); // check if kernel execution generated an error getLastCudaError("Kernel execution failed"); } //////////////////////////////////////////////////////////////////////////////// // declaration, forward bool runTest(int argc, char **argv, int device); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { cudaDeviceProp deviceProp = {0}; int dev; printf("%s Starting...\n\n", sSDKsample); dev = findCudaDevice(argc, (const char **)argv); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); if (!deviceProp.cooperativeLaunch) { printf( "\nSelected GPU (%d) does not support Cooperative Kernel Launch, " "Waiving the run\n", dev); exit(EXIT_WAIVED); } bool bTestPassed = false; bTestPassed = runTest(argc, argv, dev); exit(bTestPassed ? EXIT_SUCCESS : EXIT_FAILURE); } //////////////////////////////////////////////////////////////////////////////// //! Compute sum reduction on CPU //! We use Kahan summation for an accurate sum of large arrays. //! http://en.wikipedia.org/wiki/Kahan_summation_algorithm //! //! @param data pointer to input data //! @param size number of input data elements //////////////////////////////////////////////////////////////////////////////// template <class T> T reduceCPU(T *data, int size) { T sum = data[0]; T c = (T)0.0; for (int i = 1; i < size; i++) { T y = data[i] - c; T t = sum + y; c = (t - sum) - y; sum = t; } return sum; } unsigned int nextPow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } //////////////////////////////////////////////////////////////////////////////// // Compute the number of threads and blocks to use for the reduction // We set threads / block to the minimum of maxThreads and n/2. //////////////////////////////////////////////////////////////////////////////// void getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { if (n == 1) { threads = 1; blocks = 1; } else { threads = (n < maxThreads * 2) ? 
nextPow2(n / 2) : maxThreads; blocks = max(1, n / (threads * 2)); } blocks = min(maxBlocks, blocks); } //////////////////////////////////////////////////////////////////////////////// // This function performs a reduction of the input data multiple times and // measures the average reduction time. //////////////////////////////////////////////////////////////////////////////// float benchmarkReduce(int n, int numThreads, int numBlocks, int maxThreads, int maxBlocks, int testIterations, StopWatchInterface *timer, float *h_odata, float *d_idata, float *d_odata) { float gpu_result = 0; cudaError_t error; printf("\nLaunching %s kernel\n", "SinglePass Multi Block Cooperative Groups"); for (int i = 0; i < testIterations; ++i) { gpu_result = 0; sdkStartTimer(&timer); call_reduceSinglePassMultiBlockCG(n, numThreads, numBlocks, d_idata, d_odata); cudaDeviceSynchronize(); sdkStopTimer(&timer); } // copy final sum from device to host error = cudaMemcpy(&gpu_result, d_odata, sizeof(float), cudaMemcpyDeviceToHost); checkCudaErrors(error); return gpu_result; } //////////////////////////////////////////////////////////////////////////////// // The main function which runs the reduction test. //////////////////////////////////////////////////////////////////////////////// bool runTest(int argc, char **argv, int device) { int size = 1 << 25; // number of elements to reduce bool bTestPassed = false; if (checkCmdLineFlag(argc, (const char **)argv, "n")) { size = getCmdLineArgumentInt(argc, (const char **)argv, "n"); } printf("%d elements\n", size); // Set the device to be used cudaDeviceProp prop = {0}; checkCudaErrors(cudaSetDevice(device)); checkCudaErrors(cudaGetDeviceProperties(&prop, device)); // create random input data on CPU unsigned int bytes = size * sizeof(float); float *h_idata = (float *)malloc(bytes); for (int i = 0; i < size; i++) { // Keep the numbers small so we don't get truncation error in the sum h_idata[i] = (rand() & 0xFF) / (float)RAND_MAX; } // Determine the launch configuration (threads, blocks) int maxThreads = 0; int maxBlocks = 0; if (checkCmdLineFlag(argc, (const char **)argv, "threads")) { maxThreads = getCmdLineArgumentInt(argc, (const char **)argv, "threads"); } else { maxThreads = prop.maxThreadsPerBlock; } if (checkCmdLineFlag(argc, (const char **)argv, "maxblocks")) { maxBlocks = getCmdLineArgumentInt(argc, (const char **)argv, "maxblocks"); } else { maxBlocks = prop.multiProcessorCount * (prop.maxThreadsPerMultiProcessor / prop.maxThreadsPerBlock); } int numBlocks = 0; int numThreads = 0; getNumBlocksAndThreads(size, maxBlocks, maxThreads, numBlocks, numThreads); // We calculate the occupancy to know how many block can actually fit on the // GPU int numBlocksPerSm = 0; checkCudaErrors(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &numBlocksPerSm, reduceSinglePassMultiBlockCG, numThreads, numThreads * sizeof(double))); int numSms = prop.multiProcessorCount; if (numBlocks > numBlocksPerSm * numSms) { numBlocks = numBlocksPerSm * numSms; } printf("numThreads: %d\n", numThreads); printf("numBlocks: %d\n", numBlocks); // allocate mem for the result on host side float *h_odata = (float *)malloc(numBlocks * sizeof(float)); // allocate device memory and data float *d_idata = NULL; float *d_odata = NULL; checkCudaErrors(cudaMalloc((void **)&d_idata, bytes)); checkCudaErrors(cudaMalloc((void **)&d_odata, numBlocks * sizeof(float))); // copy data directly to device memory checkCudaErrors(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); 
checkCudaErrors(cudaMemcpy(d_odata, h_idata, numBlocks * sizeof(float), cudaMemcpyHostToDevice)); int testIterations = 100; StopWatchInterface *timer = 0; sdkCreateTimer(&timer); float gpu_result = 0; gpu_result = benchmarkReduce(size, numThreads, numBlocks, maxThreads, maxBlocks, testIterations, timer, h_odata, d_idata, d_odata); float reduceTime = sdkGetAverageTimerValue(&timer); printf("Average time: %f ms\n", reduceTime); printf("Bandwidth: %f GB/s\n\n", (size * sizeof(int)) / (reduceTime * 1.0e6)); // compute reference solution float cpu_result = reduceCPU<float>(h_idata, size); printf("GPU result = %0.12f\n", gpu_result); printf("CPU result = %0.12f\n", cpu_result); double threshold = 1e-8 * size; double diff = abs((double)gpu_result - (double)cpu_result); bTestPassed = (diff < threshold); // cleanup sdkDeleteTimer(&timer); free(h_idata); free(h_odata); cudaFree(d_idata); cudaFree(d_odata); return bTestPassed; }
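// A minimal sketch of calling the wrapper directly, outside the benchmark
// harness above. It assumes n is a power of two, that d_in and d_out are valid
// device buffers (d_out holding at least `blocks` floats), and that
// threads/blocks were chosen so all blocks can be co-resident (see the
// occupancy-based cap in runTest); the helper name is illustrative only.
static float reduceOnce(float *d_in, float *d_out, int n, int threads,
                        int blocks) {
  call_reduceSinglePassMultiBlockCG(n, threads, blocks, d_in, d_out);
  float result = 0.0f;
  // The blocking copy also synchronizes with the NULL-stream kernel launch.
  checkCudaErrors(
      cudaMemcpy(&result, d_out, sizeof(float), cudaMemcpyDeviceToHost));
  return result;  // the kernel accumulates the final sum into d_out[0]
}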
#include "caffe/layers/softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template<typename Dtype, typename MItype, typename MOtype> void SoftmaxWithLossLayer<Dtype, MItype, MOtype>::GenerateProgram() { this->device_program_ = this->device_->CreateProgram(); stringstream ss; ss << this->device_program_->setup(); ss << this->device_program_->template define_type<Dtype>("Dtype"); ss << this->device_program_->template define_type<MItype>("MItype"); ss << this->device_program_->template define_type<MOtype>("MOtype"); ss << this->device_program_->template helper_functions<Dtype>(); KernelArgs fw_args; fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "nthreads", KERNEL_ARG_CONST)); fw_args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "prob_data", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); fw_args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "label", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); fw_args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "loss", KERNEL_ARG_GLOBAL_MEM)); fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dim", KERNEL_ARG_CONST)); fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "spatial_dim", KERNEL_ARG_CONST)); fw_args.push_back(this->device_program_->template create_kernel_arg<bool>( "has_ignore_label", KERNEL_ARG_CONST)); fw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ignore_label", KERNEL_ARG_CONST)); fw_args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "counts", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("SoftmaxLossForwardGPU", fw_args); ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); ss << "const int_tp n = index / spatial_dim;" << std::endl; ss << "const int_tp s = index % spatial_dim;" << std::endl; ss << "const int_tp label_value = (int_tpc)(label[n * spatial_dim + s]);" << std::endl; ss << "if (has_ignore_label && label_value == ignore_label) {" << std::endl; ss << "loss[index] = (Dtype)0;" << std::endl; ss << "counts[index] = (Dtype)0;" << std::endl; ss << "} else {" << std::endl; ss << "loss[index] = -log(" << "max((float)prob_data[n * dim + label_value * spatial_dim + s], " << "(float)FLT_MIN));" << std::endl; ss << "counts[index] = 1;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; KernelArgs bw_args; bw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "nthreads", KERNEL_ARG_CONST)); bw_args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "top", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); bw_args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "label", KERNEL_ARG_CONST | KERNEL_ARG_GLOBAL_MEM)); bw_args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "bottom_diff", KERNEL_ARG_GLOBAL_MEM)); bw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "num", KERNEL_ARG_CONST)); bw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "dim", KERNEL_ARG_CONST)); bw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "spatial_dim", KERNEL_ARG_CONST)); bw_args.push_back(this->device_program_->template create_kernel_arg<bool>( "has_ignore_label", KERNEL_ARG_CONST)); 
bw_args.push_back(this->device_program_->template create_kernel_arg<int_tp>( "ignore_label", KERNEL_ARG_CONST)); bw_args.push_back(this->device_program_->template create_kernel_arg<Dtype>( "counts", KERNEL_ARG_GLOBAL_MEM)); ss << this->device_program_->function("SoftmaxLossBackwardGPU", bw_args); ss << "const int_tp channels = dim / spatial_dim;" << std::endl; ss << this->device_program_->kernel_loop("uint_tp", "index", "nthreads"); ss << "const int_tp n = index / spatial_dim;" << std::endl; ss << "const int_tp s = index % spatial_dim;" << std::endl; ss << "const int_tp label_value = (int_tpc)(label[n * spatial_dim + s]);" << std::endl; ss << "if (has_ignore_label && label_value == ignore_label) {" << std::endl; ss << "for (int_tp c = 0; c < channels; ++c) {" << std::endl; ss << "bottom_diff[n * dim + c * spatial_dim + s] = 0;" << std::endl; ss << "}" << std::endl; ss << "counts[index] = 0;" << std::endl; ss << "} else {" << std::endl; ss << "bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;" << std::endl; ss << "counts[index] = 1;" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; ss << "}" << std::endl; this->device_program_->set_source(ss.str()); this->device_program_->Compile(true, true); } template<typename Dtype, typename MItype, typename MOtype> void SoftmaxWithLossLayer<Dtype, MItype, MOtype>::Forward_gpu( const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); vptr<const Dtype> prob_data = prob_.gpu_data(); vptr<const Dtype> label = bottom[1]->gpu_data(); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything, we use it here to avoid having // to allocate new GPU memory to accumulate intermediate results. vptr<Dtype> loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. vptr<Dtype> counts = prob_.mutable_gpu_diff(); shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("SoftmaxLossForwardGPU"); kernel->add_arg(&nthreads); kernel->add_arg(&prob_data); kernel->add_arg(&label); kernel->add_arg(&loss_data); kernel->add_arg(&outer_num_); kernel->add_arg(&dim); kernel->add_arg(&inner_num_); kernel->add_arg(&has_ignore_label_); kernel->add_arg(&ignore_label_); kernel->add_arg(&counts); vector<size_t> work_size(1, nthreads); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); Dtype loss; this->device_->template asum<Dtype>(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { this->device_->template asum<Dtype>(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() >= 2) { top[1]->ShareData(prob_); } // Clear scratch memory to prevent interfering with backward (see #6202). 
this->device_->template set<Dtype>(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff()); } template<typename Dtype, typename MItype, typename MOtype> void SoftmaxWithLossLayer<Dtype, MItype, MOtype>::Backward_gpu( const vector<Blob<MOtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<MItype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { vptr<Dtype> bottom_diff = bottom[0]->mutable_gpu_diff(); vptr<const Dtype> prob_data = prob_.gpu_data(); vptr<const Dtype> top_data = top[0]->gpu_data(); this->device_->template copy<Dtype>(prob_.count(), prob_data, bottom_diff); vptr<const Dtype> label = bottom[1]->gpu_data(); const int_tp dim = prob_.count() / outer_num_; const int_tp nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. vptr<Dtype> counts = prob_.mutable_gpu_diff(); shared_ptr<DeviceKernel> kernel = this->device_program_->GetKernel("SoftmaxLossBackwardGPU"); kernel->add_arg(&nthreads); kernel->add_arg(&top_data); kernel->add_arg(&label); kernel->add_arg(&bottom_diff); kernel->add_arg(&outer_num_); kernel->add_arg(&dim); kernel->add_arg(&inner_num_); kernel->add_arg(&has_ignore_label_); kernel->add_arg(&ignore_label_); kernel->add_arg(&counts); vector<size_t> work_size(1, nthreads); vector<size_t> group; vector<size_t> local; this->device_->get_threads(&work_size, &group, &local, kernel.get(), true); kernel->Execute(group, local); Dtype valid_count = -1; if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { this->device_->template asum<Dtype>(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); this->device_->template scal<Dtype>(prob_.count(), loss_weight, bottom_diff); } } INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxWithLossLayer, GenerateProgram, (half_fp), (half_fp), (half_fp)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxWithLossLayer, GenerateProgram, (float), (float), (float)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxWithLossLayer, GenerateProgram, (double), (double), (double)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxWithLossLayer, Forward_gpu, (half_fp), (half_fp), (half_fp)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxWithLossLayer, Forward_gpu, (float), (float), (float)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxWithLossLayer, Forward_gpu, (double), (double), (double)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxWithLossLayer, Backward_gpu, (half_fp), (half_fp), (half_fp)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxWithLossLayer, Backward_gpu, (float), (float), (float)); INSTANTIATE_CLASST_FUNC_3T_GUARDED(SoftmaxWithLossLayer, Backward_gpu, (double), (double), (double)); } // namespace caffe
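// For readability, this is roughly what the string-assembled
// SoftmaxLossForwardGPU kernel above expands to when Dtype is float and the
// backend spells the kernel_loop() helper as a CUDA grid-stride loop. It is an
// illustrative sketch only (hence compiled out) and assumes int_tp/int_tpc map
// to plain int in this configuration.
#if 0
__global__ void SoftmaxLossForwardGPU_sketch(
    const int nthreads, const float *prob_data, const float *label,
    float *loss, const int num, const int dim, const int spatial_dim,
    const bool has_ignore_label, const int ignore_label, float *counts) {
  for (unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < (unsigned int)nthreads; index += blockDim.x * gridDim.x) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int label_value = (int)(label[n * spatial_dim + s]);
    if (has_ignore_label && label_value == ignore_label) {
      loss[index] = 0.f;
      counts[index] = 0.f;
    } else {
      loss[index] = -log(max(
          (float)prob_data[n * dim + label_value * spatial_dim + s],
          (float)FLT_MIN));
      counts[index] = 1.f;
    }
  }
}
#endif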
#include "opencv2/opencv_modules.hpp" #ifndef HAVE_OPENCV_CUDEV #error "opencv_cudev is required" #else #include <cuda.h> #include "stereosgm.hpp" #include "opencv2/cudev/common.hpp" #include "opencv2/cudev/warp/warp.hpp" #include "opencv2/cudastereo.hpp" namespace cv { namespace cuda { namespace device { namespace stereosgm { static constexpr uint16_t INVALID_DISP = static_cast<uint16_t>(-1); namespace detail { template <typename T> __device__ __forceinline__ static T ldg(const T* const p) { #if __CUDA_ARCH__ >= 350 return __ldg(p); #else return *p; #endif } template <unsigned int WARPS_PER_BLOCK, typename T> __device__ __forceinline__ static T shfl(T var, int srcLane, int width = cudev::WARP_SIZE, uint32_t mask = 0xFFFFFFFFU) { #if __CUDA_ARCH__ >= 300 #if CUDA_VERSION >= 9000 return __shfl_sync(mask, var, srcLane, width); #else return __shfl(var, srcLane, width); #endif // CUDA_VERSION #else static __shared__ T smem[WARPS_PER_BLOCK][cudev::WARP_SIZE]; srcLane %= width; smem[cudev::Warp::warpId()][cudev::Warp::laneId()] = var; T ret = smem[cudev::Warp::warpId()][srcLane + (cudev::Warp::laneId() / width) * width]; return ret; #endif // __CUDA_ARCH__ } template <unsigned int WARPS_PER_BLOCK, typename T> __device__ __forceinline__ static T shfl_up(T var, unsigned int delta, int width = cudev::WARP_SIZE, uint32_t mask = 0xFFFFFFFFU) { #if __CUDA_ARCH__ >= 300 #if CUDA_VERSION >= 9000 return __shfl_up_sync(mask, var, delta, width); #else return __shfl_up(var, delta, width); #endif // CUDA_VERSION #else static __shared__ T smem[WARPS_PER_BLOCK][cudev::WARP_SIZE]; smem[cudev::Warp::warpId()][cudev::Warp::laneId()] = var; T ret = var; if (cudev::Warp::laneId() % width >= delta) { ret = smem[cudev::Warp::warpId()][cudev::Warp::laneId() - delta]; } return ret; #endif // __CUDA_ARCH__ } template <unsigned int WARPS_PER_BLOCK, typename T> __device__ __forceinline__ static T shfl_down(T var, unsigned int delta, int width = cudev::WARP_SIZE, uint32_t mask = 0xFFFFFFFFU) { #if __CUDA_ARCH__ >= 300 #if CUDA_VERSION >= 9000 return __shfl_down_sync(mask, var, delta, width); #else return __shfl_down(var, delta, width); #endif // CUDA_VERSION #else static __shared__ T smem[WARPS_PER_BLOCK][cudev::WARP_SIZE]; smem[cudev::Warp::warpId()][cudev::Warp::laneId()] = var; T ret = var; if (cudev::Warp::laneId() % width + delta < width) { ret = smem[cudev::Warp::warpId()][cudev::Warp::laneId() + delta]; } return ret; #endif // __CUDA_ARCH__ } template <unsigned int WARPS_PER_BLOCK, typename T> __device__ __forceinline__ static T shfl_xor(T var, int laneMask, int width = cudev::WARP_SIZE, uint32_t mask = 0xFFFFFFFFU) { #if __CUDA_ARCH__ >= 300 #if CUDA_VERSION >= 9000 return __shfl_xor_sync(mask, var, laneMask, width); #else return __shfl_xor(var, laneMask, width); #endif // CUDA_VERSION #else static __shared__ T smem[WARPS_PER_BLOCK][cudev::WARP_SIZE]; smem[cudev::Warp::warpId()][cudev::Warp::laneId()] = var; T ret = var; if (((cudev::Warp::laneId() % width) ^ laneMask) < width) { ret = smem[cudev::Warp::warpId()][cudev::Warp::laneId() ^ laneMask]; } return ret; #endif // __CUDA_ARCH__ } template <typename T, unsigned int WARPS_PER_BLOCK, unsigned int GROUP_SIZE, unsigned int STEP> struct subgroup_min_impl { static __device__ T call(T x, uint32_t mask) { x = ::min(x, shfl_xor<WARPS_PER_BLOCK, T>(x, STEP / 2, GROUP_SIZE, mask)); return subgroup_min_impl<T, WARPS_PER_BLOCK, GROUP_SIZE, STEP / 2>::call(x, mask); } }; template <typename T, unsigned int WARPS_PER_BLOCK, unsigned int GROUP_SIZE> struct 
subgroup_min_impl<T, WARPS_PER_BLOCK, GROUP_SIZE, 1u> { static __device__ T call(T x, uint32_t) { return x; } }; template <unsigned int WARPS_PER_BLOCK, unsigned int GROUP_SIZE, unsigned int STEP> struct subgroup_and_impl { static __device__ bool call(bool x, uint32_t mask) { x &= shfl_xor<WARPS_PER_BLOCK>(x, STEP / 2, GROUP_SIZE, mask); return subgroup_and_impl<WARPS_PER_BLOCK, GROUP_SIZE, STEP / 2>::call(x, mask); } }; template <unsigned int WARPS_PER_BLOCK, unsigned int GROUP_SIZE> struct subgroup_and_impl<WARPS_PER_BLOCK, GROUP_SIZE, 1u> { static __device__ bool call(bool x, uint32_t) { return x; } }; } // namespace detail template <unsigned int WARPS_PER_BLOCK, unsigned int GROUP_SIZE, typename T> __device__ inline T subgroup_min(T x, uint32_t mask) { return detail::subgroup_min_impl<T, WARPS_PER_BLOCK, GROUP_SIZE, GROUP_SIZE>::call(x, mask); } template <unsigned int WARPS_PER_BLOCK, unsigned int GROUP_SIZE> __device__ inline bool subgroup_and(bool x, uint32_t mask) { return detail::subgroup_and_impl<WARPS_PER_BLOCK, GROUP_SIZE, GROUP_SIZE>::call(x, mask); } template <typename T, typename S> __device__ inline T load_as(const S *p) { return *reinterpret_cast<const T *>(p); } template <typename T, typename S> __device__ inline void store_as(S *p, const T& x) { *reinterpret_cast<T *>(p) = x; } template <typename T> __device__ inline uint32_t pack_uint8x4(T x, T y, T z, T w) { uchar4 uint8x4; uint8x4.x = static_cast<uint8_t>(x); uint8x4.y = static_cast<uint8_t>(y); uint8x4.z = static_cast<uint8_t>(z); uint8x4.w = static_cast<uint8_t>(w); return load_as<uint32_t>(&uint8x4); } template <unsigned int N> __device__ inline void load_uint8_vector(uint32_t *dest, const uint8_t *ptr); template <> __device__ inline void load_uint8_vector<1u>(uint32_t *dest, const uint8_t *ptr) { dest[0] = static_cast<uint32_t>(ptr[0]); } template <> __device__ inline void load_uint8_vector<2u>(uint32_t *dest, const uint8_t *ptr) { const auto uint8x2 = load_as<uchar2>(ptr); dest[0] = uint8x2.x; dest[1] = uint8x2.y; } template <> __device__ inline void load_uint8_vector<4u>(uint32_t *dest, const uint8_t *ptr) { const auto uint8x4 = load_as<uchar4>(ptr); dest[0] = uint8x4.x; dest[1] = uint8x4.y; dest[2] = uint8x4.z; dest[3] = uint8x4.w; } template <> __device__ inline void load_uint8_vector<8u>(uint32_t *dest, const uint8_t *ptr) { const auto uint32x2 = load_as<uint2>(ptr); load_uint8_vector<4u>(dest + 0, reinterpret_cast<const uint8_t *>(&uint32x2.x)); load_uint8_vector<4u>(dest + 4, reinterpret_cast<const uint8_t *>(&uint32x2.y)); } template <> __device__ inline void load_uint8_vector<16u>(uint32_t *dest, const uint8_t *ptr) { const auto uint32x4 = load_as<uint4>(ptr); load_uint8_vector<4u>(dest + 0, reinterpret_cast<const uint8_t *>(&uint32x4.x)); load_uint8_vector<4u>(dest + 4, reinterpret_cast<const uint8_t *>(&uint32x4.y)); load_uint8_vector<4u>(dest + 8, reinterpret_cast<const uint8_t *>(&uint32x4.z)); load_uint8_vector<4u>(dest + 12, reinterpret_cast<const uint8_t *>(&uint32x4.w)); } template <unsigned int N> __device__ inline void store_uint8_vector(uint8_t *dest, const uint32_t *ptr); template <> __device__ inline void store_uint8_vector<1u>(uint8_t *dest, const uint32_t *ptr) { dest[0] = static_cast<uint8_t>(ptr[0]); } template <> __device__ inline void store_uint8_vector<2u>(uint8_t *dest, const uint32_t *ptr) { uchar2 uint8x2; uint8x2.x = static_cast<uint8_t>(ptr[0]); uint8x2.y = static_cast<uint8_t>(ptr[0]); store_as<uchar2>(dest, uint8x2); } template <> __device__ inline void 
store_uint8_vector<4u>(uint8_t *dest, const uint32_t *ptr) { store_as<uint32_t>(dest, pack_uint8x4(ptr[0], ptr[1], ptr[2], ptr[3])); } template <> __device__ inline void store_uint8_vector<8u>(uint8_t *dest, const uint32_t *ptr) { uint2 uint32x2; uint32x2.x = pack_uint8x4(ptr[0], ptr[1], ptr[2], ptr[3]); uint32x2.y = pack_uint8x4(ptr[4], ptr[5], ptr[6], ptr[7]); store_as<uint2>(dest, uint32x2); } template <> __device__ inline void store_uint8_vector<16u>(uint8_t *dest, const uint32_t *ptr) { uint4 uint32x4; uint32x4.x = pack_uint8x4(ptr[0], ptr[1], ptr[2], ptr[3]); uint32x4.y = pack_uint8x4(ptr[4], ptr[5], ptr[6], ptr[7]); uint32x4.z = pack_uint8x4(ptr[8], ptr[9], ptr[10], ptr[11]); uint32x4.w = pack_uint8x4(ptr[12], ptr[13], ptr[14], ptr[15]); store_as<uint4>(dest, uint32x4); } template <unsigned int N> __device__ inline void load_uint16_vector(uint32_t *dest, const uint16_t *ptr); template <> __device__ inline void load_uint16_vector<1u>(uint32_t *dest, const uint16_t *ptr) { dest[0] = static_cast<uint32_t>(ptr[0]); } template <> __device__ inline void load_uint16_vector<2u>(uint32_t *dest, const uint16_t *ptr) { const auto uint16x2 = load_as<ushort2>(ptr); dest[0] = uint16x2.x; dest[1] = uint16x2.y; } template <> __device__ inline void load_uint16_vector<4u>(uint32_t *dest, const uint16_t *ptr) { const auto uint16x4 = load_as<ushort4>(ptr); dest[0] = uint16x4.x; dest[1] = uint16x4.y; dest[2] = uint16x4.z; dest[3] = uint16x4.w; } template <> __device__ inline void load_uint16_vector<8u>(uint32_t *dest, const uint16_t *ptr) { const auto uint32x4 = load_as<uint4>(ptr); load_uint16_vector<2u>(dest + 0, reinterpret_cast<const uint16_t *>(&uint32x4.x)); load_uint16_vector<2u>(dest + 2, reinterpret_cast<const uint16_t *>(&uint32x4.y)); load_uint16_vector<2u>(dest + 4, reinterpret_cast<const uint16_t *>(&uint32x4.z)); load_uint16_vector<2u>(dest + 6, reinterpret_cast<const uint16_t *>(&uint32x4.w)); } template <unsigned int N> __device__ inline void store_uint16_vector(uint16_t *dest, const uint32_t *ptr); template <> __device__ inline void store_uint16_vector<1u>(uint16_t *dest, const uint32_t *ptr) { dest[0] = static_cast<uint16_t>(ptr[0]); } template <> __device__ inline void store_uint16_vector<2u>(uint16_t *dest, const uint32_t *ptr) { ushort2 uint16x2; uint16x2.x = static_cast<uint16_t>(ptr[0]); uint16x2.y = static_cast<uint16_t>(ptr[1]); store_as<ushort2>(dest, uint16x2); } template <> __device__ inline void store_uint16_vector<4u>(uint16_t *dest, const uint32_t *ptr) { ushort4 uint16x4; uint16x4.x = static_cast<uint16_t>(ptr[0]); uint16x4.y = static_cast<uint16_t>(ptr[1]); uint16x4.z = static_cast<uint16_t>(ptr[2]); uint16x4.w = static_cast<uint16_t>(ptr[3]); store_as<ushort4>(dest, uint16x4); } template <> __device__ inline void store_uint16_vector<8u>(uint16_t *dest, const uint32_t *ptr) { uint4 uint32x4; store_uint16_vector<2u>(reinterpret_cast<uint16_t *>(&uint32x4.x), &ptr[0]); store_uint16_vector<2u>(reinterpret_cast<uint16_t *>(&uint32x4.y), &ptr[2]); store_uint16_vector<2u>(reinterpret_cast<uint16_t *>(&uint32x4.z), &ptr[4]); store_uint16_vector<2u>(reinterpret_cast<uint16_t *>(&uint32x4.w), &ptr[6]); store_as<uint4>(dest, uint32x4); } template <> __device__ inline void store_uint16_vector<16u>(uint16_t *dest, const uint32_t *ptr) { store_uint16_vector<8u>(dest + 0, ptr + 0); store_uint16_vector<8u>(dest + 8, ptr + 8); } namespace census_transform { namespace { static constexpr int WINDOW_WIDTH = 9; static constexpr int WINDOW_HEIGHT = 7; static constexpr int BLOCK_SIZE = 128; 
static constexpr int LINES_PER_BLOCK = 16; template <typename T> __global__ void census_transform_kernel( PtrStepSz<T> src, PtrStep<int32_t> dest) { using pixel_type = T; static const int SMEM_BUFFER_SIZE = WINDOW_HEIGHT + 1; const int half_kw = WINDOW_WIDTH / 2; const int half_kh = WINDOW_HEIGHT / 2; __shared__ pixel_type smem_lines[SMEM_BUFFER_SIZE][BLOCK_SIZE]; const int tid = threadIdx.x; const int x0 = blockIdx.x * (BLOCK_SIZE - WINDOW_WIDTH + 1) - half_kw; const int y0 = blockIdx.y * LINES_PER_BLOCK; for (int i = 0; i < WINDOW_HEIGHT; ++i) { const int x = x0 + tid, y = y0 - half_kh + i; pixel_type value = 0; if (0 <= x && x < src.cols && 0 <= y && y < src.rows) { value = src(y, x); } smem_lines[i][tid] = value; } __syncthreads(); #pragma unroll for (int i = 0; i < LINES_PER_BLOCK; ++i) { if (i + 1 < LINES_PER_BLOCK) { // Load to smem const int x = x0 + tid, y = y0 + half_kh + i + 1; pixel_type value = 0; if (0 <= x && x < src.cols && 0 <= y && y < src.rows) { value = src(y, x); } const int smem_x = tid; const int smem_y = (WINDOW_HEIGHT + i) % SMEM_BUFFER_SIZE; smem_lines[smem_y][smem_x] = value; } if (half_kw <= tid && tid < BLOCK_SIZE - half_kw) { // Compute and store const int x = x0 + tid, y = y0 + i; if (half_kw <= x && x < src.cols - half_kw && half_kh <= y && y < src.rows - half_kh) { const int smem_x = tid; const int smem_y = (half_kh + i) % SMEM_BUFFER_SIZE; int32_t f = 0; for (int dy = -half_kh; dy < 0; ++dy) { const int smem_y1 = (smem_y + dy + SMEM_BUFFER_SIZE) % SMEM_BUFFER_SIZE; const int smem_y2 = (smem_y - dy + SMEM_BUFFER_SIZE) % SMEM_BUFFER_SIZE; for (int dx = -half_kw; dx <= half_kw; ++dx) { const int smem_x1 = smem_x + dx; const int smem_x2 = smem_x - dx; const auto a = smem_lines[smem_y1][smem_x1]; const auto b = smem_lines[smem_y2][smem_x2]; f = (f << 1) | (a > b); } } for (int dx = -half_kw; dx < 0; ++dx) { const int smem_x1 = smem_x + dx; const int smem_x2 = smem_x - dx; const auto a = smem_lines[smem_y][smem_x1]; const auto b = smem_lines[smem_y][smem_x2]; f = (f << 1) | (a > b); } dest(y, x) = f; } } __syncthreads(); } } } // anonymous namespace void censusTransform(const GpuMat& src, GpuMat& dest, Stream& _stream) { CV_Assert(src.size() == dest.size()); CV_Assert(src.type() == CV_8UC1 || src.type() == CV_16UC1); const int width_per_block = BLOCK_SIZE - WINDOW_WIDTH + 1; const int height_per_block = LINES_PER_BLOCK; const dim3 gdim( cudev::divUp(src.cols, width_per_block), cudev::divUp(src.rows, height_per_block)); const dim3 bdim(BLOCK_SIZE); cudaStream_t stream = StreamAccessor::getStream(_stream); switch (src.type()) { case CV_8UC1: census_transform_kernel<uint8_t><<<gdim, bdim, 0, stream>>>(src, dest); break; case CV_16UC1: census_transform_kernel<uint16_t><<<gdim, bdim, 0, stream>>>(src, dest); break; } } } // namespace census_transform namespace path_aggregation { template < unsigned int DP_BLOCK_SIZE, unsigned int SUBGROUP_SIZE, unsigned int WARPS_PER_BLOCK> struct DynamicProgramming { static_assert( DP_BLOCK_SIZE >= 2, "DP_BLOCK_SIZE must be greater than or equal to 2"); static_assert( (SUBGROUP_SIZE & (SUBGROUP_SIZE - 1)) == 0, "SUBGROUP_SIZE must be a power of 2"); uint32_t last_min; uint32_t dp[DP_BLOCK_SIZE]; __device__ DynamicProgramming() : last_min(0) { for (unsigned int i = 0; i < DP_BLOCK_SIZE; ++i) { dp[i] = 0; } } __device__ void update( uint32_t *local_costs, uint32_t p1, uint32_t p2, uint32_t mask) { const unsigned int lane_id = threadIdx.x % SUBGROUP_SIZE; const auto dp0 = dp[0]; uint32_t lazy_out = 0, local_min = 0; { const unsigned 
int k = 0; const uint32_t prev = detail::shfl_up<WARPS_PER_BLOCK>(dp[DP_BLOCK_SIZE - 1], 1, cudev::WARP_SIZE, mask); uint32_t out = ::min(dp[k] - last_min, p2); if (lane_id != 0) { out = ::min(out, prev - last_min + p1); } out = ::min(out, dp[k + 1] - last_min + p1); lazy_out = local_min = out + local_costs[k]; } for (unsigned int k = 1; k + 1 < DP_BLOCK_SIZE; ++k) { uint32_t out = ::min(dp[k] - last_min, p2); out = ::min(out, dp[k - 1] - last_min + p1); out = ::min(out, dp[k + 1] - last_min + p1); dp[k - 1] = lazy_out; lazy_out = out + local_costs[k]; local_min = ::min(local_min, lazy_out); } { const unsigned int k = DP_BLOCK_SIZE - 1; const uint32_t next = detail::shfl_down<WARPS_PER_BLOCK>(dp0, 1, cudev::WARP_SIZE, mask); uint32_t out = ::min(dp[k] - last_min, p2); out = ::min(out, dp[k - 1] - last_min + p1); if (lane_id + 1 != SUBGROUP_SIZE) { out = ::min(out, next - last_min + p1); } dp[k - 1] = lazy_out; dp[k] = out + local_costs[k]; local_min = ::min(local_min, dp[k]); } last_min = subgroup_min<WARPS_PER_BLOCK, SUBGROUP_SIZE>(local_min, mask); } }; template <unsigned int SIZE> __device__ unsigned int generate_mask() { static_assert(SIZE <= 32, "SIZE must be less than or equal to 32"); return static_cast<unsigned int>((1ull << SIZE) - 1u); } namespace horizontal { namespace { static constexpr unsigned int DP_BLOCK_SIZE = 8u; static constexpr unsigned int DP_BLOCKS_PER_THREAD = 1u; static constexpr unsigned int WARPS_PER_BLOCK = 4u; static constexpr unsigned int BLOCK_SIZE = cudev::WARP_SIZE * WARPS_PER_BLOCK; template <int DIRECTION, unsigned int MAX_DISPARITY> __global__ void aggregate_horizontal_path_kernel( PtrStep<int32_t> left, PtrStep<int32_t> right, PtrStep<uint8_t> dest, int width, int height, unsigned int p1, unsigned int p2, int min_disp) { static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int SUBGROUPS_PER_WARP = cudev::WARP_SIZE / SUBGROUP_SIZE; static const unsigned int PATHS_PER_WARP = cudev::WARP_SIZE * DP_BLOCKS_PER_THREAD / SUBGROUP_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE * DP_BLOCKS_PER_THREAD / SUBGROUP_SIZE; static_assert(DIRECTION == 1 || DIRECTION == -1, ""); if (width == 0 || height == 0) { return; } int32_t right_buffer[DP_BLOCKS_PER_THREAD][DP_BLOCK_SIZE]; DynamicProgramming<DP_BLOCK_SIZE, SUBGROUP_SIZE, WARPS_PER_BLOCK> dp[DP_BLOCKS_PER_THREAD]; const unsigned int warp_id = cudev::Warp::warpId(); const unsigned int group_id = cudev::Warp::laneId() / SUBGROUP_SIZE; const unsigned int lane_id = threadIdx.x % SUBGROUP_SIZE; const unsigned int shfl_mask = generate_mask<SUBGROUP_SIZE>() << (group_id * SUBGROUP_SIZE); const unsigned int y0 = PATHS_PER_BLOCK * blockIdx.x + PATHS_PER_WARP * warp_id + group_id; const unsigned int feature_step = SUBGROUPS_PER_WARP; const unsigned int dest_step = SUBGROUPS_PER_WARP * MAX_DISPARITY * width; const unsigned int dp_offset = lane_id * DP_BLOCK_SIZE; left = PtrStep<int32_t>(left.ptr(y0), left.step); right = PtrStep<int32_t>(right.ptr(y0), right.step); dest = PtrStep<uint8_t>(&dest(0, y0 * width * MAX_DISPARITY), dest.step); if (y0 >= height) { return; } if (DIRECTION > 0) { for (unsigned int i = 0; i < DP_BLOCKS_PER_THREAD; ++i) { for (unsigned int j = 0; j < DP_BLOCK_SIZE; ++j) { right_buffer[i][j] = 0; } } } else { for (unsigned int i = 0; i < DP_BLOCKS_PER_THREAD; ++i) { for (unsigned int j = 0; j < DP_BLOCK_SIZE; ++j) { const int x = static_cast<int>(width - (min_disp + j + dp_offset)); if (0 <= x && x < static_cast<int>(width)) { right_buffer[i][j] = 
detail::ldg(&right(i * feature_step, x)); } else { right_buffer[i][j] = 0; } } } } int x0 = (DIRECTION > 0) ? 0 : static_cast<int>((width - 1) & ~(DP_BLOCK_SIZE - 1)); for (unsigned int iter = 0; iter < width; iter += DP_BLOCK_SIZE) { for (unsigned int i = 0; i < DP_BLOCK_SIZE; ++i) { const unsigned int x = x0 + (DIRECTION > 0 ? i : (DP_BLOCK_SIZE - 1 - i)); if (x >= width) { continue; } for (unsigned int j = 0; j < DP_BLOCKS_PER_THREAD; ++j) { const unsigned int y = y0 + j * SUBGROUPS_PER_WARP; if (y >= height) { continue; } const int32_t left_value = detail::ldg(&left(j * feature_step, x)); if (DIRECTION > 0) { const int32_t t = right_buffer[j][DP_BLOCK_SIZE - 1]; for (unsigned int k = DP_BLOCK_SIZE - 1; k > 0; --k) { right_buffer[j][k] = right_buffer[j][k - 1]; } right_buffer[j][0] = detail::shfl_up<WARPS_PER_BLOCK>(t, 1, SUBGROUP_SIZE, shfl_mask); if (lane_id == 0 && x >= min_disp) { right_buffer[j][0] = detail::ldg(&right(j * feature_step, x - min_disp)); } } else { const int32_t t = right_buffer[j][0]; for (unsigned int k = 1; k < DP_BLOCK_SIZE; ++k) { right_buffer[j][k - 1] = right_buffer[j][k]; } right_buffer[j][DP_BLOCK_SIZE - 1] = detail::shfl_down<WARPS_PER_BLOCK>(t, 1, SUBGROUP_SIZE, shfl_mask); if (lane_id + 1 == SUBGROUP_SIZE) { if (x >= min_disp + dp_offset + DP_BLOCK_SIZE - 1) { right_buffer[j][DP_BLOCK_SIZE - 1] = detail::ldg(&right(j * feature_step, x - (min_disp + dp_offset + DP_BLOCK_SIZE - 1))); } else { right_buffer[j][DP_BLOCK_SIZE - 1] = 0; } } } uint32_t local_costs[DP_BLOCK_SIZE]; for (unsigned int k = 0; k < DP_BLOCK_SIZE; ++k) { local_costs[k] = __popc(left_value ^ right_buffer[j][k]); } dp[j].update(local_costs, p1, p2, shfl_mask); store_uint8_vector<DP_BLOCK_SIZE>( &dest(0, j * dest_step + x * MAX_DISPARITY + dp_offset), dp[j].dp); } } x0 += static_cast<int>(DP_BLOCK_SIZE) * DIRECTION; } } } // anonymous namespace template <unsigned int MAX_DISPARITY> void aggregateLeft2RightPath( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& _stream) { CV_Assert(left.size() == right.size()); CV_Assert(left.type() == right.type()); CV_Assert(left.type() == CV_32SC1); static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE * DP_BLOCKS_PER_THREAD / SUBGROUP_SIZE; const int gdim = cudev::divUp(left.rows, PATHS_PER_BLOCK); const int bdim = BLOCK_SIZE; cudaStream_t stream = cv::cuda::StreamAccessor::getStream(_stream); aggregate_horizontal_path_kernel<1, MAX_DISPARITY><<<gdim, bdim, 0, stream>>>( left, right, dest, left.cols, left.rows, p1, p2, min_disp); } template <unsigned int MAX_DISPARITY> void aggregateRight2LeftPath( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& _stream) { CV_Assert(left.size() == right.size()); CV_Assert(left.type() == right.type()); CV_Assert(left.type() == CV_32SC1); static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE * DP_BLOCKS_PER_THREAD / SUBGROUP_SIZE; const int gdim = cudev::divUp(left.rows, PATHS_PER_BLOCK); const int bdim = BLOCK_SIZE; cudaStream_t stream = cv::cuda::StreamAccessor::getStream(_stream); aggregate_horizontal_path_kernel<-1, MAX_DISPARITY><<<gdim, bdim, 0, stream>>>( left, right, dest, left.cols, left.rows, p1, p2, min_disp); } template CV_EXPORTS void aggregateLeft2RightPath<64u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned 
int p2, int min_disp, Stream& _stream); template CV_EXPORTS void aggregateLeft2RightPath<128u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& _stream); template CV_EXPORTS void aggregateLeft2RightPath<256u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& _stream); template CV_EXPORTS void aggregateRight2LeftPath<64u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& _stream); template CV_EXPORTS void aggregateRight2LeftPath<128u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& _stream); template CV_EXPORTS void aggregateRight2LeftPath<256u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& _stream); } // namespace horizontal namespace vertical { namespace { static constexpr unsigned int DP_BLOCK_SIZE = 16u; static constexpr unsigned int WARPS_PER_BLOCK = 8u; static constexpr unsigned int BLOCK_SIZE = cudev::WARP_SIZE * WARPS_PER_BLOCK; template <int DIRECTION, unsigned int MAX_DISPARITY> __global__ void aggregate_vertical_path_kernel( PtrStep<int32_t> left, PtrStep<int32_t> right, PtrStep<uint8_t> dest, int width, int height, unsigned int p1, unsigned int p2, int min_disp) { static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_WARP = cudev::WARP_SIZE / SUBGROUP_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE; static const unsigned int RIGHT_BUFFER_SIZE = MAX_DISPARITY + PATHS_PER_BLOCK; static const unsigned int RIGHT_BUFFER_ROWS = RIGHT_BUFFER_SIZE / DP_BLOCK_SIZE; static_assert(DIRECTION == 1 || DIRECTION == -1, ""); if (width == 0 || height == 0) { return; } __shared__ int32_t right_buffer[2 * DP_BLOCK_SIZE][RIGHT_BUFFER_ROWS + 1]; DynamicProgramming<DP_BLOCK_SIZE, SUBGROUP_SIZE, WARPS_PER_BLOCK> dp; const unsigned int warp_id = cudev::Warp::warpId(); const unsigned int group_id = cudev::Warp::laneId() / SUBGROUP_SIZE; const unsigned int lane_id = threadIdx.x % SUBGROUP_SIZE; const unsigned int shfl_mask = generate_mask<SUBGROUP_SIZE>() << (group_id * SUBGROUP_SIZE); const unsigned int x = blockIdx.x * PATHS_PER_BLOCK + warp_id * PATHS_PER_WARP + group_id; const unsigned int right_x0 = blockIdx.x * PATHS_PER_BLOCK; const unsigned int dp_offset = lane_id * DP_BLOCK_SIZE; const unsigned int right0_addr = (right_x0 + PATHS_PER_BLOCK - 1) - x + dp_offset; const unsigned int right0_addr_lo = right0_addr % DP_BLOCK_SIZE; const unsigned int right0_addr_hi = right0_addr / DP_BLOCK_SIZE; for (unsigned int iter = 0; iter < height; ++iter) { const unsigned int y = (DIRECTION > 0 ? 
iter : height - 1 - iter); // Load left to register int32_t left_value; if (x < width) { left_value = left(y, x); } // Load right to smem for (unsigned int i0 = 0; i0 < RIGHT_BUFFER_SIZE; i0 += BLOCK_SIZE) { const unsigned int i = i0 + threadIdx.x; if (i < RIGHT_BUFFER_SIZE) { const int x = static_cast<int>(right_x0 + PATHS_PER_BLOCK - 1 - i - min_disp); int32_t right_value = 0; if (0 <= x && x < static_cast<int>(width)) { right_value = right(y, x); } const unsigned int lo = i % DP_BLOCK_SIZE; const unsigned int hi = i / DP_BLOCK_SIZE; right_buffer[lo][hi] = right_value; if (hi > 0) { right_buffer[lo + DP_BLOCK_SIZE][hi - 1] = right_value; } } } __syncthreads(); // Compute if (x < width) { int32_t right_values[DP_BLOCK_SIZE]; for (unsigned int j = 0; j < DP_BLOCK_SIZE; ++j) { right_values[j] = right_buffer[right0_addr_lo + j][right0_addr_hi]; } uint32_t local_costs[DP_BLOCK_SIZE]; for (unsigned int j = 0; j < DP_BLOCK_SIZE; ++j) { local_costs[j] = __popc(left_value ^ right_values[j]); } dp.update(local_costs, p1, p2, shfl_mask); store_uint8_vector<DP_BLOCK_SIZE>( &dest(0, dp_offset + x * MAX_DISPARITY + y * MAX_DISPARITY * width), dp.dp); } __syncthreads(); } } } // anonymous namespace template <unsigned int MAX_DISPARITY> void aggregateUp2DownPath( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& _stream) { static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE; const Size size = left.size(); const int gdim = cudev::divUp(size.width, PATHS_PER_BLOCK); const int bdim = BLOCK_SIZE; cudaStream_t stream = cv::cuda::StreamAccessor::getStream(_stream); aggregate_vertical_path_kernel<1, MAX_DISPARITY><<<gdim, bdim, 0, stream>>>( left, right, dest, size.width, size.height, p1, p2, min_disp); } template <unsigned int MAX_DISPARITY> void aggregateDown2UpPath( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& _stream) { static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE; const Size size = left.size(); const int gdim = cudev::divUp(size.width, PATHS_PER_BLOCK); const int bdim = BLOCK_SIZE; cudaStream_t stream = cv::cuda::StreamAccessor::getStream(_stream); aggregate_vertical_path_kernel<-1, MAX_DISPARITY><<<gdim, bdim, 0, stream>>>( left, right, dest, size.width, size.height, p1, p2, min_disp); } template CV_EXPORTS void aggregateUp2DownPath<64u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateUp2DownPath<128u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateUp2DownPath<256u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateDown2UpPath<64u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateDown2UpPath<128u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateDown2UpPath<256u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, 
Stream& stream); } // namespace vertical namespace oblique { namespace { static constexpr unsigned int DP_BLOCK_SIZE = 16u; static constexpr unsigned int WARPS_PER_BLOCK = 8u; static constexpr unsigned int BLOCK_SIZE = cudev::WARP_SIZE * WARPS_PER_BLOCK; template <int X_DIRECTION, int Y_DIRECTION, unsigned int MAX_DISPARITY> __global__ void aggregate_oblique_path_kernel( PtrStep<int32_t> left, PtrStep<int32_t> right, PtrStep<uint8_t> dest, int width, int height, unsigned int p1, unsigned int p2, int min_disp) { static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_WARP = cudev::WARP_SIZE / SUBGROUP_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE; static const unsigned int RIGHT_BUFFER_SIZE = MAX_DISPARITY + PATHS_PER_BLOCK; static const unsigned int RIGHT_BUFFER_ROWS = RIGHT_BUFFER_SIZE / DP_BLOCK_SIZE; static_assert(X_DIRECTION == 1 || X_DIRECTION == -1, ""); static_assert(Y_DIRECTION == 1 || Y_DIRECTION == -1, ""); if (width == 0 || height == 0) { return; } __shared__ int32_t right_buffer[2 * DP_BLOCK_SIZE][RIGHT_BUFFER_ROWS]; DynamicProgramming<DP_BLOCK_SIZE, SUBGROUP_SIZE, WARPS_PER_BLOCK> dp; const unsigned int warp_id = cudev::Warp::warpId(); const unsigned int group_id = cudev::Warp::laneId() / SUBGROUP_SIZE; const unsigned int lane_id = threadIdx.x % SUBGROUP_SIZE; const unsigned int shfl_mask = generate_mask<SUBGROUP_SIZE>() << (group_id * SUBGROUP_SIZE); const int x0 = blockIdx.x * PATHS_PER_BLOCK + warp_id * PATHS_PER_WARP + group_id + (X_DIRECTION > 0 ? -static_cast<int>(height - 1) : 0); const int right_x00 = blockIdx.x * PATHS_PER_BLOCK + (X_DIRECTION > 0 ? -static_cast<int>(height - 1) : 0); const unsigned int dp_offset = lane_id * DP_BLOCK_SIZE; const unsigned int right0_addr = static_cast<unsigned int>(right_x00 + PATHS_PER_BLOCK - 1 - x0) + dp_offset; const unsigned int right0_addr_lo = right0_addr % DP_BLOCK_SIZE; const unsigned int right0_addr_hi = right0_addr / DP_BLOCK_SIZE; for (unsigned int iter = 0; iter < height; ++iter) { const int y = static_cast<int>(Y_DIRECTION > 0 ? 
iter : height - 1 - iter); const int x = x0 + static_cast<int>(iter) * X_DIRECTION; const int right_x0 = right_x00 + static_cast<int>(iter) * X_DIRECTION; // Load right to smem for (unsigned int i0 = 0; i0 < RIGHT_BUFFER_SIZE; i0 += BLOCK_SIZE) { const unsigned int i = i0 + threadIdx.x; if (i < RIGHT_BUFFER_SIZE) { const int x = static_cast<int>(right_x0 + PATHS_PER_BLOCK - 1 - i - min_disp); int32_t right_value = 0; if (0 <= x && x < static_cast<int>(width)) { right_value = right(y, x); } const unsigned int lo = i % DP_BLOCK_SIZE; const unsigned int hi = i / DP_BLOCK_SIZE; right_buffer[lo][hi] = right_value; if (hi > 0) { right_buffer[lo + DP_BLOCK_SIZE][hi - 1] = right_value; } } } __syncthreads(); // Compute if (0 <= x && x < static_cast<int>(width)) { const int32_t left_value = detail::ldg(&left(y, x)); int32_t right_values[DP_BLOCK_SIZE]; for (unsigned int j = 0; j < DP_BLOCK_SIZE; ++j) { right_values[j] = right_buffer[right0_addr_lo + j][right0_addr_hi]; } uint32_t local_costs[DP_BLOCK_SIZE]; for (unsigned int j = 0; j < DP_BLOCK_SIZE; ++j) { local_costs[j] = __popc(left_value ^ right_values[j]); } dp.update(local_costs, p1, p2, shfl_mask); store_uint8_vector<DP_BLOCK_SIZE>( &dest(0, dp_offset + x * MAX_DISPARITY + y * MAX_DISPARITY * width), dp.dp); } __syncthreads(); } } } // anonymous namespace template <unsigned int MAX_DISPARITY> void aggregateUpleft2DownrightPath( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& _stream) { static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE; const Size size = left.size(); const int gdim = cudev::divUp(size.width + size.height - 1, PATHS_PER_BLOCK); const int bdim = BLOCK_SIZE; cudaStream_t stream = StreamAccessor::getStream(_stream); aggregate_oblique_path_kernel<1, 1, MAX_DISPARITY><<<gdim, bdim, 0, stream>>>( left, right, dest, size.width, size.height, p1, p2, min_disp); } template <unsigned int MAX_DISPARITY> void aggregateUpright2DownleftPath( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& _stream) { static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE; const Size size = left.size(); const int gdim = cudev::divUp(size.width + size.height - 1, PATHS_PER_BLOCK); const int bdim = BLOCK_SIZE; cudaStream_t stream = StreamAccessor::getStream(_stream); aggregate_oblique_path_kernel<-1, 1, MAX_DISPARITY><<<gdim, bdim, 0, stream>>>( left, right, dest, size.width, size.height, p1, p2, min_disp); } template <unsigned int MAX_DISPARITY> void aggregateDownright2UpleftPath( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& _stream) { static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE; const Size size = left.size(); const int gdim = cudev::divUp(size.width + size.height - 1, PATHS_PER_BLOCK); const int bdim = BLOCK_SIZE; cudaStream_t stream = StreamAccessor::getStream(_stream); aggregate_oblique_path_kernel<-1, -1, MAX_DISPARITY><<<gdim, bdim, 0, stream>>>( left, right, dest, size.width, size.height, p1, p2, min_disp); } template <unsigned int MAX_DISPARITY> void aggregateDownleft2UprightPath( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, 
Stream& _stream) { static const unsigned int SUBGROUP_SIZE = MAX_DISPARITY / DP_BLOCK_SIZE; static const unsigned int PATHS_PER_BLOCK = BLOCK_SIZE / SUBGROUP_SIZE; const Size size = left.size(); const int gdim = cudev::divUp(size.width + size.height - 1, PATHS_PER_BLOCK); const int bdim = BLOCK_SIZE; cudaStream_t stream = StreamAccessor::getStream(_stream); aggregate_oblique_path_kernel<1, -1, MAX_DISPARITY><<<gdim, bdim, 0, stream>>>( left, right, dest, size.width, size.height, p1, p2, min_disp); } template CV_EXPORTS void aggregateUpleft2DownrightPath<64u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateUpleft2DownrightPath<128u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateUpleft2DownrightPath<256u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateUpright2DownleftPath<64u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateUpright2DownleftPath<128u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateUpright2DownleftPath<256u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateDownright2UpleftPath<64u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateDownright2UpleftPath<128u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateDownright2UpleftPath<256u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateDownleft2UprightPath<64u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateDownleft2UprightPath<128u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); template CV_EXPORTS void aggregateDownleft2UprightPath<256u>( const GpuMat& left, const GpuMat& right, GpuMat& dest, unsigned int p1, unsigned int p2, int min_disp, Stream& stream); } // namespace oblique template <size_t MAX_DISPARITY> void PathAggregation::operator() (const GpuMat& left, const GpuMat& right, GpuMat& dest, int mode, int p1, int p2, int min_disp, Stream& stream) { CV_Assert(left.size() == right.size()); CV_Assert(left.type() == right.type()); CV_Assert(left.type() == CV_32SC1); const int num_paths = mode == StereoSGBM::MODE_HH4 ? 
4 : 8; stream.waitForCompletion(); const Size size = left.size(); const int buffer_step = size.width * size.height * static_cast<int>(MAX_DISPARITY); CV_Assert(dest.rows == 1 && buffer_step * num_paths == dest.cols); for (int i = 0; i < num_paths; ++i) { subs[i] = dest.colRange(i * buffer_step, (i + 1) * buffer_step); } vertical::aggregateUp2DownPath<MAX_DISPARITY>(left, right, subs[0], p1, p2, min_disp, streams[0]); vertical::aggregateDown2UpPath<MAX_DISPARITY>(left, right, subs[1], p1, p2, min_disp, streams[1]); horizontal::aggregateLeft2RightPath<MAX_DISPARITY>(left, right, subs[2], p1, p2, min_disp, streams[2]); horizontal::aggregateRight2LeftPath<MAX_DISPARITY>(left, right, subs[3], p1, p2, min_disp, streams[3]); if (mode == StereoSGBM::MODE_HH) { oblique::aggregateUpleft2DownrightPath<MAX_DISPARITY>(left, right, subs[4], p1, p2, min_disp, streams[4]); oblique::aggregateUpright2DownleftPath<MAX_DISPARITY>(left, right, subs[5], p1, p2, min_disp, streams[5]); oblique::aggregateDownright2UpleftPath<MAX_DISPARITY>(left, right, subs[6], p1, p2, min_disp, streams[6]); oblique::aggregateDownleft2UprightPath<MAX_DISPARITY>(left, right, subs[7], p1, p2, min_disp, streams[7]); } // synchronization for (int i = 0; i < num_paths; ++i) { events[i].record(streams[i]); stream.waitEvent(events[i]); streams[i].waitForCompletion(); } } template void PathAggregation::operator()< 64>(const GpuMat& left, const GpuMat& right, GpuMat& dest, int mode, int p1, int p2, int min_disp, Stream& stream); template void PathAggregation::operator()<128>(const GpuMat& left, const GpuMat& right, GpuMat& dest, int mode, int p1, int p2, int min_disp, Stream& stream); template void PathAggregation::operator()<256>(const GpuMat& left, const GpuMat& right, GpuMat& dest, int mode, int p1, int p2, int min_disp, Stream& stream); } // namespace path_aggregation namespace winner_takes_all { namespace { static constexpr unsigned int WARPS_PER_BLOCK = 8u; static constexpr unsigned int BLOCK_SIZE = WARPS_PER_BLOCK * cudev::WARP_SIZE; __device__ inline uint32_t pack_cost_index(uint32_t cost, uint32_t index) { union { uint32_t uint32; ushort2 uint16x2; } u; u.uint16x2.x = static_cast<uint16_t>(index); u.uint16x2.y = static_cast<uint16_t>(cost); return u.uint32; } __device__ uint32_t unpack_cost(uint32_t packed) { return packed >> 16; } __device__ int unpack_index(uint32_t packed) { return packed & 0xffffu; } using ComputeDisparity = uint32_t(*)(uint32_t, uint32_t, uint16_t*); __device__ inline uint32_t compute_disparity_normal(uint32_t disp, uint32_t cost = 0, uint16_t* smem = nullptr) { return disp; } template <size_t MAX_DISPARITY> __device__ inline uint32_t compute_disparity_subpixel(uint32_t disp, uint32_t cost, uint16_t* smem) { uint32_t subp = disp; subp <<= StereoSGBM::DISP_SHIFT; if (disp > 0 && disp < MAX_DISPARITY - 1) { const int left = smem[disp - 1]; const int right = smem[disp + 1]; const int numer = left - right; const int denom = left - 2 * cost + right; subp += ((numer << StereoSGBM::DISP_SHIFT) + denom) / (2 * denom); } return subp; } template <unsigned int MAX_DISPARITY, unsigned int NUM_PATHS, ComputeDisparity compute_disparity = compute_disparity_normal> __global__ void winner_takes_all_kernel( const PtrStep<uint8_t> _src, PtrStep<int16_t> _left_dest, PtrStep<int16_t> _right_dest, int width, int height, float uniqueness) { static const unsigned int ACCUMULATION_PER_THREAD = 16u; static const unsigned int REDUCTION_PER_THREAD = MAX_DISPARITY / cudev::WARP_SIZE; static const unsigned int ACCUMULATION_INTERVAL = 
ACCUMULATION_PER_THREAD / REDUCTION_PER_THREAD; static const unsigned int UNROLL_DEPTH = (REDUCTION_PER_THREAD > ACCUMULATION_INTERVAL) ? REDUCTION_PER_THREAD : ACCUMULATION_INTERVAL; const unsigned int cost_step = MAX_DISPARITY * width * height; const unsigned int warp_id = cudev::Warp::warpId(); const unsigned int lane_id = cudev::Warp::laneId(); const unsigned int y = blockIdx.x * WARPS_PER_BLOCK + warp_id; const PtrStep<uint8_t> src{ (uint8_t*)&_src(0, y * MAX_DISPARITY * width), height * width * MAX_DISPARITY * NUM_PATHS }; PtrStep<int16_t> left_dest{ _left_dest.ptr(y), _left_dest.step }; PtrStep<int16_t> right_dest{ _right_dest.ptr(y), _right_dest.step }; if (y >= height) { return; } __shared__ uint16_t smem_cost_sum[WARPS_PER_BLOCK][ACCUMULATION_INTERVAL][MAX_DISPARITY]; uint32_t right_best[REDUCTION_PER_THREAD]; for (unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i) { right_best[i] = 0xffffffffu; } for (unsigned int x0 = 0; x0 < width; x0 += UNROLL_DEPTH) { #pragma unroll for (unsigned int x1 = 0; x1 < UNROLL_DEPTH; ++x1) { if (x1 % ACCUMULATION_INTERVAL == 0) { const unsigned int k = lane_id * ACCUMULATION_PER_THREAD; const unsigned int k_hi = k / MAX_DISPARITY; const unsigned int k_lo = k % MAX_DISPARITY; const unsigned int x = x0 + x1 + k_hi; if (x < width) { const unsigned int offset = x * MAX_DISPARITY + k_lo; uint32_t sum[ACCUMULATION_PER_THREAD]; for (unsigned int i = 0; i < ACCUMULATION_PER_THREAD; ++i) { sum[i] = 0; } for (unsigned int p = 0; p < NUM_PATHS; ++p) { uint32_t load_buffer[ACCUMULATION_PER_THREAD]; load_uint8_vector<ACCUMULATION_PER_THREAD>( load_buffer, &src(0, p * cost_step + offset)); for (unsigned int i = 0; i < ACCUMULATION_PER_THREAD; ++i) { sum[i] += load_buffer[i]; } } store_uint16_vector<ACCUMULATION_PER_THREAD>( &smem_cost_sum[warp_id][k_hi][k_lo], sum); } #if CUDA_VERSION >= 9000 __syncwarp(); #else __threadfence_block(); #endif } const unsigned int x = x0 + x1; if (x < width) { // Load sum of costs const unsigned int smem_x = x1 % ACCUMULATION_INTERVAL; const unsigned int k0 = lane_id * REDUCTION_PER_THREAD; uint32_t local_cost_sum[REDUCTION_PER_THREAD]; load_uint16_vector<REDUCTION_PER_THREAD>( local_cost_sum, &smem_cost_sum[warp_id][smem_x][k0]); // Pack sum of costs and dispairty uint32_t local_packed_cost[REDUCTION_PER_THREAD]; for (unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i) { local_packed_cost[i] = pack_cost_index(local_cost_sum[i], k0 + i); } // Update left uint32_t best = 0xffffffffu; for (unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i) { best = ::min(best, local_packed_cost[i]); } best = subgroup_min<WARPS_PER_BLOCK, cudev::WARP_SIZE>(best, 0xffffffffu); // Update right #pragma unroll for (unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i) { const unsigned int k = lane_id * REDUCTION_PER_THREAD + i; const int p = static_cast<int>(((x - k) & ~(MAX_DISPARITY - 1)) + k); const unsigned int d = static_cast<unsigned int>(x - p); uint32_t recv = detail::shfl<WARPS_PER_BLOCK>(local_packed_cost[(REDUCTION_PER_THREAD - i + x1) % REDUCTION_PER_THREAD], d / REDUCTION_PER_THREAD); right_best[i] = ::min(right_best[i], recv); if (d == MAX_DISPARITY - 1) { if (0 <= p) { right_dest(0, p) = compute_disparity_normal(unpack_index(right_best[i])); } right_best[i] = 0xffffffffu; } } // Resume updating left to avoid execution dependency const uint32_t bestCost = unpack_cost(best); const int bestDisp = unpack_index(best); bool uniq = true; for (unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i) { const uint32_t x = local_packed_cost[i]; const bool 
uniq1 = unpack_cost(x) * uniqueness >= bestCost; const bool uniq2 = ::abs(unpack_index(x) - bestDisp) <= 1; uniq &= uniq1 || uniq2; } uniq = subgroup_and<WARPS_PER_BLOCK, cudev::WARP_SIZE>(uniq, 0xffffffffu); if (lane_id == 0) { left_dest(0, x) = uniq ? compute_disparity(bestDisp, bestCost, smem_cost_sum[warp_id][smem_x]) : INVALID_DISP; } } } } for (unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i) { const unsigned int k = lane_id * REDUCTION_PER_THREAD + i; const int p = static_cast<int>(((width - k) & ~(MAX_DISPARITY - 1)) + k); if (0 <= p && p < width) { right_dest(0, p) = compute_disparity_normal(unpack_index(right_best[i])); } } } } // anonymous namespace template <size_t MAX_DISPARITY> void winnerTakesAll(const GpuMat& src, GpuMat& left, GpuMat& right, float uniqueness, bool subpixel, int mode, cv::cuda::Stream& _stream) { cv::Size size = left.size(); int num_paths = mode == StereoSGBM::MODE_HH4 ? 4 : 8; CV_Assert(src.rows == 1 && src.cols == size.width * size.height * static_cast<int>(MAX_DISPARITY) * num_paths); CV_Assert(size == right.size()); CV_Assert(left.type() == right.type()); CV_Assert(src.type() == CV_8UC1); CV_Assert(mode == StereoSGBM::MODE_HH || mode == StereoSGBM::MODE_HH4); const int gdim = cudev::divUp(size.height, WARPS_PER_BLOCK); const int bdim = BLOCK_SIZE; cudaStream_t stream = cv::cuda::StreamAccessor::getStream(_stream); if (subpixel && mode == StereoSGBM::MODE_HH) { winner_takes_all_kernel<MAX_DISPARITY, 8, compute_disparity_subpixel<MAX_DISPARITY>><<<gdim, bdim, 0, stream>>>( src, left, right, size.width, size.height, uniqueness); } else if (subpixel && mode == StereoSGBM::MODE_HH4) { winner_takes_all_kernel<MAX_DISPARITY, 4, compute_disparity_subpixel<MAX_DISPARITY>><<<gdim, bdim, 0, stream>>>( src, left, right, size.width, size.height, uniqueness); } else if (!subpixel && mode == StereoSGBM::MODE_HH) { winner_takes_all_kernel<MAX_DISPARITY, 8, compute_disparity_normal><<<gdim, bdim, 0, stream>>>( src, left, right, size.width, size.height, uniqueness); } else /* if (!subpixel && mode == StereoSGBM::MODE_HH4) */ { winner_takes_all_kernel<MAX_DISPARITY, 4, compute_disparity_normal><<<gdim, bdim, 0, stream>>>( src, left, right, size.width, size.height, uniqueness); } } template CV_EXPORTS void winnerTakesAll< 64>(const GpuMat&, GpuMat&, GpuMat&, float, bool, int, cv::cuda::Stream&); template CV_EXPORTS void winnerTakesAll<128>(const GpuMat&, GpuMat&, GpuMat&, float, bool, int, cv::cuda::Stream&); template CV_EXPORTS void winnerTakesAll<256>(const GpuMat&, GpuMat&, GpuMat&, float, bool, int, cv::cuda::Stream&); } // namespace winner_takes_all namespace median_filter { namespace { const int BLOCK_X = 16; const int BLOCK_Y = 16; const int KSIZE = 3; const int RADIUS = KSIZE / 2; const int KSIZE_SQ = KSIZE * KSIZE; template <typename T> __device__ inline void swap(T& x, T& y) { T tmp(x); x = y; y = tmp; } // sort, min, max of 1 element template <typename T, int V = 1> __device__ inline void dev_sort(T& x, T& y) { if (x > y) swap(x, y); } template <typename T, int V = 1> __device__ inline void dev_min(T& x, T& y) { x = ::min(x, y); } template <typename T, int V = 1> __device__ inline void dev_max(T& x, T& y) { y = ::max(x, y); } // sort, min, max of 2 elements __device__ inline void dev_sort_2(uint32_t& x, uint32_t& y) { const uint32_t mask = __vcmpgtu2(x, y); const uint32_t tmp = (x ^ y) & mask; x ^= tmp; y ^= tmp; } __device__ inline void dev_min_2(uint32_t& x, uint32_t& y) { x = __vminu2(x, y); } __device__ inline void dev_max_2(uint32_t& x, uint32_t& y) { y = 
__vmaxu2(x, y); } template <> __device__ inline void dev_sort<uint32_t, 2>(uint32_t& x, uint32_t& y) { dev_sort_2(x, y); } template <> __device__ inline void dev_min<uint32_t, 2>(uint32_t& x, uint32_t& y) { dev_min_2(x, y); } template <> __device__ inline void dev_max<uint32_t, 2>(uint32_t& x, uint32_t& y) { dev_max_2(x, y); } // sort, min, max of 4 elements __device__ inline void dev_sort_4(uint32_t& x, uint32_t& y) { const uint32_t mask = __vcmpgtu4(x, y); const uint32_t tmp = (x ^ y) & mask; x ^= tmp; y ^= tmp; } __device__ inline void dev_min_4(uint32_t& x, uint32_t& y) { x = __vminu4(x, y); } __device__ inline void dev_max_4(uint32_t& x, uint32_t& y) { y = __vmaxu4(x, y); } template <> __device__ inline void dev_sort<uint32_t, 4>(uint32_t& x, uint32_t& y) { dev_sort_4(x, y); } template <> __device__ inline void dev_min<uint32_t, 4>(uint32_t& x, uint32_t& y) { dev_min_4(x, y); } template <> __device__ inline void dev_max<uint32_t, 4>(uint32_t& x, uint32_t& y) { dev_max_4(x, y); } template <typename T, int V = 1> __device__ inline void median_selection_network_9(T* buf) { #define SWAP_OP(i, j) dev_sort<T, V>(buf[i], buf[j]) #define MIN_OP(i, j) dev_min<T, V>(buf[i], buf[j]) #define MAX_OP(i, j) dev_max<T, V>(buf[i], buf[j]) SWAP_OP(0, 1); SWAP_OP(3, 4); SWAP_OP(6, 7); SWAP_OP(1, 2); SWAP_OP(4, 5); SWAP_OP(7, 8); SWAP_OP(0, 1); SWAP_OP(3, 4); SWAP_OP(6, 7); MAX_OP(0, 3); MAX_OP(3, 6); SWAP_OP(1, 4); MIN_OP(4, 7); MAX_OP(1, 4); MIN_OP(5, 8); MIN_OP(2, 5); SWAP_OP(2, 4); MIN_OP(4, 6); MAX_OP(2, 4); #undef SWAP_OP #undef MIN_OP #undef MAX_OP } __global__ void median_kernel_3x3_8u(const PtrStepSz<uint8_t> src, PtrStep<uint8_t> dst) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < RADIUS || x >= src.cols - RADIUS || y < RADIUS || y >= src.rows - RADIUS) return; uint8_t buf[KSIZE_SQ]; for (int i = 0; i < KSIZE_SQ; i++) buf[i] = src(y - RADIUS + i / KSIZE, x - RADIUS + i % KSIZE); median_selection_network_9(buf); dst(y, x) = buf[KSIZE_SQ / 2]; } __global__ void median_kernel_3x3_16u(const PtrStepSz<uint16_t> src, PtrStep<uint16_t> dst) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < RADIUS || x >= src.cols - RADIUS || y < RADIUS || y >= src.rows - RADIUS) return; uint16_t buf[KSIZE_SQ]; for (int i = 0; i < KSIZE_SQ; i++) buf[i] = src(y - RADIUS + i / KSIZE, x - RADIUS + i % KSIZE); median_selection_network_9(buf); dst(y, x) = buf[KSIZE_SQ / 2]; } __global__ void median_kernel_3x3_8u_v4(const PtrStepSz<uint8_t> src, PtrStep<uint8_t> dst) { const int x_4 = 4 * (blockIdx.x * blockDim.x + threadIdx.x); const int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < RADIUS || y >= src.rows - RADIUS) return; uint32_t buf[KSIZE_SQ]; if (x_4 >= 4 && x_4 + 7 < src.cols) { buf[0] = *((const uint32_t*)&src(y - 1, x_4 - 4)); buf[1] = *((const uint32_t*)&src(y - 1, x_4 - 0)); buf[2] = *((const uint32_t*)&src(y - 1, x_4 + 4)); buf[3] = *((const uint32_t*)&src(y - 0, x_4 - 4)); buf[4] = *((const uint32_t*)&src(y - 0, x_4 - 0)); buf[5] = *((const uint32_t*)&src(y - 0, x_4 + 4)); buf[6] = *((const uint32_t*)&src(y + 1, x_4 - 4)); buf[7] = *((const uint32_t*)&src(y + 1, x_4 - 0)); buf[8] = *((const uint32_t*)&src(y + 1, x_4 + 4)); buf[0] = (buf[1] << 8) | (buf[0] >> 24); buf[2] = (buf[1] >> 8) | (buf[2] << 24); buf[3] = (buf[4] << 8) | (buf[3] >> 24); buf[5] = (buf[4] >> 8) | (buf[5] << 24); buf[6] = (buf[7] << 8) | (buf[6] >> 24); buf[8] = (buf[7] >> 8) | (buf[8] << 24); 
median_selection_network_9<uint32_t, 4>(buf); *((uint32_t*)&dst(y, x_4)) = buf[KSIZE_SQ / 2]; } else if (x_4 == 0) { for (int x = RADIUS; x < 4; x++) { uint8_t* buf_u8 = (uint8_t*)buf; for (int i = 0; i < KSIZE_SQ; i++) buf_u8[i] = src(y - RADIUS + i / KSIZE, x - RADIUS + i % KSIZE); median_selection_network_9(buf_u8); dst(y, x) = buf_u8[KSIZE_SQ / 2]; } } else if (x_4 < src.cols) { for (int x = x_4; x < ::min(x_4 + 4, src.cols - RADIUS); x++) { uint8_t* buf_u8 = (uint8_t*)buf; for (int i = 0; i < KSIZE_SQ; i++) buf_u8[i] = src(y - RADIUS + i / KSIZE, x - RADIUS + i % KSIZE); median_selection_network_9(buf_u8); dst(y, x) = buf_u8[KSIZE_SQ / 2]; } } } __global__ void median_kernel_3x3_16u_v2(const PtrStepSz<uint16_t> src, PtrStep<uint16_t> dst) { const int x_2 = 2 * (blockIdx.x * blockDim.x + threadIdx.x); const int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < RADIUS || y >= src.rows - RADIUS) return; uint32_t buf[KSIZE_SQ]; if (x_2 >= 2 && x_2 + 3 < src.cols) { buf[0] = *((const uint32_t*)&src(y - 1, x_2 - 2)); buf[1] = *((const uint32_t*)&src(y - 1, x_2 - 0)); buf[2] = *((const uint32_t*)&src(y - 1, x_2 + 2)); buf[3] = *((const uint32_t*)&src(y - 0, x_2 - 2)); buf[4] = *((const uint32_t*)&src(y - 0, x_2 - 0)); buf[5] = *((const uint32_t*)&src(y - 0, x_2 + 2)); buf[6] = *((const uint32_t*)&src(y + 1, x_2 - 2)); buf[7] = *((const uint32_t*)&src(y + 1, x_2 - 0)); buf[8] = *((const uint32_t*)&src(y + 1, x_2 + 2)); buf[0] = (buf[1] << 16) | (buf[0] >> 16); buf[2] = (buf[1] >> 16) | (buf[2] << 16); buf[3] = (buf[4] << 16) | (buf[3] >> 16); buf[5] = (buf[4] >> 16) | (buf[5] << 16); buf[6] = (buf[7] << 16) | (buf[6] >> 16); buf[8] = (buf[7] >> 16) | (buf[8] << 16); median_selection_network_9<uint32_t, 2>(buf); *((uint32_t*)&dst(y, x_2)) = buf[KSIZE_SQ / 2]; } else if (x_2 == 0) { for (int x = RADIUS; x < 2; x++) { uint8_t* buf_u8 = (uint8_t*)buf; for (int i = 0; i < KSIZE_SQ; i++) buf_u8[i] = src(y - RADIUS + i / KSIZE, x - RADIUS + i % KSIZE); median_selection_network_9(buf_u8); dst(y, x) = buf_u8[KSIZE_SQ / 2]; } } else if (x_2 < src.cols) { for (int x = x_2; x < ::min(x_2 + 2, src.cols - RADIUS); x++) { uint8_t* buf_u8 = (uint8_t*)buf; for (int i = 0; i < KSIZE_SQ; i++) buf_u8[i] = src(y - RADIUS + i / KSIZE, x - RADIUS + i % KSIZE); median_selection_network_9(buf_u8); dst(y, x) = buf_u8[KSIZE_SQ / 2]; } } } template <typename T> void median_filter(const PtrStepSz<T> d_src, PtrStep<T> d_dst, Stream& _stream); template <> void median_filter<uint8_t>(const PtrStepSz<uint8_t> d_src, PtrStep<uint8_t> d_dst, Stream& _stream) { cudaStream_t stream = cv::cuda::StreamAccessor::getStream(_stream); if ((d_src.step / sizeof(uint8_t)) % 4 == 0) { const dim3 block(BLOCK_X, BLOCK_Y); const dim3 grid(cudev::divUp(d_src.cols / 4, block.x), cudev::divUp(d_src.rows, block.y)); median_kernel_3x3_8u_v4<<<grid, block, 0, stream>>>(d_src, d_dst); } else { const dim3 block(BLOCK_X, BLOCK_Y); const dim3 grid(cudev::divUp(d_src.cols, block.x), cudev::divUp(d_src.rows, block.y)); median_kernel_3x3_8u<<<grid, block, 0, stream>>>(d_src, d_dst); } CV_CUDEV_SAFE_CALL(cudaGetLastError()); } template <> void median_filter(const PtrStepSz<uint16_t> d_src, PtrStep<uint16_t> d_dst, Stream& _stream) { cudaStream_t stream = cv::cuda::StreamAccessor::getStream(_stream); if ((d_src.step / sizeof(uint16_t)) % 2 == 0) { const dim3 block(BLOCK_X, BLOCK_Y); const dim3 grid(cudev::divUp(d_src.cols / 2, block.x), cudev::divUp(d_src.rows, block.y)); median_kernel_3x3_16u_v2<<<grid, block, 0, stream>>>(d_src, d_dst); } else { const 
dim3 block(BLOCK_X, BLOCK_Y); const dim3 grid(cudev::divUp(d_src.cols, block.x), cudev::divUp(d_src.rows, block.y)); median_kernel_3x3_16u<<<grid, block, 0, stream>>>(d_src, d_dst); } CV_CUDEV_SAFE_CALL(cudaGetLastError()); } } // anonymous namespace void medianFilter(const GpuMat& src, GpuMat& dst, Stream& stream) { CV_Assert(src.size() == dst.size()); CV_Assert(src.type() == CV_16SC1); CV_Assert(src.type() == dst.type()); switch (src.type()) { case CV_8UC1: median_filter<uint8_t>(src, dst, stream); break; case CV_16SC1: case CV_16UC1: median_filter<uint16_t>(src, dst, stream); break; default: CV_Error(cv::Error::BadDepth, "Unsupported depth"); } } } // namespace median_filter namespace check_consistency { namespace { template<typename SRC_T, typename DST_T> __global__ void check_consistency_kernel(PtrStep<DST_T> d_leftDisp, const PtrStep<DST_T> d_rightDisp, const PtrStep<SRC_T> d_left, int width, int height, bool subpixel) { const int j = blockIdx.x * blockDim.x + threadIdx.x; const int i = blockIdx.y * blockDim.y + threadIdx.y; // left-right consistency check, only on leftDisp, but could be done for rightDisp too SRC_T mask = d_left(i, j); DST_T org = d_leftDisp(i, j); int d = org; if (subpixel) { d >>= StereoMatcher::DISP_SHIFT; } int k = j - d; if (mask == 0 || org == INVALID_DISP || (k >= 0 && k < width && abs(d_rightDisp(i, k) - d) > 1)) { // masked or left-right inconsistent pixel -> invalid d_leftDisp(i, j) = static_cast<DST_T>(INVALID_DISP); } } template <typename disp_type, typename image_type> void check_consistency(PtrStep<disp_type> d_left_disp, const PtrStep<disp_type> d_right_disp, const PtrStep<image_type> d_src_left, int width, int height, bool subpixel, Stream& _stream) { const dim3 blocks(width / 16, height / 16); const dim3 threads(16, 16); cudaStream_t stream = cv::cuda::StreamAccessor::getStream(_stream); check_consistency_kernel<image_type, disp_type><<<blocks, threads, 0, stream>>>(d_left_disp, d_right_disp, d_src_left, width, height, subpixel); CV_CUDEV_SAFE_CALL(cudaGetLastError()); } } // anonymous namespace void checkConsistency(GpuMat& left_disp, const GpuMat& right_disp, const GpuMat& src_left, bool subpixel, Stream& stream) { Size size = left_disp.size(); CV_Assert(size == right_disp.size()); CV_Assert(size == src_left.size()); CV_Assert(left_disp.type() == CV_16SC1); CV_Assert(left_disp.type() == right_disp.type()); CV_Assert(src_left.type() == CV_8UC1 || src_left.type() == CV_16UC1); switch (src_left.type()) { case CV_8UC1: check_consistency<uint16_t, uint8_t>(left_disp, right_disp, src_left, size.width, size.height, subpixel, stream); break; case CV_16SC1: case CV_16UC1: check_consistency<uint16_t, uint16_t>(left_disp, right_disp, src_left, size.width, size.height, subpixel, stream); break; default: CV_Error(cv::Error::BadDepth, "Unsupported depth"); } } } // namespace check_consistency namespace correct_disparity_range { namespace { __global__ void correct_disparity_range_kernel( PtrStepSz<uint16_t> disp, int min_disp_scaled, int invalid_disp_scaled) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x >= disp.cols || y >= disp.rows) { return; } uint16_t d = disp(y, x); if (d == INVALID_DISP) { d = invalid_disp_scaled; } else { d += min_disp_scaled; } disp(y, x) = d; } } // anonymous namespace void correctDisparityRange( GpuMat& disp, bool subpixel, int min_disp, Stream& _stream) { CV_Assert(disp.type() == CV_16SC1); static constexpr int SIZE = 16; cv::Size size = disp.size(); const dim3 
blocks(cudev::divUp(size.width, SIZE), cudev::divUp(size.height, SIZE)); const dim3 threads(SIZE, SIZE); cudaStream_t stream = cv::cuda::StreamAccessor::getStream(_stream); const int scale = subpixel ? StereoSGBM::DISP_SCALE : 1; const int min_disp_scaled = min_disp * scale; const int invalid_disp_scaled = (min_disp - 1) * scale; correct_disparity_range_kernel<<<blocks, threads, 0, stream>>>(disp, min_disp_scaled, invalid_disp_scaled); } } // namespace correct_disparity_range } // namespace stereosgm }}} // namespace cv { namespace cuda { namespace device { #endif // HAVE_OPENCV_CUDEV
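// ---------------------------------------------------------------------------
// Illustrative usage sketch (added for exposition; not part of the original
// stereosgm.cu above). It shows how the stages defined above -- census
// transform, path aggregation, winner-takes-all, median filtering, left/right
// consistency check and disparity-range correction -- could be chained for one
// stereo pair. Buffer types and sizes follow the CV_Assert checks inside each
// stage, and the code assumes the includes/definitions above are visible.
// The wrapper name computeDisparitySketch, the penalty/uniqueness values, and
// the assumption that PathAggregation is default-constructible are
// hypothetical; the real driver is the StereoSGM class of the cudastereo
// module.
#ifdef HAVE_OPENCV_CUDEV
namespace stereosgm_usage_sketch {

using cv::cuda::GpuMat;
using cv::cuda::Stream;
namespace sgm = cv::cuda::device::stereosgm;

inline void computeDisparitySketch(const GpuMat& d_left,   // CV_8UC1 or CV_16UC1
                                   const GpuMat& d_right,  // same size/type as d_left
                                   GpuMat& d_disparity,    // CV_16SC1, same size
                                   Stream& stream)
{
    constexpr size_t MAX_DISPARITY = 128;              // 64, 128 and 256 are instantiated above
    const int mode = cv::StereoSGBM::MODE_HH4;         // 4 aggregation paths
    const int num_paths = (mode == cv::StereoSGBM::MODE_HH4) ? 4 : 8;
    const cv::Size size = d_left.size();

    // 1. Census transform: one 32-bit descriptor per pixel.
    GpuMat census_left(size, CV_32SC1), census_right(size, CV_32SC1);
    sgm::census_transform::censusTransform(d_left, census_left, stream);
    sgm::census_transform::censusTransform(d_right, census_right, stream);

    // 2. Path aggregation: a single row holding W*H*MAX_DISPARITY uint8 costs per path.
    GpuMat aggregated(1, size.area() * static_cast<int>(MAX_DISPARITY) * num_paths, CV_8UC1);
    sgm::path_aggregation::PathAggregation aggregator;  // assumed default-constructible
    aggregator.operator()<MAX_DISPARITY>(census_left, census_right, aggregated,
                                         mode, /*p1=*/10, /*p2=*/120, /*min_disp=*/0, stream);

    // 3. Winner-takes-all over the summed path costs.
    GpuMat disp_left(size, CV_16SC1), disp_right(size, CV_16SC1);
    sgm::winner_takes_all::winnerTakesAll<MAX_DISPARITY>(aggregated, disp_left, disp_right,
                                                         /*uniqueness=*/0.95f, /*subpixel=*/false,
                                                         mode, stream);

    // 4. Post-processing (simplified: the full pipeline also filters disp_right).
    sgm::median_filter::medianFilter(disp_left, d_disparity, stream);
    sgm::check_consistency::checkConsistency(d_disparity, disp_right, d_left,
                                             /*subpixel=*/false, stream);
    sgm::correct_disparity_range::correctDisparityRange(d_disparity, /*subpixel=*/false,
                                                        /*min_disp=*/0, stream);
}

} // namespace stereosgm_usage_sketch
#endif // HAVE_OPENCV_CUDEV
// ---------------------------------------------------------------------------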
#include "caffe/layers/bn_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> void BNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* const_bottom_data = bottom[0]->gpu_data(); const Dtype* const_top_data = top[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* scale_data = this->blobs_[0]->gpu_data(); const Dtype* shift_data = this->blobs_[1]->gpu_data(); // Mean normalization if (frozen_ || this->phase_ == TEST) { // Use the moving average mean caffe_copy(batch_statistic_.count(), this->blobs_[2]->gpu_data(), batch_statistic_.mutable_gpu_data()); } else { // Compute the mean by averaging over spatial and batch dimensions. caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1) / (height_ * width_), const_bottom_data, spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1) / num_, spatial_statistic_.gpu_data(), batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data()); // Add to the moving average if (!frozen_) { caffe_gpu_axpby(batch_statistic_.count(), Dtype(1) - bn_momentum_, batch_statistic_.gpu_data(), bn_momentum_, this->blobs_[2]->mutable_gpu_data()); } } // Broadcast the mean vector caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), batch_sum_multiplier_.gpu_data(), batch_statistic_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(-1), spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), broadcast_buffer_.mutable_gpu_data()); // Subtract caffe_gpu_add(broadcast_buffer_.count(), const_bottom_data, broadcast_buffer_.gpu_data(), top_data); // Variance normalization if (frozen_ || this->phase_ == TEST) { // Use the moving average variance caffe_copy(batch_statistic_.count(), this->blobs_[3]->gpu_data(), batch_statistic_.mutable_gpu_data()); } else { caffe_gpu_powx(broadcast_buffer_.count(), const_top_data, Dtype(2), broadcast_buffer_.mutable_gpu_data()); caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1) / (height_ * width_), broadcast_buffer_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1) / num_, spatial_statistic_.gpu_data(), batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data()); // Add to the moving average caffe_gpu_axpby(batch_statistic_.count(), Dtype(1) - bn_momentum_, batch_statistic_.gpu_data(), bn_momentum_, this->blobs_[3]->mutable_gpu_data()); } // Add eps caffe_gpu_add_scalar(batch_statistic_.count(), bn_eps_, batch_statistic_.mutable_gpu_data()); // Inverse standard deviation caffe_gpu_powx(batch_statistic_.count(), batch_statistic_.gpu_data(), Dtype(-0.5), batch_statistic_.mutable_gpu_data()); // Broadcast the inverse std caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), batch_sum_multiplier_.gpu_data(), batch_statistic_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), broadcast_buffer_.mutable_gpu_data()); // Multiply with the inverse std 
caffe_gpu_mul(broadcast_buffer_.count(), const_top_data, broadcast_buffer_.gpu_data(), top_data); // Save the normalized inputs and std for backprop if (!frozen_) { caffe_copy(broadcast_buffer_.count(), const_top_data, x_norm_.mutable_gpu_data()); caffe_copy(batch_statistic_.count(), batch_statistic_.gpu_data(), x_inv_std_.mutable_gpu_data()); } // Scale caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), batch_sum_multiplier_.gpu_data(), scale_data, Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), broadcast_buffer_.mutable_gpu_data()); caffe_gpu_mul(broadcast_buffer_.count(), const_top_data, broadcast_buffer_.gpu_data(), top_data); // Shift caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), batch_sum_multiplier_.gpu_data(), shift_data, Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), broadcast_buffer_.mutable_gpu_data()); caffe_gpu_add(broadcast_buffer_.count(), const_top_data, broadcast_buffer_.gpu_data(), top_data); } template <typename Dtype> void BNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (frozen_) { if (propagate_down[0]) { const Dtype* const_top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); // Use the moving average variance caffe_copy(batch_statistic_.count(), this->blobs_[3]->gpu_data(), batch_statistic_.mutable_gpu_data()); caffe_gpu_add_scalar(batch_statistic_.count(), bn_eps_, batch_statistic_.mutable_gpu_data()); caffe_gpu_powx(batch_statistic_.count(), batch_statistic_.gpu_data(), Dtype(-0.5), batch_statistic_.mutable_gpu_data()); // Multiple slope with inverse std caffe_gpu_mul(batch_statistic_.count(), this->blobs_[0]->gpu_data(), batch_statistic_.gpu_data(), batch_statistic_.mutable_gpu_data()); // Broadcast caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), batch_sum_multiplier_.gpu_data(), batch_statistic_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), broadcast_buffer_.mutable_gpu_data()); // Elementwise multiply top grad with (slope / std) caffe_gpu_mul(broadcast_buffer_.count(), const_top_diff, broadcast_buffer_.gpu_data(), bottom_diff); } return; } // gradient w.r.t. slope if (this->param_propagate_down_[0]) { const Dtype* const_top_diff = top[0]->gpu_diff(); Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff(); caffe_gpu_mul(broadcast_buffer_.count(), x_norm_.gpu_data(), const_top_diff, broadcast_buffer_.mutable_gpu_data()); caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), broadcast_buffer_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(), batch_sum_multiplier_.gpu_data(), Dtype(1), scale_diff); } // gradient w.r.t. 
bias if (this->param_propagate_down_[1]) { const Dtype* const_top_diff = top[0]->gpu_diff(); Dtype* shift_diff = this->blobs_[1]->mutable_gpu_diff(); caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), const_top_diff, spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(), batch_sum_multiplier_.gpu_data(), Dtype(1), shift_diff); } // gradient w.r.t. normalized inputs if (propagate_down[0]) { const Dtype* const_top_diff = top[0]->gpu_diff(); const Dtype* const_bottom_diff = bottom[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* scale_data = this->blobs_[0]->gpu_data(); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), batch_sum_multiplier_.gpu_data(), scale_data, Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), broadcast_buffer_.mutable_gpu_data()); caffe_gpu_mul(broadcast_buffer_.count(), const_top_diff, broadcast_buffer_.gpu_data(), broadcast_buffer_.mutable_gpu_data()); // sum of x_hat * (dl / dx_hat) caffe_gpu_mul(broadcast_buffer_.count(), x_norm_.gpu_data(), broadcast_buffer_.gpu_data(), bottom_diff); caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), const_bottom_diff, spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(), batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data()); // x_hat times the sum caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), batch_sum_multiplier_.gpu_data(), batch_statistic_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), bottom_diff); caffe_gpu_mul(broadcast_buffer_.count(), x_norm_.gpu_data(), const_bottom_diff, bottom_diff); // Subtract the average of x_hat times the sum caffe_gpu_gemv<Dtype>(CblasNoTrans, num_ * channels_, height_ * width_, Dtype(1), broadcast_buffer_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemv<Dtype>(CblasTrans, num_, channels_, Dtype(1), spatial_statistic_.gpu_data(), batch_sum_multiplier_.gpu_data(), Dtype(0), batch_statistic_.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), batch_sum_multiplier_.gpu_data(), batch_statistic_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(1), bottom_diff); caffe_gpu_axpby(broadcast_buffer_.count(), Dtype(1), broadcast_buffer_.gpu_data(), Dtype(-1) / (num_ * height_ * width_), bottom_diff); // Multiply with the inverse std caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_, channels_, 1, Dtype(1), batch_sum_multiplier_.gpu_data(), x_inv_std_.gpu_data(), Dtype(0), spatial_statistic_.mutable_gpu_data()); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_ * channels_, height_ * width_, 1, Dtype(1), 
spatial_statistic_.gpu_data(), spatial_sum_multiplier_.gpu_data(), Dtype(0), broadcast_buffer_.mutable_gpu_data()); caffe_gpu_mul(broadcast_buffer_.count(), const_bottom_diff, broadcast_buffer_.gpu_data(), bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(BNLayer); } // namespace caffe
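// --- Added sketch -------------------------------------------------------
// A minimal CPU reference (plain C++; the helper names below are mine, not
// part of Caffe) for the statistic computation Forward_gpu above encodes as
// GEMV calls with all-ones "sum multiplier" vectors: treating the input as an
// (N*C) x (H*W) matrix, a product with a ones vector sums out the spatial
// dimensions, and the transposed N x C product with a ones vector then
// averages over the batch, yielding one mean per channel.
#include <cstdio>
#include <vector>

// mean[c] = 1/(N*H*W) * sum over n,h,w of x[(n*C + c)*HW + hw]
std::vector<float> channelMean(const std::vector<float>& x, int N, int C, int HW) {
  std::vector<float> spatial(N * C, 0.f);  // plays the role of spatial_statistic_
  for (int nc = 0; nc < N * C; ++nc) {
    float s = 0.f;
    for (int i = 0; i < HW; ++i) {
      s += x[nc * HW + i];
    }
    spatial[nc] = s / HW;  // first GEMV: row sums scaled by 1/(H*W)
  }
  std::vector<float> mean(C, 0.f);  // plays the role of batch_statistic_
  for (int n = 0; n < N; ++n) {
    for (int c = 0; c < C; ++c) {
      mean[c] += spatial[n * C + c] / N;  // second (transposed) GEMV: average over batch
    }
  }
  return mean;
}

int main() {
  // Two images, three channels, 2x2 spatial: channel c holds the value c
  // everywhere, so the per-channel means should come out as 0, 1, 2.
  const int N = 2, C = 3, HW = 4;
  std::vector<float> x(N * C * HW);
  for (int nc = 0; nc < N * C; ++nc) {
    for (int i = 0; i < HW; ++i) {
      x[nc * HW + i] = (float)(nc % C);
    }
  }
  std::vector<float> mean = channelMean(x, N, C, HW);
  for (int c = 0; c < C; ++c) {
    printf("mean[%d] = %f\n", c, mean[c]);
  }
  return 0;
}
// The broadcast back to N x C x H x W in the layer is the reverse trick: two
// rank-1 GEMMs against the same ones vectors expand the per-channel vector to
// the full blob shape before the elementwise subtract/multiply.
// ------------------------------------------------------------------------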
/// \file GPUReconstructionCUDA.cu /// \author David Rohr #include "GPUReconstructionCUDADef.h" #include "GPUReconstructionCUDAIncludes.h" #include <cuda_profiler_api.h> #include <unistd.h> #include "GPUReconstructionCUDA.h" #include "GPUReconstructionCUDAInternals.h" #include "GPUReconstructionIncludes.h" #include "GPUParamRTC.h" static constexpr size_t REQUIRE_MIN_MEMORY = 1024L * 1024 * 1024; static constexpr size_t REQUIRE_MEMORY_RESERVED = 512L * 1024 * 1024; static constexpr size_t REQUIRE_FREE_MEMORY_RESERVED_PER_SM = 40L * 1024 * 1024; static constexpr size_t RESERVE_EXTRA_MEM_THRESHOLD = 10L * 1024 * 1024 * 1024; static constexpr size_t RESERVE_EXTRA_MEM_OFFSET = 1L * 512 * 1024 * 1024; using namespace GPUCA_NAMESPACE::gpu; #ifdef GPUCA_USE_TEXTURES texture<cahit2, cudaTextureType1D, cudaReadModeElementType> gAliTexRefu2; texture<calink, cudaTextureType1D, cudaReadModeElementType> gAliTexRefu; #endif __global__ void dummyInitKernel(void*) { } #if defined(HAVE_O2HEADERS) && !defined(GPUCA_NO_ITS_TRAITS) #include "ITStrackingCUDA/TrackerTraitsNV.h" #include "ITStrackingCUDA/VertexerTraitsGPU.h" #else namespace o2::its { class TrackerTraitsNV : public TrackerTraits { }; class VertexerTraitsGPU : public VertexerTraits { }; } // namespace o2::its #endif class GPUDebugTiming { public: GPUDebugTiming(bool d, void** t, cudaStream_t* s, GPUReconstruction::krnlSetup& x, GPUReconstructionCUDABackend* r = nullptr) : mDeviceTimers(t), mStreams(s), mXYZ(x), mRec(r), mDo(d) { if (mDo) { if (mDeviceTimers) { GPUFailedMsg(cudaEventRecord((cudaEvent_t)mDeviceTimers[0], mStreams[mXYZ.x.stream])); } else { mTimer.ResetStart(); } } } ~GPUDebugTiming() { if (mDo) { if (mDeviceTimers) { GPUFailedMsg(cudaEventRecord((cudaEvent_t)mDeviceTimers[1], mStreams[mXYZ.x.stream])); GPUFailedMsg(cudaEventSynchronize((cudaEvent_t)mDeviceTimers[1])); float v; GPUFailedMsg(cudaEventElapsedTime(&v, (cudaEvent_t)mDeviceTimers[0], (cudaEvent_t)mDeviceTimers[1])); mXYZ.t = v * 1.e-3; } else { GPUFailedMsg(cudaStreamSynchronize(mStreams[mXYZ.x.stream])); mXYZ.t = mTimer.GetCurrentElapsedTime(); } } } private: void** mDeviceTimers; cudaStream_t* mStreams; GPUReconstruction::krnlSetup& mXYZ; GPUReconstructionCUDABackend* mRec; HighResTimer mTimer; bool mDo; }; #include "GPUReconstructionIncludesDevice.h" #ifndef GPUCA_ALIROOT_LIB extern "C" char _curtc_GPUReconstructionCUDArtc_cu_src[]; extern "C" unsigned int _curtc_GPUReconstructionCUDArtc_cu_src_size; extern "C" char _curtc_GPUReconstructionCUDArtc_cu_command[]; #endif /* // Not using templated kernel any more, since nvidia profiler does not resolve template names template <class T, int I, typename... Args> GPUg() void runKernelCUDA(GPUCA_CONSMEM_PTR int iSlice_internal, Args... 
args) { GPUshared() typename T::GPUSharedMemory smem; T::template Thread<I>(get_num_groups(0), get_local_size(0), get_group_id(0), get_local_id(0), smem, T::Processor(GPUCA_CONSMEM)[iSlice_internal], args...); } */ #undef GPUCA_KRNL_REG #define GPUCA_KRNL_REG(args) __launch_bounds__(GPUCA_M_MAX2_3(GPUCA_M_STRIP(args))) #define GPUCA_KRNL(x_class, x_attributes, x_arguments, x_forward) \ GPUCA_KRNL_PROP(x_class, x_attributes) \ GPUCA_KRNL_WRAP(GPUCA_KRNL_, x_class, x_attributes, x_arguments, x_forward) #define GPUCA_KRNL_CALL_single(x_class, x_attributes, x_arguments, x_forward) \ GPUCA_M_CAT(krnl_, GPUCA_M_KRNL_NAME(x_class))<<<x.nBlocks, x.nThreads, 0, me->mInternals->Streams[x.stream]>>>(GPUCA_CONSMEM_CALL y.start, args...); #define GPUCA_KRNL_CALL_multi(x_class, x_attributes, x_arguments, x_forward) \ GPUCA_M_CAT3(krnl_, GPUCA_M_KRNL_NAME(x_class), _multi)<<<x.nBlocks, x.nThreads, 0, me->mInternals->Streams[x.stream]>>>(GPUCA_CONSMEM_CALL y.start, y.num, args...); #include "GPUReconstructionKernels.h" #undef GPUCA_KRNL template <bool multi, class T, int I> int GPUReconstructionCUDAInternals::getRTCkernelNum(int k) { static int num = k; if (num < 0) { throw std::runtime_error("Invalid kernel"); } return num; } template <> void GPUReconstructionCUDABackend::runKernelBackendInternal<GPUMemClean16, 0>(krnlSetup& _xyz, void* const& ptr, unsigned long const& size) { GPUDebugTiming timer(mProcessingSettings.debugLevel, nullptr, mInternals->Streams, _xyz, this); GPUFailedMsg(cudaMemsetAsync(ptr, 0, size, mInternals->Streams[_xyz.x.stream])); } static void getArgPtrs(const void** pArgs) {} template <typename T, typename... Args> static void getArgPtrs(const void** pArgs, const T& arg, const Args&... args) { *pArgs = &arg; getArgPtrs(pArgs + 1, args...); } template <class T, int I, typename... Args> void GPUReconstructionCUDABackend::runKernelBackendInternal(krnlSetup& _xyz, const Args&... args) { GPUDebugTiming timer(mProcessingSettings.deviceTimers && mProcessingSettings.debugLevel > 0, (void**)mDebugEvents, mInternals->Streams, _xyz); if (mProcessingSettings.enableRTC) { auto& x = _xyz.x; auto& y = _xyz.y; if (y.num <= 1) { const void* pArgs[sizeof...(Args) + 1]; pArgs[0] = &y.start; getArgPtrs(&pArgs[1], args...); GPUFailedMsg(cuLaunchKernel(*mInternals->rtcFunctions[mInternals->getRTCkernelNum<false, T, I>()], x.nBlocks, 1, 1, x.nThreads, 1, 1, 0, mInternals->Streams[x.stream], (void**)pArgs, nullptr)); } else { const void* pArgs[sizeof...(Args) + 2]; pArgs[0] = &y.start; pArgs[1] = &y.num; getArgPtrs(&pArgs[2], args...); GPUFailedMsg(cuLaunchKernel(*mInternals->rtcFunctions[mInternals->getRTCkernelNum<true, T, I>()], x.nBlocks, 1, 1, x.nThreads, 1, 1, 0, mInternals->Streams[x.stream], (void**)pArgs, nullptr)); } } else { backendInternal<T, I>::runKernelBackendMacro(_xyz, this, args...); } if (mProcessingSettings.checkKernelFailures) { if (GPUDebug(GetKernelName<T, I>(), _xyz.x.stream, true)) { throw std::runtime_error("Kernel Failure"); } } } template <class T, int I, typename... Args> int GPUReconstructionCUDABackend::runKernelBackend(krnlSetup& _xyz, const Args&... 
args) { auto& x = _xyz.x; auto& z = _xyz.z; if (z.evList) { for (int k = 0; k < z.nEvents; k++) { GPUFailedMsg(cudaStreamWaitEvent(mInternals->Streams[x.stream], ((cudaEvent_t*)z.evList)[k], 0)); } } runKernelBackendInternal<T, I>(_xyz, args...); GPUFailedMsg(cudaGetLastError()); if (z.ev) { GPUFailedMsg(cudaEventRecord(*(cudaEvent_t*)z.ev, mInternals->Streams[x.stream])); } return 0; } GPUReconstructionCUDABackend::GPUReconstructionCUDABackend(const GPUSettingsDeviceBackend& cfg) : GPUReconstructionDeviceBase(cfg, sizeof(GPUReconstructionDeviceBase)) { if (mMaster == nullptr) { mInternals = new GPUReconstructionCUDAInternals; } mDeviceBackendSettings.deviceType = DeviceType::CUDA; } GPUReconstructionCUDABackend::~GPUReconstructionCUDABackend() { Exit(); // Make sure we destroy everything (in particular the ITS tracker) before we exit CUDA if (mMaster == nullptr) { delete mInternals; } } GPUReconstruction* GPUReconstruction_Create_CUDA(const GPUSettingsDeviceBackend& cfg) { return new GPUReconstructionCUDA(cfg); } void GPUReconstructionCUDABackend::GetITSTraits(std::unique_ptr<o2::its::TrackerTraits>* trackerTraits, std::unique_ptr<o2::its::VertexerTraits>* vertexerTraits) { if (trackerTraits) { trackerTraits->reset(new o2::its::TrackerTraitsNV); } if (vertexerTraits) { vertexerTraits->reset(new o2::its::VertexerTraitsGPU); } } void GPUReconstructionCUDABackend::UpdateSettings() { GPUCA_GPUReconstructionUpdateDefailts(); } int GPUReconstructionCUDABackend::InitDevice_Runtime() { if (mMaster == nullptr) { cudaDeviceProp cudaDeviceProp; int count, bestDevice = -1; double bestDeviceSpeed = -1, deviceSpeed; if (GPUFailedMsgI(cuInit(0))) { GPUError("Error initializing CUDA!"); return (1); } if (GPUFailedMsgI(cudaGetDeviceCount(&count))) { GPUError("Error getting CUDA Device Count"); return (1); } if (mProcessingSettings.debugLevel >= 2) { GPUInfo("Available CUDA devices:"); } const int reqVerMaj = 2; const int reqVerMin = 0; std::vector<bool> devicesOK(count, false); std::vector<size_t> devMemory(count, 0); bool contextCreated = false; for (int i = 0; i < count; i++) { if (mProcessingSettings.debugLevel >= 4) { GPUInfo("Examining device %d", i); } size_t free, total; CUdevice tmpDevice; if (GPUFailedMsgI(cuDeviceGet(&tmpDevice, i))) { GPUError("Could not set CUDA device!"); return (1); } if (GPUFailedMsgI(cuCtxCreate(&mInternals->CudaContext, 0, tmpDevice))) { if (mProcessingSettings.debugLevel >= 4) { GPUWarning("Couldn't create context for device %d. Skipping it.", i); } continue; } contextCreated = true; if (GPUFailedMsgI(cuMemGetInfo(&free, &total))) { if (mProcessingSettings.debugLevel >= 4) { GPUWarning("Error obtaining CUDA memory info about device %d! 
Skipping it.", i); } GPUFailedMsg(cuCtxDestroy(mInternals->CudaContext)); continue; } if (count > 1) { GPUFailedMsg(cuCtxDestroy(mInternals->CudaContext)); contextCreated = false; } if (mProcessingSettings.debugLevel >= 4) { GPUInfo("Obtained current memory usage for device %d", i); } if (GPUFailedMsgI(cudaGetDeviceProperties(&cudaDeviceProp, i))) { continue; } if (mProcessingSettings.debugLevel >= 4) { GPUInfo("Obtained device properties for device %d", i); } int deviceOK = true; const char* deviceFailure = ""; if (cudaDeviceProp.major < reqVerMaj || (cudaDeviceProp.major == reqVerMaj && cudaDeviceProp.minor < reqVerMin)) { deviceOK = false; deviceFailure = "Too low device revision"; } else if (free < std::max<size_t>(mDeviceMemorySize, REQUIRE_MIN_MEMORY)) { deviceOK = false; deviceFailure = "Insufficient GPU memory"; } deviceSpeed = (double)cudaDeviceProp.multiProcessorCount * (double)cudaDeviceProp.clockRate * (double)cudaDeviceProp.warpSize * (double)free * (double)cudaDeviceProp.major * (double)cudaDeviceProp.major; if (mProcessingSettings.debugLevel >= 2) { GPUImportant("Device %s%2d: %s (Rev: %d.%d - Mem Avail %lu / %lu)%s %s", deviceOK ? " " : "[", i, cudaDeviceProp.name, cudaDeviceProp.major, cudaDeviceProp.minor, free, (size_t)cudaDeviceProp.totalGlobalMem, deviceOK ? " " : " ]", deviceOK ? "" : deviceFailure); } if (!deviceOK) { continue; } devicesOK[i] = true; devMemory[i] = std::min<size_t>(free, std::max<long int>(0, total - REQUIRE_MEMORY_RESERVED)); if (deviceSpeed > bestDeviceSpeed) { bestDevice = i; bestDeviceSpeed = deviceSpeed; } else { if (mProcessingSettings.debugLevel >= 2 && mProcessingSettings.deviceNum < 0) { GPUInfo("Skipping: Speed %f < %f\n", deviceSpeed, bestDeviceSpeed); } } } bool noDevice = false; if (bestDevice == -1) { GPUWarning("No %sCUDA Device available, aborting CUDA Initialisation", count ? 
"appropriate " : ""); GPUImportant("Requiring Revision %d.%d, Mem: %lu", reqVerMaj, reqVerMin, std::max<size_t>(mDeviceMemorySize, REQUIRE_MIN_MEMORY)); noDevice = true; } else if (mProcessingSettings.deviceNum > -1) { if (mProcessingSettings.deviceNum >= (signed)count) { GPUError("Requested device ID %d does not exist", mProcessingSettings.deviceNum); noDevice = true; } else if (!devicesOK[mProcessingSettings.deviceNum]) { GPUError("Unsupported device requested (%d)", mProcessingSettings.deviceNum); noDevice = true; } else { bestDevice = mProcessingSettings.deviceNum; } } if (noDevice) { if (contextCreated) { GPUFailedMsgI(cuCtxDestroy(mInternals->CudaContext)); } return (1); } mDeviceId = bestDevice; GPUFailedMsgI(cudaGetDeviceProperties(&cudaDeviceProp, mDeviceId)); if (mProcessingSettings.debugLevel >= 2) { GPUInfo("Using CUDA Device %s with Properties:", cudaDeviceProp.name); GPUInfo("\ttotalGlobalMem = %lld", (unsigned long long int)cudaDeviceProp.totalGlobalMem); GPUInfo("\tsharedMemPerBlock = %lld", (unsigned long long int)cudaDeviceProp.sharedMemPerBlock); GPUInfo("\tregsPerBlock = %d", cudaDeviceProp.regsPerBlock); GPUInfo("\twarpSize = %d", cudaDeviceProp.warpSize); GPUInfo("\tmemPitch = %lld", (unsigned long long int)cudaDeviceProp.memPitch); GPUInfo("\tmaxThreadsPerBlock = %d", cudaDeviceProp.maxThreadsPerBlock); GPUInfo("\tmaxThreadsDim = %d %d %d", cudaDeviceProp.maxThreadsDim[0], cudaDeviceProp.maxThreadsDim[1], cudaDeviceProp.maxThreadsDim[2]); GPUInfo("\tmaxGridSize = %d %d %d", cudaDeviceProp.maxGridSize[0], cudaDeviceProp.maxGridSize[1], cudaDeviceProp.maxGridSize[2]); GPUInfo("\ttotalConstMem = %lld", (unsigned long long int)cudaDeviceProp.totalConstMem); GPUInfo("\tmajor = %d", cudaDeviceProp.major); GPUInfo("\tminor = %d", cudaDeviceProp.minor); GPUInfo("\tclockRate = %d", cudaDeviceProp.clockRate); GPUInfo("\tmemoryClockRate = %d", cudaDeviceProp.memoryClockRate); GPUInfo("\tmultiProcessorCount = %d", cudaDeviceProp.multiProcessorCount); GPUInfo("\ttextureAlignment = %lld", (unsigned long long int)cudaDeviceProp.textureAlignment); GPUInfo(" "); } if (cudaDeviceProp.warpSize != GPUCA_WARP_SIZE) { throw std::runtime_error("Invalid warp size on GPU"); } mBlockCount = cudaDeviceProp.multiProcessorCount; mWarpSize = 32; mMaxThreads = std::max<int>(mMaxThreads, cudaDeviceProp.maxThreadsPerBlock * mBlockCount); mDeviceName = cudaDeviceProp.name; mDeviceName += " (CUDA GPU)"; if (cudaDeviceProp.major < 3) { GPUError("Unsupported CUDA Device"); return (1); } #ifdef GPUCA_USE_TEXTURES if (GPUCA_SLICE_DATA_MEMORY * NSLICES > (size_t)cudaDeviceProp.maxTexture1DLinear) { GPUError("Invalid maximum texture size of device: %lld < %lld\n", (long long int)cudaDeviceProp.maxTexture1DLinear, (long long int)(GPUCA_SLICE_DATA_MEMORY * NSLICES)); return (1); } #endif #ifndef GPUCA_NO_CONSTANT_MEMORY if (gGPUConstantMemBufferSize > cudaDeviceProp.totalConstMem) { GPUError("Insufficient constant memory available on GPU %d < %d!", (int)cudaDeviceProp.totalConstMem, (int)gGPUConstantMemBufferSize); return (1); } #endif if (contextCreated == 0 && GPUFailedMsgI(cuCtxCreate(&mInternals->CudaContext, CU_CTX_SCHED_AUTO, mDeviceId))) { GPUError("Could not set CUDA Device!"); return (1); } if (GPUFailedMsgI(cudaDeviceSetLimit(cudaLimitStackSize, GPUCA_GPU_STACK_SIZE))) { GPUError("Error setting CUDA stack size"); GPUFailedMsgI(cudaDeviceReset()); return (1); } if (GPUFailedMsgI(cudaDeviceSetLimit(cudaLimitMallocHeapSize, GPUCA_GPU_HEAP_SIZE))) { GPUError("Error setting CUDA stack size"); 
GPUFailedMsgI(cudaDeviceReset()); return (1); } if (mDeviceMemorySize == 1 || mDeviceMemorySize == 2) { mDeviceMemorySize = std::max<long int>(0, devMemory[mDeviceId] - REQUIRE_FREE_MEMORY_RESERVED_PER_SM * cudaDeviceProp.multiProcessorCount); // Take all GPU memory but some reserve if (mDeviceMemorySize >= RESERVE_EXTRA_MEM_THRESHOLD) { mDeviceMemorySize -= RESERVE_EXTRA_MEM_OFFSET; } } if (mDeviceMemorySize == 2) { mDeviceMemorySize = mDeviceMemorySize * 2 / 3; // Leave 1/3 of GPU memory for event display } if (mDeviceMemorySize > cudaDeviceProp.totalGlobalMem || GPUFailedMsgI(cudaMalloc(&mDeviceMemoryBase, mDeviceMemorySize))) { GPUError("CUDA Memory Allocation Error (trying %lld bytes, %lld available)", (long long int)mDeviceMemorySize, (long long int)cudaDeviceProp.totalGlobalMem); GPUFailedMsgI(cudaDeviceReset()); return (1); } if (GPUFailedMsgI(cudaMallocHost(&mHostMemoryBase, mHostMemorySize))) { GPUError("Error allocating Page Locked Host Memory (trying %lld bytes)", (long long int)mHostMemorySize); GPUFailedMsgI(cudaDeviceReset()); return (1); } if (mProcessingSettings.debugLevel >= 1) { GPUInfo("Memory ptrs: GPU (%lld bytes): %p - Host (%lld bytes): %p", (long long int)mDeviceMemorySize, mDeviceMemoryBase, (long long int)mHostMemorySize, mHostMemoryBase); memset(mHostMemoryBase, 0xDD, mHostMemorySize); if (GPUFailedMsgI(cudaMemset(mDeviceMemoryBase, 0xDD, mDeviceMemorySize))) { GPUError("Error during CUDA memset"); GPUFailedMsgI(cudaDeviceReset()); return (1); } } for (int i = 0; i < mNStreams; i++) { if (GPUFailedMsgI(cudaStreamCreateWithFlags(&mInternals->Streams[i], cudaStreamNonBlocking))) { GPUError("Error creating CUDA Stream"); GPUFailedMsgI(cudaDeviceReset()); return (1); } } dummyInitKernel<<<mBlockCount, 256>>>(mDeviceMemoryBase); GPUInfo("CUDA Initialisation successfull (Device %d: %s (Frequency %d, Cores %d), %lld / %lld bytes host / global memory, Stack frame %d, Constant memory %lld)", mDeviceId, cudaDeviceProp.name, cudaDeviceProp.clockRate, cudaDeviceProp.multiProcessorCount, (long long int)mHostMemorySize, (long long int)mDeviceMemorySize, (int)GPUCA_GPU_STACK_SIZE, (long long int)gGPUConstantMemBufferSize); #ifndef GPUCA_ALIROOT_LIB if (mProcessingSettings.enableRTC) { if (mProcessingSettings.debugLevel >= 0) { GPUInfo("Starting CUDA RTC Compilation"); } HighResTimer rtcTimer; rtcTimer.ResetStart(); std::string filename = "/tmp/o2cagpu_rtc_"; filename += std::to_string(getpid()); filename += "_"; filename += std::to_string(rand()); if (mProcessingSettings.debugLevel >= 3) { printf("Writing to %s\n", filename.c_str()); } FILE* fp = fopen((filename + ".cu").c_str(), "w+b"); if (fp == nullptr) { throw std::runtime_error("Error opening file"); } std::string rtcparam = GPUParamRTC::generateRTCCode(param(), mProcessingSettings.rtcConstexpr); if (fwrite(rtcparam.c_str(), 1, rtcparam.size(), fp) != rtcparam.size()) { throw std::runtime_error("Error writing file"); } if (fwrite(_curtc_GPUReconstructionCUDArtc_cu_src, 1, _curtc_GPUReconstructionCUDArtc_cu_src_size, fp) != _curtc_GPUReconstructionCUDArtc_cu_src_size) { throw std::runtime_error("Error writing file"); } fclose(fp); std::string command = _curtc_GPUReconstructionCUDArtc_cu_command; command += " -cubin -c " + filename + ".cu -o " + filename + ".o"; if (mProcessingSettings.debugLevel >= 3) { printf("Running command %s\n", command.c_str()); } if (system(command.c_str())) { throw std::runtime_error("Runtime compilation failed"); } GPUFailedMsg(cuModuleLoad(&mInternals->rtcModule, (filename + ".o").c_str())); 
#define GPUCA_KRNL(x_class, x_attributes, x_arguments, x_forward) GPUCA_KRNL_WRAP(GPUCA_KRNL_LOAD_, x_class, x_attributes, x_arguments, x_forward) #define GPUCA_KRNL_LOAD_single(x_class, x_attributes, x_arguments, x_forward) \ mInternals->getRTCkernelNum<false, GPUCA_M_KRNL_TEMPLATE(x_class)>(mInternals->rtcFunctions.size()); \ mInternals->rtcFunctions.emplace_back(new CUfunction); \ GPUFailedMsg(cuModuleGetFunction(mInternals->rtcFunctions.back().get(), mInternals->rtcModule, GPUCA_M_STR(GPUCA_M_CAT(krnl_, GPUCA_M_KRNL_NAME(x_class))))); #define GPUCA_KRNL_LOAD_multi(x_class, x_attributes, x_arguments, x_forward) \ mInternals->getRTCkernelNum<true, GPUCA_M_KRNL_TEMPLATE(x_class)>(mInternals->rtcFunctions.size()); \ mInternals->rtcFunctions.emplace_back(new CUfunction); \ GPUFailedMsg(cuModuleGetFunction(mInternals->rtcFunctions.back().get(), mInternals->rtcModule, GPUCA_M_STR(GPUCA_M_CAT3(krnl_, GPUCA_M_KRNL_NAME(x_class), _multi)))); #include "GPUReconstructionKernels.h" #undef GPUCA_KRNL #undef GPUCA_KRNL_LOAD_single #undef GPUCA_KRNL_LOAD_multi remove((filename + ".cu").c_str()); remove((filename + ".o").c_str()); if (mProcessingSettings.debugLevel >= 0) { GPUInfo("RTC Compilation finished (%f seconds)", rtcTimer.GetCurrentElapsedTime()); } } #endif void* devPtrConstantMem; #ifndef GPUCA_NO_CONSTANT_MEMORY if (mProcessingSettings.enableRTC) { GPUFailedMsg(cuModuleGetGlobal((CUdeviceptr*)&devPtrConstantMem, nullptr, mInternals->rtcModule, "gGPUConstantMemBuffer")); } else { GPUFailedMsg(cudaGetSymbolAddress(&devPtrConstantMem, gGPUConstantMemBuffer)); } #else GPUFailedMsg(cudaMalloc(&devPtrConstantMem, gGPUConstantMemBufferSize)); #endif mDeviceConstantMem = (GPUConstantMem*)devPtrConstantMem; } else { GPUReconstructionCUDABackend* master = dynamic_cast<GPUReconstructionCUDABackend*>(mMaster); mDeviceId = master->mDeviceId; mBlockCount = master->mBlockCount; mWarpSize = master->mWarpSize; mMaxThreads = master->mMaxThreads; mDeviceName = master->mDeviceName; mDeviceConstantMem = master->mDeviceConstantMem; mInternals = master->mInternals; GPUFailedMsgI(cuCtxPushCurrent(mInternals->CudaContext)); } if (mProcessingSettings.debugLevel >= 1) { } for (unsigned int i = 0; i < mEvents.size(); i++) { cudaEvent_t* events = (cudaEvent_t*)mEvents[i].data(); for (unsigned int j = 0; j < mEvents[i].size(); j++) { if (GPUFailedMsgI(cudaEventCreate(&events[j]))) { GPUError("Error creating event"); GPUFailedMsgI(cudaDeviceReset()); return 1; } } } if (GPUFailedMsgI(cuCtxPopCurrent(&mInternals->CudaContext))) { GPUError("Error popping CUDA context!"); return (1); } return (0); } int GPUReconstructionCUDABackend::ExitDevice_Runtime() { // Uninitialize CUDA GPUFailedMsgI(cuCtxPushCurrent(mInternals->CudaContext)); SynchronizeGPU(); for (unsigned int i = 0; i < mEvents.size(); i++) { cudaEvent_t* events = (cudaEvent_t*)mEvents[i].data(); for (unsigned int j = 0; j < mEvents[i].size(); j++) { GPUFailedMsgI(cudaEventDestroy(events[j])); } } if (mMaster == nullptr) { GPUFailedMsgI(cudaFree(mDeviceMemoryBase)); #ifdef GPUCA_NO_CONSTANT_MEMORY GPUFailedMsgI(cudaFree(mDeviceConstantMem)); #endif for (int i = 0; i < mNStreams; i++) { GPUFailedMsgI(cudaStreamDestroy(mInternals->Streams[i])); } GPUFailedMsgI(cudaFreeHost(mHostMemoryBase)); GPUFailedMsgI(cuCtxDestroy(mInternals->CudaContext)); GPUInfo("CUDA Uninitialized"); } else { GPUFailedMsgI(cuCtxPopCurrent(&mInternals->CudaContext)); } mDeviceMemoryBase = nullptr; mHostMemoryBase = nullptr; /*if (GPUFailedMsgI(cudaDeviceReset())) { // No longer doing this, 
another thread might have used the GPU GPUError("Could not uninitialize GPU"); return (1); }*/ return (0); } size_t GPUReconstructionCUDABackend::GPUMemCpy(void* dst, const void* src, size_t size, int stream, int toGPU, deviceEvent* ev, deviceEvent* evList, int nEvents) { if (mProcessingSettings.debugLevel >= 3) { stream = -1; } if (stream == -1) { SynchronizeGPU(); GPUFailedMsg(cudaMemcpy(dst, src, size, toGPU ? cudaMemcpyHostToDevice : cudaMemcpyDeviceToHost)); } else { if (evList == nullptr) { nEvents = 0; } for (int k = 0; k < nEvents; k++) { GPUFailedMsg(cudaStreamWaitEvent(mInternals->Streams[stream], ((cudaEvent_t*)evList)[k], 0)); } GPUFailedMsg(cudaMemcpyAsync(dst, src, size, toGPU == -2 ? cudaMemcpyDeviceToDevice : toGPU ? cudaMemcpyHostToDevice : cudaMemcpyDeviceToHost, mInternals->Streams[stream])); } if (ev) { GPUFailedMsg(cudaEventRecord(*(cudaEvent_t*)ev, mInternals->Streams[stream == -1 ? 0 : stream])); } return size; } size_t GPUReconstructionCUDABackend::TransferMemoryInternal(GPUMemoryResource* res, int stream, deviceEvent* ev, deviceEvent* evList, int nEvents, bool toGPU, const void* src, void* dst) { if (!(res->Type() & GPUMemoryResource::MEMORY_GPU)) { if (mProcessingSettings.debugLevel >= 4) { GPUInfo("Skipped transfer of non-GPU memory resource: %s", res->Name()); } return 0; } if (mProcessingSettings.debugLevel >= 3 && (strcmp(res->Name(), "ErrorCodes") || mProcessingSettings.debugLevel >= 4)) { GPUInfo("Copying to %s: %s - %lld bytes", toGPU ? "GPU" : "Host", res->Name(), (long long int)res->Size()); } return GPUMemCpy(dst, src, res->Size(), stream, toGPU, ev, evList, nEvents); } size_t GPUReconstructionCUDABackend::WriteToConstantMemory(size_t offset, const void* src, size_t size, int stream, deviceEvent* ev) { #ifndef GPUCA_NO_CONSTANT_MEMORY if (stream == -1) { GPUFailedMsg(cudaMemcpyToSymbol(gGPUConstantMemBuffer, src, size, offset, cudaMemcpyHostToDevice)); } else { GPUFailedMsg(cudaMemcpyToSymbolAsync(gGPUConstantMemBuffer, src, size, offset, cudaMemcpyHostToDevice, mInternals->Streams[stream])); } if (mProcessingSettings.enableRTC) #endif { std::unique_ptr<GPUParamRTC> tmpParam; if (mProcessingSettings.rtcConstexpr) { if (offset < sizeof(GPUParam) && (offset != 0 || size > sizeof(GPUParam))) { throw std::runtime_error("Invalid write to constant memory, crossing GPUParam border"); } if (offset == 0) { tmpParam.reset(new GPUParamRTC); tmpParam->setFrom(*(GPUParam*)src); src = tmpParam.get(); size = sizeof(*tmpParam); } else { offset = offset - sizeof(GPUParam) + sizeof(GPUParamRTC); } } if (stream == -1) { GPUFailedMsg(cudaMemcpy(((char*)mDeviceConstantMem) + offset, src, size, cudaMemcpyHostToDevice)); } else { GPUFailedMsg(cudaMemcpyAsync(((char*)mDeviceConstantMem) + offset, src, size, cudaMemcpyHostToDevice, mInternals->Streams[stream])); } } if (ev && stream != -1) { GPUFailedMsg(cudaEventRecord(*(cudaEvent_t*)ev, mInternals->Streams[stream])); } return size; } void GPUReconstructionCUDABackend::ReleaseEvent(deviceEvent* ev) {} void GPUReconstructionCUDABackend::RecordMarker(deviceEvent* ev, int stream) { GPUFailedMsg(cudaEventRecord(*(cudaEvent_t*)ev, mInternals->Streams[stream])); } GPUReconstructionCUDABackend::GPUThreadContextCUDA::GPUThreadContextCUDA(GPUReconstructionCUDAInternals* context) : GPUThreadContext(), mContext(context) { if (mContext->cudaContextObtained++ == 0) { cuCtxPushCurrent(mContext->CudaContext); } } GPUReconstructionCUDABackend::GPUThreadContextCUDA::~GPUThreadContextCUDA() { if (--mContext->cudaContextObtained == 0) { 
cuCtxPopCurrent(&mContext->CudaContext); } } std::unique_ptr<GPUReconstruction::GPUThreadContext> GPUReconstructionCUDABackend::GetThreadContext() { return std::unique_ptr<GPUThreadContext>(new GPUThreadContextCUDA(mInternals)); } void GPUReconstructionCUDABackend::SynchronizeGPU() { GPUFailedMsg(cudaDeviceSynchronize()); } void GPUReconstructionCUDABackend::SynchronizeStream(int stream) { GPUFailedMsg(cudaStreamSynchronize(mInternals->Streams[stream])); } void GPUReconstructionCUDABackend::SynchronizeEvents(deviceEvent* evList, int nEvents) { for (int i = 0; i < nEvents; i++) { GPUFailedMsg(cudaEventSynchronize(((cudaEvent_t*)evList)[i])); } } void GPUReconstructionCUDABackend::StreamWaitForEvents(int stream, deviceEvent* evList, int nEvents) { for (int i = 0; i < nEvents; i++) { GPUFailedMsg(cudaStreamWaitEvent(mInternals->Streams[stream], ((cudaEvent_t*)evList)[i], 0)); } } bool GPUReconstructionCUDABackend::IsEventDone(deviceEvent* evList, int nEvents) { for (int i = 0; i < nEvents; i++) { cudaError_t retVal = cudaEventSynchronize(((cudaEvent_t*)evList)[i]); if (retVal == cudaErrorNotReady) { return false; } GPUFailedMsg(retVal); } return (true); } int GPUReconstructionCUDABackend::GPUDebug(const char* state, int stream, bool force) { // Wait for CUDA-Kernel to finish and check for CUDA errors afterwards, in case of debugmode cudaError cuErr; cuErr = cudaGetLastError(); if (cuErr != cudaSuccess) { GPUError("Cuda Error %s while running kernel (%s) (Stream %d)", cudaGetErrorString(cuErr), state, stream); return (1); } if (force == false && mProcessingSettings.debugLevel <= 0) { return (0); } if (GPUFailedMsgI(stream == -1 ? cudaDeviceSynchronize() : cudaStreamSynchronize(mInternals->Streams[stream]))) { GPUError("CUDA Error while synchronizing (%s) (Stream %d)", state, stream); return (1); } if (mProcessingSettings.debugLevel >= 3) { GPUInfo("GPU Sync Done"); } return (0); } int GPUReconstructionCUDABackend::PrepareTextures() { #ifdef GPUCA_USE_TEXTURES cudaChannelFormatDesc channelDescu2 = cudaCreateChannelDesc<cahit2>(); size_t offset; GPUFailedMsg(cudaBindTexture(&offset, &gAliTexRefu2, mProcessorsShadow->tpcTrackers[0].Data().Memory(), &channelDescu2, NSLICES * GPUCA_SLICE_DATA_MEMORY)); cudaChannelFormatDesc channelDescu = cudaCreateChannelDesc<calink>(); GPUFailedMsg(cudaBindTexture(&offset, &gAliTexRefu, mProcessorsShadow->tpcTrackers[0].Data().Memory(), &channelDescu, NSLICES * GPUCA_SLICE_DATA_MEMORY)); #endif return (0); } int GPUReconstructionCUDABackend::registerMemoryForGPU(const void* ptr, size_t size) { return GPUFailedMsgI(cudaHostRegister((void*)ptr, size, cudaHostRegisterDefault)); } int GPUReconstructionCUDABackend::unregisterMemoryForGPU(const void* ptr) { return GPUFailedMsgI(cudaHostUnregister((void*)ptr)); } void GPUReconstructionCUDABackend::PrintKernelOccupancies() { int maxBlocks, threads, suggestedBlocks; cudaFuncAttributes attr; GPUFailedMsg(cuCtxPushCurrent(mInternals->CudaContext)); #define GPUCA_KRNL(x_class, x_attributes, x_arguments, x_forward) GPUCA_KRNL_WRAP(GPUCA_KRNL_LOAD_, x_class, x_attributes, x_arguments, x_forward) #define GPUCA_KRNL_LOAD_single(x_class, x_attributes, x_arguments, x_forward) \ GPUFailedMsg(cudaOccupancyMaxPotentialBlockSize(&suggestedBlocks, &threads, GPUCA_M_CAT(krnl_, GPUCA_M_KRNL_NAME(x_class)))); \ GPUFailedMsg(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&maxBlocks, GPUCA_M_CAT(krnl_, GPUCA_M_KRNL_NAME(x_class)), threads, 0)); \ GPUFailedMsg(cudaFuncGetAttributes(&attr, GPUCA_M_CAT(krnl_, GPUCA_M_KRNL_NAME(x_class)))); \ 
GPUInfo("Kernel: %50s Block size: %4d, Maximum active blocks: %3d, Suggested blocks: %3d, Regs: %3d, smem: %3d", GPUCA_M_STR(GPUCA_M_CAT(krnl_, GPUCA_M_KRNL_NAME(x_class))), threads, maxBlocks, suggestedBlocks, attr.numRegs, (int)attr.sharedSizeBytes); #define GPUCA_KRNL_LOAD_multi(x_class, x_attributes, x_arguments, x_forward) \ GPUFailedMsg(cudaOccupancyMaxPotentialBlockSize(&suggestedBlocks, &threads, GPUCA_M_CAT3(krnl_, GPUCA_M_KRNL_NAME(x_class), _multi))); \ GPUFailedMsg(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&maxBlocks, GPUCA_M_CAT3(krnl_, GPUCA_M_KRNL_NAME(x_class), _multi), threads, 0)); \ GPUFailedMsg(cudaFuncGetAttributes(&attr, GPUCA_M_CAT3(krnl_, GPUCA_M_KRNL_NAME(x_class), _multi))); \ GPUInfo("Kernel: %50s Block size: %4d, Maximum active blocks: %3d, Suggested blocks: %3d, Regs: %3d, smem: %3d", GPUCA_M_STR(GPUCA_M_CAT3(krnl_, GPUCA_M_KRNL_NAME(x_class), _multi)), threads, maxBlocks, suggestedBlocks, attr.numRegs, (int)attr.sharedSizeBytes); #include "GPUReconstructionKernels.h" #undef GPUCA_KRNL #undef GPUCA_KRNL_LOAD_single #undef GPUCA_KRNL_LOAD_multi GPUFailedMsg(cuCtxPopCurrent(&mInternals->CudaContext)); } void GPUReconstructionCUDABackend::startGPUProfiling() { GPUFailedMsg(cudaProfilerStart()); } void GPUReconstructionCUDABackend::endGPUProfiling() { GPUFailedMsg(cudaProfilerStop()); }
namespace caffe2 { __global__ void AdamUpdate( int N, const float* g, const float* m, const float* v, float* ng, float* nm, float* nv, float beta1, float beta2, float eps_hat, float correction, const float* lr) { CUDA_1D_KERNEL_LOOP(i, N) { float gi = g[i]; float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1); float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2); ng[i] = lr[0] * correction * mi / (sqrtf(vi) + eps_hat); } } template <> void adam_update<CUDAContext>( int N, const float* g, const float* m, const float* v, float* ng, float* nm, float* nv, float beta1, float beta2, float eps_hat, float correction, const float* lr, CUDAContext* context) { AdamUpdate<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( N, g, m, v, ng, nm, nv, beta1, beta2, eps_hat, correction, lr); C10_CUDA_KERNEL_LAUNCH_CHECK(); } __global__ void AdamCompute( int N, const float* w, const float* g, const float* m, const float* v, float* nw, float* nm, float* nv, float beta1, float beta2, float eps_hat, float correction, const float* lr) { CUDA_1D_KERNEL_LOOP(i, N) { float gi = g[i]; float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1); float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2); float ng = lr[0] * correction * mi / (sqrtf(vi) + eps_hat); nw[i] = w[i] + ng; } } template <> void adam_compute<CUDAContext>( int N, const float* w, const float* g, const float* m, const float* v, float* nw, float* nm, float* nv, float beta1, float beta2, float eps_hat, float correction, const float* lr, CUDAContext* context) { AdamCompute<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( N, w, g, m, v, nw, nm, nv, beta1, beta2, eps_hat, correction, lr); C10_CUDA_KERNEL_LAUNCH_CHECK(); } __global__ void AdamComputeOutputGrad( int N, const float* w, const float* g, const float* m, const float* v, float* nw, float* nm, float* nv, float* ng, float beta1, float beta2, float eps_hat, float correction, const float* lr) { CUDA_1D_KERNEL_LOOP(i, N) { float gi = g[i]; float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1); float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2); float ngi = ng[i] = correction * mi / (sqrtf(vi) + eps_hat); nw[i] = w[i] + lr[0] * ngi; } } template <> void adam_compute_output_grad<CUDAContext>( int N, const float* w, const float* g, const float* m, const float* v, float* nw, float* nm, float* nv, float* ng, float beta1, float beta2, float eps_hat, float correction, const float* lr, CUDAContext* context) { AdamComputeOutputGrad<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>( N, w, g, m, v, nw, nm, nv, ng, beta1, beta2, eps_hat, correction, lr); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename SIndex> __global__ void SparseAdamKernel( const size_t N, const size_t grad_slice_sz, const float beta1, const float beta2, const float epsilon, float* param, float* mom1, float* mom2, const SIndex* indices, const float* grad, const float correction, const float* lr, const float iter) { CUDA_1D_KERNEL_LOOP(i, N) { const size_t gradIdx = i; const SIndex index = indices[i / grad_slice_sz]; const size_t paramIdx = index * grad_slice_sz + (i % grad_slice_sz); float m1n = mom1[paramIdx] = mom1[paramIdx] * beta1 + grad[gradIdx] * (1.0f - beta1); float m2n = mom2[paramIdx] = mom2[paramIdx] * beta2 + grad[gradIdx] * grad[gradIdx] * (1.0f - beta2); param[paramIdx] += lr[0] * correction * m1n / (sqrt(m2n) + epsilon); } } template <typename SIndex> __global__ void SparseAdamOutputGradKernel( const size_t N, const size_t grad_slice_sz, const float 
beta1, const float beta2, const float epsilon, float* param, float* mom1, float* mom2, float* output_grad, const SIndex* indices, const float* grad, const float correction, const float* lr, const float iter) { CUDA_1D_KERNEL_LOOP(i, N) { const size_t gradIdx = i; const SIndex index = indices[i / grad_slice_sz]; const size_t paramIdx = index * grad_slice_sz + (i % grad_slice_sz); float m1n = mom1[paramIdx] = mom1[paramIdx] * beta1 + grad[gradIdx] * (1.0f - beta1); float m2n = mom2[paramIdx] = mom2[paramIdx] * beta2 + grad[gradIdx] * grad[gradIdx] * (1.0f - beta2); float gradOut = output_grad[gradIdx] = correction * m1n / (sqrt(m2n) + epsilon); param[paramIdx] += lr[0] * gradOut; } } template <typename SIndex> __global__ void RowWiseSparseAdamKernel( const int M, const int N, const float beta1, const float beta2, const float epsilon, float* param, float* mom1, float* mom2, const SIndex* indices, const float* grad, const float correction, const float* lr) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ BlockReduce::TempStorage temp_storage; int valid = min(N, CAFFE_CUDA_NUM_THREADS); // in case gridDim is smaller than M for (int i = blockIdx.x; i < M; i += gridDim.x) { const SIndex index = indices[i]; float sum_squares = 0.0; __shared__ float row_sum_squares_avg; // in case N is bigger than block size which is 512 by default for (int j = threadIdx.x; j < N; j += blockDim.x) { const float x_ij = grad[i * N + j]; sum_squares += x_ij * x_ij; } float reduce_sum_squares = BlockReduce(temp_storage).Sum(sum_squares, valid); if (threadIdx.x == 0) { row_sum_squares_avg = reduce_sum_squares / (float)N; mom2[index] = mom2[index] * beta2 + row_sum_squares_avg * (1.0f - beta2); } __syncthreads(); // update param float step = correction / (std::sqrt(mom2[index]) + epsilon); for (int j = threadIdx.x; j < N; j += blockDim.x) { mom1[index * N + j] = mom1[index * N + j] * beta1 + grad[i * N + j] * (1.0f - beta1); param[index * N + j] += lr[0] * mom1[index * N + j] * step; } } } template <typename SIndex> __global__ void RowWiseSparseAdamOutputGradKernel( const int M, const int N, const float beta1, const float beta2, const float epsilon, float* param, float* mom1, float* mom2, float* output_grad, const SIndex* indices, const float* grad, const float correction, const float* lr) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ BlockReduce::TempStorage temp_storage; int valid = min(N, CAFFE_CUDA_NUM_THREADS); // in case gridDim is smaller than M for (int i = blockIdx.x; i < M; i += gridDim.x) { const SIndex index = indices[i]; float sum_squares = 0.0; __shared__ float row_sum_squares_avg; // in case N is bigger than block size which is 512 by default for (int j = threadIdx.x; j < N; j += blockDim.x) { const float x_ij = grad[i * N + j]; sum_squares += x_ij * x_ij; } float reduce_sum_squares = BlockReduce(temp_storage).Sum(sum_squares, valid); if (threadIdx.x == 0) { row_sum_squares_avg = reduce_sum_squares / (float)N; mom2[index] = mom2[index] * beta2 + row_sum_squares_avg * (1.0f - beta2); } __syncthreads(); // update param float step = correction / (std::sqrt(mom2[index]) + epsilon); for (int j = threadIdx.x; j < N; j += blockDim.x) { mom1[index * N + j] = mom1[index * N + j] * beta1 + grad[i * N + j] * (1.0f - beta1); output_grad[i * N + j] = mom1[index * N + j] * step; param[index * N + j] += lr[0] * output_grad[i * N + j]; } } } template <> template <typename SIndex> bool SparseAdamOp<float, CUDAContext>::DoRunWithType() { 
Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM)); Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1)); Output(OUTPUT_MOMENT_2)->ResizeLike(Input(MOMENT_2)); auto N = Input(GRAD).size(); auto grad_slice_sz = Input(GRAD).size_from_dim(Input(INDICES).ndim()); const auto iter = OperatorBase::Input<Tensor>(ITER, CPU).template data<int64_t>()[0]; const float correction = sqrtf(1.0f - std::pow(beta2_, iter + 1)) / (1.0f - std::pow(beta1_, iter + 1)); if (OutputSize() == 3) { SparseAdamKernel<SIndex> <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, grad_slice_sz, beta1_, beta2_, epsilon_, Output(OUTPUT_PARAM)->template mutable_data<float>(), Output(OUTPUT_MOMENT_1)->template mutable_data<float>(), Output(OUTPUT_MOMENT_2)->template mutable_data<float>(), Input(INDICES).template data<SIndex>(), Input(GRAD).template data<float>(), correction, Input(LR).template data<float>(), iter); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD)); SparseAdamOutputGradKernel<SIndex> <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, grad_slice_sz, beta1_, beta2_, epsilon_, Output(OUTPUT_PARAM)->template mutable_data<float>(), Output(OUTPUT_MOMENT_1)->template mutable_data<float>(), Output(OUTPUT_MOMENT_2)->template mutable_data<float>(), Output(OUTPUT_GRAD)->template mutable_data<float>(), Input(INDICES).template data<SIndex>(), Input(GRAD).template data<float>(), correction, Input(LR).template data<float>(), iter); C10_CUDA_KERNEL_LAUNCH_CHECK(); } return true; } template <> template <typename SIndex> bool RowWiseSparseAdamOp<float, CUDAContext>::DoRunWithType() { Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM)); Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1)); Output(OUTPUT_MOMENT_2)->ResizeLike(Input(MOMENT_2)); auto N = Input(GRAD).size(); if (N == 0) { // empty grad, nothing to do here, not even launching the kernel return true; } const auto iter = OperatorBase::Input<Tensor>(ITER, CPU).template data<int64_t>()[0]; const float correction = sqrtf(1.0f - std::pow(beta2_, iter + 1)) / (1.0f - std::pow(beta1_, iter + 1)); // size of the 1st dimension of the input gradient auto GRAD_M = Input(GRAD).dim32(0); auto GRAD_N = N / GRAD_M; if (OutputSize() == 3) { RowWiseSparseAdamKernel<SIndex> <<<std::min(GRAD_M, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( GRAD_M, GRAD_N, beta1_, beta2_, epsilon_, Output(OUTPUT_PARAM)->template mutable_data<float>(), Output(OUTPUT_MOMENT_1)->template mutable_data<float>(), Output(OUTPUT_MOMENT_2)->template mutable_data<float>(), Input(INDICES).template data<SIndex>(), Input(GRAD).template data<float>(), correction, Input(LR).template data<float>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD)); RowWiseSparseAdamOutputGradKernel<SIndex> <<<std::min(GRAD_M, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( GRAD_M, GRAD_N, beta1_, beta2_, epsilon_, Output(OUTPUT_PARAM)->template mutable_data<float>(), Output(OUTPUT_MOMENT_1)->template mutable_data<float>(), Output(OUTPUT_MOMENT_2)->template mutable_data<float>(), Output(OUTPUT_GRAD)->template mutable_data<float>(), Input(INDICES).template data<SIndex>(), Input(GRAD).template data<float>(), correction, Input(LR).template data<float>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); } return true; } REGISTER_CUDA_OPERATOR(Adam, AdamOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SparseAdam, SparseAdamOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( 
RowWiseSparseAdam, RowWiseSparseAdamOp<float, CUDAContext>); } // namespace caffe2
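// --- Added sketch -------------------------------------------------------
// A scalar sketch (plain C++; the struct and function names are mine, not
// part of Caffe2) of the per-element update the AdamUpdate kernel above
// performs. With correction = sqrt(1 - beta2^t) / (1 - beta1^t), the term
// lr * correction * m / (sqrt(v) + eps) equals the textbook
// lr * m_hat / (sqrt(v_hat) + eps) up to where eps enters the denominator.
#include <cmath>
#include <cstdio>

struct AdamState {
  float m = 0.f;  // first-moment estimate (mom1 in the kernels above)
  float v = 0.f;  // second-moment estimate (mom2 in the kernels above)
};

// Returns the bias-corrected step; iter is 0-based, matching the ITER input above.
float adamStep(AdamState& s, float g, float lr, float beta1, float beta2, float eps, int iter) {
  s.m = s.m * beta1 + g * (1.f - beta1);
  s.v = s.v * beta2 + g * g * (1.f - beta2);
  const float correction =
      std::sqrt(1.f - std::pow(beta2, iter + 1)) / (1.f - std::pow(beta1, iter + 1));
  return lr * correction * s.m / (std::sqrt(s.v) + eps);
}

int main() {
  // Minimize w^2 with gradient 2*w; the step is subtracted here, whereas the
  // Caffe2 ops add the computed term to the parameter and leave the sign
  // convention to the lr/grad blobs they are fed.
  AdamState s;
  float w = 1.0f;
  for (int t = 0; t < 3; ++t) {
    const float g = 2.f * w;
    w -= adamStep(s, g, 0.1f, 0.9f, 0.999f, 1e-8f, t);
    printf("iter %d: w = %f\n", t, w);
  }
  return 0;
}
// The RowWiseSparse variants above keep a single mom2 entry per indexed row
// instead of one per element; only the second-moment bookkeeping changes,
// while the correction and step formula stay the same.
// ------------------------------------------------------------------------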
#include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/output.hpp" #include "caffe/layers/correlation_layer.hpp" #include "caffe/caffe.hpp" #define ROUND_OFF 50000 #define WARPS_PER_BLOCK 1 #define THREADS_PER_WARP 32 namespace caffe { // == Dimension rearrangement Kernel template <typename Dtype> __global__ void blob_rearrange_kernel2(const Dtype* in, Dtype* out, int num, int channels, int width, int height, int widthheight, int padding, int pwidthheight) { int xy = blockIdx.x*blockDim.x + threadIdx.x; if(xy>=widthheight) return; int ch = blockIdx.y; int n = blockIdx.z; Dtype value=in[(n*channels+ch)*widthheight+xy]; __syncthreads(); int xpad = (xy % width + padding); int ypad = (xy / width + padding); int xypad = ypad * (width+2*padding) + xpad; out[(n*pwidthheight+xypad)*channels + ch] = value; } // == Correlation Kernel template <typename Dtype> __global__ void CorrelateData(const int nthreads, int num, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int kernel_size, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const Dtype *bottom0, const Dtype *bottom1, Dtype *top) { extern __shared__ char patch_data_char[]; Dtype *patch_data = (Dtype *)patch_data_char; // First (upper left) position of kernel upper-left corner in current center position of neighborhood in image 1 int x1 = blockIdx.x*stride1 + max_displacement; int y1 = blockIdx.y*stride1 + max_displacement; int item = blockIdx.z; int ch_off = threadIdx.x; // Load 3D patch into shared shared memory for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + ch; int idxPatchData = ji_off + ch; patch_data[idxPatchData] = bottom0[idx1]; } } } __syncthreads(); __shared__ Dtype sum[WARPS_PER_BLOCK*THREADS_PER_WARP]; // Compute correlation for(int top_channel = 0; top_channel < topchannels; top_channel++) { sum[ch_off] = 0; int s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2; int s2p = (top_channel / neighborhood_grid_width - neighborhood_grid_radius) * stride2; for(int j = 0; j < kernel_size; j++) { // HEIGHT for(int i = 0; i < kernel_size; i++) { // WIDTH int ji_off = ((j * kernel_size) + i) * bottomchannels; for(int ch = ch_off; ch < bottomchannels; ch += (WARPS_PER_BLOCK*THREADS_PER_WARP)) { // CHANNELS int x2 = x1 + s2o; int y2 = y1 + s2p; int idxPatchData = ji_off + ch; int idx2 = ((item * bottomheight + y2+j) * bottomwidth + x2+i) * bottomchannels + ch; sum[ch_off] += patch_data[idxPatchData] * bottom1[idx2]; } } } __syncthreads(); if(ch_off == 0) { Dtype total_sum = 0; for(int idx = 0; idx < WARPS_PER_BLOCK*THREADS_PER_WARP; idx++) { total_sum += sum[idx]; } const int sumelems = kernel_size*kernel_size*bottomchannels; const int index = ((top_channel*topheight + blockIdx.y)*topwidth)+blockIdx.x; top[index + item*topcount] = total_sum / (float)sumelems; } } // Aggregate } // == Correlation Backward Pass Kernel (For Blob 0) template <typename Dtype> __global__ void CorrelateDataBackward0(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int 
neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, Dtype *bottom0diff, const Dtype *bottom1, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / bottomwidth) % bottomheight + pad_size; //h-pos //Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 Dtype sum = 0; if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { // Get bottom1 data: int s2o = stride2 * o; int s2p = stride2 * p; int idxbot1 = ((item * pbottomheight + (m+s2p)) * pbottomwidth + (l+s2o)) * bottomchannels + n; Dtype bot1tmp = bottom1[idxbot1]; // bottom1[l+s2o,m+s2p,n] // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p] int idxopoffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot1tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot0index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size); bottom0diff[bot0index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 1) template <typename Dtype> __global__ void CorrelateDataBackward1(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const Dtype *bottom0, Dtype *bottom1diff, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { //int l = index % bottomwidth + pad_size; //w-pos //int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos //int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels int n = index % bottomchannels; //channels int l = (index / bottomchannels) % bottomwidth + pad_size; //w-pos int m = (index / bottomchannels / 
bottomwidth) % bottomheight + pad_size; //h-pos // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; Dtype sum = 0; for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { int s2o = stride2 * o; int s2p = stride2 * p; //Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1 if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); // Get bottom0 data: int idxbot0 = ((item * pbottomheight + (m-s2p)) * pbottomwidth + (l-s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot0]; // bottom1[l+s2o,m+s2p,n] // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p] int idxOpOffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * bot0tmp; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; const int bot1index = ((n * bottomheight) + (m-pad_size)) * bottomwidth + (l-pad_size); bottom1diff[bot1index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Kernel Subtraction template <typename Dtype> __global__ void CorrelateDataSubtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int topcount, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int bottomchannels, const Dtype *bottom0, const Dtype *bottom1, Dtype *top) { CUDA_KERNEL_LOOP(index, nthreads) { int x = index % topwidth; //w-pos int y = (index / topwidth) % topheight; //h-pos int c = (index / topwidth / topheight) % topchannels; //channels // Offset of patch in image 2 int s2o = (c % neighborhood_grid_width - neighborhood_grid_radius) * stride2; int s2p = (c / neighborhood_grid_width - neighborhood_grid_radius) * stride2; // First (upper left) position of kernel center in current neighborhood in image 1 int x1 = x*stride1 + kernel_radius + max_displacement; int y1 = y*stride1 + kernel_radius + max_displacement; // Iterate through 3D patch Dtype sum = 0; for(int j = -kernel_radius; j <= kernel_radius; j++) { // HEIGHT for(int i = -kernel_radius; i <= kernel_radius; i++) { // WIDTH for(int l = 0; l < bottomchannels; l++) { // CHANNELS // Calculate position in image 2 int x2 = x1 + s2o; int y2 = y1 + s2p; // Indices 
in bottom data: (CH=l,W=x2,H=y2,N) int idx1 = ((item * bottomheight + y1+j) * bottomwidth + x1+i) * bottomchannels + l; int idx2 = ((item * bottomheight + y2+j) * bottomwidth + x2+i) * bottomchannels + l; // Do the correlation: sum += fabsf(bottom0[idx1] - bottom1[idx2]); } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; top[index + item*topcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 0) template <typename Dtype> __global__ void CorrelateDataBackward0Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, Dtype *bottom0diff, const Dtype *bottom0, const Dtype *bottom1, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int l = index % bottomwidth + pad_size; //w-pos int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels //Get X,Y ranges and clamp // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 int ymin = (m - 2*kernel_radius - max_displacement + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement) / stride1 // Same here: int xmax = (l - max_displacement + round_off_s1) / stride1 - round_off; // floor (l - max_displacement) / stride1 int ymax = (m - max_displacement + round_off_s1) / stride1 - round_off; // floor (m - max_displacement) / stride1 Dtype sum = 0; if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { // Get bottom1 data: int s2o = stride2 * o; int s2p = stride2 * p; int idxbot = ((item * pbottomheight + (m+s2p)) * pbottomwidth + (l+s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot]; // bottom0[l+s2o,m+s2p,n] Dtype bot1tmp = bottom1[idxbot]; // bottom1[l+s2o,m+s2p,n] Dtype sign = (bot0tmp >= bot1tmp) ? 
Dtype(1.0) : Dtype(-1.0); // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p] int idxopoffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxopoffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * sign; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; bottom0diff[index + item*bottomcount] = sum / (float)sumelems; } } // == Correlation Backward Pass Kernel (For Blob 1) template <typename Dtype> __global__ void CorrelateDataBackward1Subtract(const int nthreads, int num, int item, int topwidth, int topheight, int topchannels, int max_displacement, int neighborhood_grid_radius, int neighborhood_grid_width, int kernel_radius, int stride1, int stride2, int bottomwidth, int bottomheight, int pbottomwidth, int pbottomheight, int bottomchannels, int bottomcount, int pad_size, const Dtype *bottom0, const Dtype *bottom1, Dtype *bottom1diff, const Dtype *topdiff) { CUDA_KERNEL_LOOP(index, nthreads) { int l = index % bottomwidth + pad_size; //w-pos int m = (index / bottomwidth) % bottomheight + pad_size; //h-pos int n = (index / bottomwidth / bottomheight) % bottomchannels; //channels // round_off is a trick to enable integer division with ceil, even for negative numbers // We use a large offset, for the inner part not to become negative. const int round_off = ROUND_OFF; const int round_off_s1 = stride1 * round_off; Dtype sum = 0; for(int p = -neighborhood_grid_radius; p <= neighborhood_grid_radius; p++) { for(int o = -neighborhood_grid_radius; o <= neighborhood_grid_radius; o++) { int s2o = stride2 * o; int s2p = stride2 * p; //Get X,Y ranges and clamp // We add round_off before_s1 the int division and subtract round_off after it, to ensure the formula matches ceil behavior: int xmin = (l - 2*kernel_radius - max_displacement - s2o + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 int ymin = (m - 2*kernel_radius - max_displacement - s2p + round_off_s1 - 1) / stride1 + 1 - round_off; // ceil (l - 2*kernel_radius - max_displacement - s2o) / stride1 // Same here: int xmax = (l - max_displacement - s2o + round_off_s1) / stride1 - round_off; // floor (l - max_displacement - s2o) / stride1 int ymax = (m - max_displacement - s2p + round_off_s1) / stride1 - round_off; // floor (m - max_displacement - s2p) / stride1 if(xmax>=0 && ymax>=0 && (xmin<=topwidth-1) && (ymin<=topheight-1)) { xmin = max(0,xmin); xmax = min(topwidth-1,xmax); ymin = max(0,ymin); ymax = min(topheight-1,ymax); // Get bottom0 data: int idxbot = ((item * pbottomheight + (m-s2p)) * pbottomwidth + (l-s2o)) * bottomchannels + n; Dtype bot0tmp = bottom0[idxbot]; // bottom0[l+s2o,m+s2p,n] Dtype bot1tmp = bottom1[idxbot]; // bottom1[l+s2o,m+s2p,n] Dtype sign = (bot0tmp >= bot1tmp) ? 
Dtype(-1.0) : Dtype(1.0); // Index offset for topdiff in following loops: int op = (p+neighborhood_grid_radius) * neighborhood_grid_width + (o+neighborhood_grid_radius); // index [o,p] int idxOpOffset = (item * topchannels + op); for(int y = ymin; y <= ymax; y++) { for(int x = xmin; x <= xmax; x++) { int idxtopdiff = (idxOpOffset * topheight + y) * topwidth + x; // topdiff[x,y,o,p] sum += topdiff[idxtopdiff] * sign; } } } } } const int sumelems = (kernel_radius*2+1)*(kernel_radius*2+1)*bottomchannels; bottom1diff[index + item*bottomcount] = sum / (float)sumelems; } } // == Forward template <typename Dtype> void CorrelationLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { CHECK_EQ(bottom.size(),2); CHECK_EQ(top.size(),1); const int bnum = bottom[0]->num(); const int bchannels = bottom[0]->channels(); const int bheight = bottom[0]->height(); const int bwidth = bottom[0]->width(); const int bwidthheight = bwidth * bheight; const int topcount = top_width_ * top_height_ * top_channels_; dim3 threadsPerBlock(THREADS_PER_WARP * WARPS_PER_BLOCK); cudaMemset(rbot1_->mutable_gpu_data(), 0, rbot1_->count()*sizeof(Dtype)); cudaMemset(rbot2_->mutable_gpu_data(), 0, rbot2_->count()*sizeof(Dtype)); int threads_per_block=16; dim3 totalBlocksRearr((bwidthheight-1)/threads_per_block+1, bchannels, bnum); const int pwidthheight = (bwidth + 2 * pad_size_) * (bheight + 2 * pad_size_); blob_rearrange_kernel2<Dtype><<<totalBlocksRearr,threads_per_block>>> (bottom[0]->gpu_data(),rbot1_->mutable_gpu_data(),bnum,bchannels,bwidth,bheight,bwidthheight,pad_size_,pwidthheight); blob_rearrange_kernel2<Dtype><<<totalBlocksRearr,threads_per_block>>> (bottom[1]->gpu_data(),rbot2_->mutable_gpu_data(),bnum,bchannels,bwidth,bheight,bwidthheight,pad_size_,pwidthheight); const int num = bnum; const int channels = bchannels; const int height = bheight + 2*pad_size_; const int width = bwidth + 2*pad_size_; const int shared_memory_per_block = (kernel_size_*kernel_size_)*bchannels; if(corr_type_ == CorrelationParameter_CorrelationType_MULTIPLY) { // CorrelationLayer int topThreadCount = topcount; dim3 totalBlocksCorr(top_width_, top_height_, num); CorrelateData<Dtype><<<totalBlocksCorr, threadsPerBlock, shared_memory_per_block * sizeof(Dtype)>>>( topThreadCount, num, top_width_, top_height_, top_channels_, topcount, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, kernel_size_, stride1_, stride2_, width, height, channels, rbot1_->gpu_data(), rbot2_->gpu_data(), top[0]->mutable_gpu_data() ); CUDA_POST_KERNEL_CHECK; } else if(corr_type_ == CorrelationParameter_CorrelationType_SUBTRACT) { // CorrelationLayer for(int n = 0; n < num; n++) { int topThreadCount = topcount; CorrelateDataSubtract<Dtype><<<CAFFE_GET_BLOCKS(topThreadCount), CAFFE_CUDA_NUM_THREADS>>>( topThreadCount, num, n, top_width_, top_height_, top_channels_, topcount, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, channels, rbot1_->gpu_data(), rbot2_->gpu_data(), top[0]->mutable_gpu_data() ); CUDA_POST_KERNEL_CHECK; } } } template <typename Dtype> void CorrelationLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { // Get top diff, compute bottom diff const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom0_diff = bottom[0]->mutable_gpu_diff(); Dtype* bottom1_diff = bottom[1]->mutable_gpu_diff(); const Dtype* bottom0_data = 
bottom[0]->gpu_data(); const Dtype* bottom1_data = bottom[1]->gpu_data(); const int num = bottom[0]->num(); const int channels = bottom[0]->channels(); const int height = bottom[0]->height(); const int width = bottom[0]->width(); const int paddedheight = height + 2*pad_size_; const int paddedwidth = width + 2*pad_size_; const int bottomcount = channels * height * width; int botThreadCount = bottomcount; // CorrelationLayerBackward bottom0_diff = bottom[0]->mutable_gpu_diff(); bottom1_diff = bottom[1]->mutable_gpu_diff(); if(corr_type_ == CorrelationParameter_CorrelationType_MULTIPLY) { // == Run kernel Backward 0 dim3 totalBlocksBackward0(width, height, channels * num); //First dim is fastest dim3 threadsPerBlockBackward0(THREADS_PER_WARP * WARPS_PER_BLOCK); const int buffer_size_backw0 = ((int)ceil((float)(2 * kernel_radius_) / (float)stride1_) + 1) * top_channels_; // == Run kernel Backward 0 for(int n = 0; n < num; n++) { //Bottom0: CorrelateDataBackward0<Dtype><<<CAFFE_GET_BLOCKS(botThreadCount), CAFFE_CUDA_NUM_THREADS>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, bottom0_diff, rbot2_->gpu_data(), top_diff ); CUDA_POST_KERNEL_CHECK; } // == Run kernel Backward 1 for(int n = 0; n < num; n++) { CorrelateDataBackward1<Dtype><<<CAFFE_GET_BLOCKS(botThreadCount), CAFFE_CUDA_NUM_THREADS>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, rbot1_->gpu_data(), bottom1_diff, top_diff ); CUDA_POST_KERNEL_CHECK; } } else if(corr_type_ == CorrelationParameter_CorrelationType_SUBTRACT) { for(int n = 0; n < num; n++) { //Bottom0: CorrelateDataBackward0Subtract<Dtype><<<CAFFE_GET_BLOCKS(botThreadCount), CAFFE_CUDA_NUM_THREADS>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, bottom0_diff, rbot1_->gpu_data(), rbot2_->gpu_data(), top_diff ); CUDA_POST_KERNEL_CHECK; } for(int n = 0; n < num; n++) { //Bottom0: CorrelateDataBackward1Subtract<Dtype><<<CAFFE_GET_BLOCKS(botThreadCount), CAFFE_CUDA_NUM_THREADS>>>( botThreadCount, num, n, top_width_, top_height_, top_channels_, max_displacement_, neighborhood_grid_radius_, neighborhood_grid_width_, kernel_radius_, stride1_, stride2_, width, height, paddedwidth, paddedheight, channels, bottomcount, pad_size_, rbot1_->gpu_data(), rbot2_->gpu_data(), bottom1_diff, top_diff ); CUDA_POST_KERNEL_CHECK; } } } INSTANTIATE_LAYER_GPU_FUNCS(CorrelationLayer); } // namespace caffe
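/*
 * Illustrative sketch (not part of the layer above): the backward kernels rely on a
 * ROUND_OFF trick because C/C++ integer division truncates toward zero, so plain
 * division does not give floor() or ceil() for negative numerators. Adding a large
 * multiple of the divisor keeps the numerator positive, and the offset is subtracted
 * again afterwards. The value 50000 below is a stand-in; the actual ROUND_OFF
 * constant is defined earlier in the original file.
 */
#include <cassert>
#include <cmath>

namespace round_off_sketch {

static const int kRoundOff = 50000; // stand-in for ROUND_OFF

// floor(a / stride) for possibly negative a, matching the xmax/ymax formulas above.
inline int floor_div(int a, int stride) {
  const int round_off_s = stride * kRoundOff;
  return (a + round_off_s) / stride - kRoundOff;
}

// ceil(a / stride) for possibly negative a, matching the xmin/ymin formulas above.
inline int ceil_div(int a, int stride) {
  const int round_off_s = stride * kRoundOff;
  return (a + round_off_s - 1) / stride + 1 - kRoundOff;
}

// Small self-check against the floating-point reference.
inline void self_check() {
  for (int a = -25; a <= 25; ++a) {
    for (int stride = 1; stride <= 4; ++stride) {
      assert(floor_div(a, stride) == (int)std::floor((double)a / stride));
      assert(ceil_div(a, stride) == (int)std::ceil((double)a / stride));
    }
  }
}

} // namespace round_off_sketch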
#include <Eigen/Core> #include <df/camera/poly3.h> // TODO #include <df/transform/rigid.h> #include <df/transform/nonrigid.h> #include <df/util/cudaHelpers.h> #include <df/util/dualQuaternion.h> // TODO #include <df/voxel/color.h> #include <df/voxel/probability.h> #include <df/voxel/compositeVoxel.h> #include <df/voxel/tsdf.h> #include <df/voxel/voxelGrid.h> //TODO #include <stdio.h> namespace df { template <typename ... NonTsdfVoxelTs> struct RequiresColorFrameFusion; template <> struct RequiresColorFrameFusion<> { static constexpr bool Value = false; }; template <typename HeadVoxelT, typename ... TailVoxelTs> struct RequiresColorFrameFusion<HeadVoxelT,TailVoxelTs...> { static constexpr bool Value = internal::FusionTypeTraits<HeadVoxelT>::frame == internal::ColorFrame || RequiresColorFrameFusion<TailVoxelTs...>::Value; }; template <typename Scalar,typename ... NonTsdfVoxelTs> struct NoColorFusionHandler { __device__ inline void doColorFusion(CompositeVoxel<Scalar,TsdfVoxel,NonTsdfVoxelTs...> &, const Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> &, const Scalar, typename internal::FusionTypeTraits<NonTsdfVoxelTs>::template PackedInput<Scalar> ...) const { } }; template <typename Scalar, typename VoxelT, typename CompositeVoxelT> struct SingleColorFusionHandler { typedef Eigen::Matrix<Scalar,2,1,Eigen::DontAlign> Vec2; __device__ inline void doFusion(DeviceVoxelGrid<Scalar,CompositeVoxelT> & voxel, const Vec2 colorFrameProjection, const Scalar signedDistance, const typename internal::FusionTypeTraits<VoxelT>::template PackedInput<Scalar> & voxelInput) { } }; template <typename Scalar, typename CompositeVoxelT> struct SingleColorFusionHandler<Scalar,ColorVoxel, CompositeVoxelT> { typedef Eigen::Matrix<Scalar,2,1,Eigen::DontAlign> Vec2; typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3; __device__ static inline void doFusion(CompositeVoxelT & voxel, const Vec2 colorFrameProjection, const Scalar signedDistance, const typename internal::FusionTypeTraits<ColorVoxel>::template PackedInput<Scalar> & voxelInput) { if (fabs(signedDistance) < voxelInput.truncationDistance) { const DeviceTensor2<Vec3> & colorImage = voxelInput.colorImage; if (voxelInput.colorImage.inBounds(colorFrameProjection(0),colorFrameProjection(1),Scalar(2))) { const Vec3 color = colorImage.interpolate(colorFrameProjection(0),colorFrameProjection(1)); voxel.template fuse<ColorVoxel>(color,1.f,voxelInput.maxWeight); } } } }; template <typename Scalar, typename CompositeVoxelT> struct SingleColorFusionHandler<Scalar, ProbabilityVoxel, CompositeVoxelT> { typedef Eigen::Matrix<Scalar,2,1,Eigen::DontAlign> Vec2; typedef Eigen::Matrix<Scalar,10,1,Eigen::DontAlign> Vec; __device__ static inline void doFusion(CompositeVoxelT & voxel, const Vec2 colorFrameProjection, const Scalar signedDistance, const typename internal::FusionTypeTraits<ProbabilityVoxel>::template PackedInput<Scalar> & voxelInput) { if (fabs(signedDistance) < voxelInput.truncationDistance) { const DeviceTensor2<Vec> & colorImage = voxelInput.colorImage; if (voxelInput.colorImage.inBounds(colorFrameProjection(0),colorFrameProjection(1),Scalar(2))) { const Vec color = colorImage.interpolate(colorFrameProjection(0),colorFrameProjection(1)); voxel.template fuse<ProbabilityVoxel>(color,1.f,voxelInput.maxWeight); } } } }; template <typename Scalar, typename CompositeVoxelT, typename ... NonTsdfVoxelTs> struct ColorFusionForLoop; template <typename Scalar, typename CompositeVoxelT, typename HeadVoxelT, typename ... 
TailVoxelTs> struct ColorFusionForLoop<Scalar,CompositeVoxelT,HeadVoxelT,TailVoxelTs...> { typedef Eigen::Matrix<Scalar,2,1,Eigen::DontAlign> Vec2; __device__ static inline void doFusion(CompositeVoxelT & voxel, const Vec2 & colorFrameProjection, const Scalar signedDistance, typename internal::FusionTypeTraits<HeadVoxelT>::PackedInput<Scalar> headInput, typename internal::FusionTypeTraits<TailVoxelTs>::template PackedInput<Scalar> ... tailInputs) { SingleColorFusionHandler<Scalar,HeadVoxelT,CompositeVoxelT>::doFusion(voxel,colorFrameProjection,signedDistance,headInput); ColorFusionForLoop<Scalar,CompositeVoxelT,TailVoxelTs...>::doFusion(voxel,colorFrameProjection,signedDistance,tailInputs...); } }; template <typename Scalar, typename CompositeVoxelT> struct ColorFusionForLoop<Scalar,CompositeVoxelT> { typedef Eigen::Matrix<Scalar,2,1,Eigen::DontAlign> Vec2; __device__ static inline void doFusion(CompositeVoxelT & voxel, const Vec2 & colorFrameProjection, const Scalar signedDistance) { } }; template <typename Scalar, typename ColorCameraModelT, typename ... NonTsdfVoxelTs> struct ColorFusionHandler { const Sophus::SE3<Scalar> T_cd; const ColorCameraModelT colorCameraModel; typedef Eigen::Matrix<Scalar,2,1,Eigen::DontAlign> Vec2; typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3; __device__ inline void doColorFusion(CompositeVoxel<Scalar,TsdfVoxel,NonTsdfVoxelTs...> & voxel, const Vec3 & liveDepthCoord, const Scalar signedDistance, typename internal::FusionTypeTraits<NonTsdfVoxelTs>::template PackedInput<Scalar> ... nonTsdfInput) const { const Vec3 liveColorCoord = T_cd * liveDepthCoord; const Vec2 projectedColorCoord = colorCameraModel.project(liveColorCoord); ColorFusionForLoop<Scalar,CompositeVoxel<Scalar,TsdfVoxel,NonTsdfVoxelTs...>,NonTsdfVoxelTs...>::doFusion(voxel, projectedColorCoord, signedDistance, nonTsdfInput...); } }; template <typename Scalar, typename TransformerT, typename DepthCameraModelT, typename DepthT, typename ColorFusionHandlerT, typename ... NonTsdfVoxelTs> __global__ void fuseFrameKernel(DeviceVoxelGrid<Scalar,CompositeVoxel<Scalar,TsdfVoxel,NonTsdfVoxelTs...> > voxelGrid, const typename TransformerT::DeviceModule transformer, const DepthCameraModelT depthCameraModel, const Scalar truncationDistance, const DeviceTensor2<DepthT> depthMap, const ColorFusionHandlerT colorFusionHandler, typename internal::FusionTypeTraits<NonTsdfVoxelTs>::template PackedInput<Scalar> ... nonTsdfInput) { typedef Eigen::Matrix<Scalar,3,1,Eigen::DontAlign> Vec3; typedef Eigen::Matrix<Scalar,2,1,Eigen::DontAlign> Vec2; typedef Eigen::Matrix<int,3,1,Eigen::DontAlign> Vec3i; typedef Eigen::Matrix<int,2,1,Eigen::DontAlign> Vec2i; static constexpr Scalar border = Scalar(5); static constexpr Scalar maxWeight = Scalar(50); const int x = threadIdx.x + blockIdx.x * blockDim.x; const int y = threadIdx.y + blockIdx.y * blockDim.y; // TODO for (int z = threadIdx.z; z < voxelGrid.size(2); z += blockDim.z) { const Vec3i gridCoord(x,y,z); const Vec3 worldCoord = voxelGrid.gridToWorld(gridCoord); const Vec3 liveCoord = transformer.transformWorldToLive(worldCoord); // printf("%f,%f,%f\n",liveCoord(0),liveCoord(1),liveCoord(2)); if (liveCoord(2) <= 0) { // the point is behind the camera; continue; } const Vec2 liveProjection = depthCameraModel.project(liveCoord); if (!depthMap.inBounds(liveProjection,border)) { // the point is out-of-frame continue; } // TODO: use bilinear interpolation? // TODO: or take min of 4 surrounding depths? 
const Vec2i discretizedProjection = round(liveProjection); const DepthT d = depthMap(discretizedProjection); if (d <= DepthT(0)) { // no depth measurement continue; } const Scalar signedDistance = d - liveCoord(2); if (signedDistance < -truncationDistance) { // the point is too far behind the observation continue; } const Scalar truncatedSignedDistance = signedDistance > truncationDistance ? truncationDistance : signedDistance; CompositeVoxel<Scalar,TsdfVoxel,NonTsdfVoxelTs...> & voxel = voxelGrid(x,y,z); voxel.template fuse<TsdfVoxel>(truncatedSignedDistance,1.f,maxWeight); colorFusionHandler.doColorFusion(voxel,liveCoord,signedDistance,nonTsdfInput...); } } template <typename Scalar, typename TransformerT, typename CameraModelT, typename DepthT, typename ... NonTsdfVoxelTs> void fuseFrame(DeviceVoxelGrid<Scalar,CompositeVoxel<Scalar,TsdfVoxel,NonTsdfVoxelTs...> > & voxelGrid, const TransformerT & transformer, const CameraModelT & cameraModel, const DeviceTensor2<DepthT> & depthMap, const Scalar truncationDistance, typename internal::FusionTypeTraits<NonTsdfVoxelTs>::template PackedInput<Scalar> ... nonTsdfInput) { dim3 block(16,16,4); dim3 grid(intDivideAndCeil(voxelGrid.size(0),block.x), intDivideAndCeil(voxelGrid.size(1),block.y), 1); static_assert(!RequiresColorFrameFusion<NonTsdfVoxelTs...>::Value, "this function is for fusion into depth frame only"); fuseFrameKernel<Scalar,TransformerT,CameraModelT,DepthT,NoColorFusionHandler<Scalar,NonTsdfVoxelTs...>,NonTsdfVoxelTs...><<<grid,block>>> (voxelGrid,transformer.deviceModule(),cameraModel, truncationDistance,depthMap, NoColorFusionHandler<Scalar,NonTsdfVoxelTs...>(), nonTsdfInput ...); cudaDeviceSynchronize(); CheckCudaDieOnError(); } template <typename Scalar, typename TransformerT, typename DepthCameraModelT, typename ColorCameraModelT, typename DepthT, typename ... NonTsdfVoxelTs> void fuseFrame(DeviceVoxelGrid<Scalar,CompositeVoxel<Scalar,TsdfVoxel,NonTsdfVoxelTs...> > & voxelGrid, const TransformerT & transformer, const DepthCameraModelT & depthCameraModel, const ColorCameraModelT & colorCameraModel, const Sophus::SE3<Scalar> & T_cd, const DeviceTensor2<DepthT> & depthMap, const Scalar truncationDistance, typename internal::FusionTypeTraits<NonTsdfVoxelTs>::template PackedInput<Scalar> ... 
nonTsdfInput) { dim3 block(16,16,4); dim3 grid(intDivideAndCeil(voxelGrid.size(0),block.x), intDivideAndCeil(voxelGrid.size(1),block.y), 1); static_assert(RequiresColorFrameFusion<NonTsdfVoxelTs...>::Value, "this function is for fusion into both depth and color frame"); fuseFrameKernel<Scalar,TransformerT,DepthCameraModelT,DepthT,ColorFusionHandler<Scalar,ColorCameraModelT,NonTsdfVoxelTs...>,NonTsdfVoxelTs...><<<grid,block>>> (voxelGrid,transformer.deviceModule(),depthCameraModel, truncationDistance,depthMap, { T_cd, colorCameraModel }, nonTsdfInput ...); cudaDeviceSynchronize(); CheckCudaDieOnError(); } template void fuseFrame(DeviceVoxelGrid<float,CompositeVoxel<float,TsdfVoxel> > &, const RigidTransformer<float> &, const Poly3CameraModel<float> &, const DeviceTensor2<float> &, const float); template void fuseFrame(DeviceVoxelGrid<float,CompositeVoxel<float,TsdfVoxel> > &, const NonrigidTransformer<float,DualQuaternion> &, const Poly3CameraModel<float> &, const DeviceTensor2<float> &, const float); template void fuseFrame(DeviceVoxelGrid<float,CompositeVoxel<float,TsdfVoxel> > &, const NonrigidTransformer<float,Sophus::SE3> &, const Poly3CameraModel<float> &, const DeviceTensor2<float> &, const float); template void fuseFrame(DeviceVoxelGrid<float,CompositeVoxel<float,TsdfVoxel,ColorVoxel> > &, const RigidTransformer<float> &, const Poly3CameraModel<float> &, const Poly3CameraModel<float> &, const Sophus::SE3f &, const DeviceTensor2<float> &, const float, typename internal::FusionTypeTraits<ColorVoxel>::PackedInput<float> ); template void fuseFrame(DeviceVoxelGrid<float,CompositeVoxel<float,TsdfVoxel,ColorVoxel> > &, const NonrigidTransformer<float,DualQuaternion> &, const Poly3CameraModel<float> &, const Poly3CameraModel<float> &, const Sophus::SE3f &, const DeviceTensor2<float> &, const float, typename internal::FusionTypeTraits<ColorVoxel>::PackedInput<float>); template void fuseFrame(DeviceVoxelGrid<float,CompositeVoxel<float,TsdfVoxel,ColorVoxel> > &, const NonrigidTransformer<float,Sophus::SE3> &, const Poly3CameraModel<float> &, const Poly3CameraModel<float> &, const Sophus::SE3f &, const DeviceTensor2<float> &, const float, typename internal::FusionTypeTraits<ColorVoxel>::PackedInput<float>); template void fuseFrame(DeviceVoxelGrid<float,CompositeVoxel<float,TsdfVoxel,ProbabilityVoxel> > &, const RigidTransformer<float> &, const Poly3CameraModel<float> &, const Poly3CameraModel<float> &, const Sophus::SE3f &, const DeviceTensor2<float> &, const float, typename internal::FusionTypeTraits<ProbabilityVoxel>::PackedInput<float> ); } // namespace df
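/*
 * Minimal sketch (an assumption, not the actual TsdfVoxel implementation in
 * df/voxel/tsdf.h) of the weighted running-average update that fuseFrameKernel
 * relies on when it calls voxel.fuse<TsdfVoxel>(truncatedSignedDistance, 1.f,
 * maxWeight): each new observation is blended into the stored distance by its
 * weight, and the accumulated weight is clamped so the voxel can still adapt to
 * later frames.
 */
struct SimpleTsdfVoxelSketch {
    float sdf;    // current truncated signed distance estimate
    float weight; // accumulated observation weight

    __host__ __device__ inline void fuse(const float newSdf,
                                         const float newWeight,
                                         const float maxWeight) {
        // weighted average of the stored estimate and the new observation
        sdf = (sdf * weight + newSdf * newWeight) / (weight + newWeight);
        // cap the accumulated weight so old data does not dominate forever
        weight = fminf(weight + newWeight, maxWeight);
    }
};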
static __thread unsigned long* err_addr; static __thread unsigned long* err_expect; static __thread unsigned long* err_current; static __thread unsigned long* err_second_read; static __thread unsigned int* err_count; static __thread unsigned int unreported_errors=0; __thread struct timeval last_report_time; extern unsigned int report_interval; __thread unsigned int firsttime=1; __thread char time_buf[128]; extern unsigned exit_on_error ; extern unsigned int email_notification; extern char emails[]; extern unsigned int global_pattern; extern unsigned long global_pattern_long; extern unsigned int num_iterations; extern unsigned int num_passes; extern char driver_info[MAX_STR_LEN]; #define MAX_ERR_RECORD_COUNT 10 #ifdef SM_10 #define atomicAdd(x, y) do{ (*x) = (*x) + y ;}while(0) #define RECORD_ERR(err, p, expect, current) do{ \ atomicAdd(err, 1); \ }while(0) #else #define RECORD_ERR(err, p, expect, current) do{ \ unsigned int idx = atomicAdd(err, 1); \ idx = idx % MAX_ERR_RECORD_COUNT; \ err_addr[idx] = (unsigned long)p; \ err_expect[idx] = (unsigned long)expect; \ err_current[idx] = (unsigned long)current; \ err_second_read[idx] = (unsigned long)(*p); \ }while(0) #endif #define MAX_ITERATION 3 char* time_string(void) { struct timeval tv; gettimeofday(&tv, NULL); struct tm tm; if (localtime_r(&tv.tv_sec, &tm) == NULL){ fprintf(stderr, "ERROR: in getting time\n"); exit(ERR_GENERAL); } sprintf(time_buf, "%02d/%02d/%04d %02d:%02d:%02d", tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,tm.tm_hour, tm.tm_min, tm.tm_sec); return time_buf; } unsigned int error_checking(const char* msg, unsigned int blockidx) { unsigned int err = 0; unsigned long host_err_addr[MAX_ERR_RECORD_COUNT]; unsigned long host_err_expect[MAX_ERR_RECORD_COUNT]; unsigned long host_err_current[MAX_ERR_RECORD_COUNT]; unsigned long host_err_second_read[MAX_ERR_RECORD_COUNT]; unsigned int i; cudaMemcpy((void*)&err, (void*)err_count, sizeof(unsigned int), cudaMemcpyDeviceToHost);CUERR; cudaMemcpy((void*)&host_err_addr[0], (void*)err_addr, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT, cudaMemcpyDeviceToHost);CUERR; cudaMemcpy((void*)&host_err_expect[0], (void*)err_expect, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT, cudaMemcpyDeviceToHost);CUERR; cudaMemcpy((void*)&host_err_current[0], (void*)err_current, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT, cudaMemcpyDeviceToHost);CUERR; cudaMemcpy((void*)&host_err_second_read[0], (void*)err_second_read, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT, cudaMemcpyDeviceToHost);CUERR; #define ERR_MSG_LENGTH 4096 char error_msg[ERR_MSG_LENGTH]; char* emsg = error_msg; if (err){ emsg += sprintf(emsg, "Unreported errors since last email: %d\n", unreported_errors); FPRINTF("ERROR: %s", driver_info); emsg += sprintf(emsg, "ERROR: %s", driver_info); #if !defined(NVML_DEVICE_SERIAL_BUFFER_SIZE) char devSerialNum[] = "unknown (no NVML found)"; #else char devSerialNum[NVML_DEVICE_SERIAL_BUFFER_SIZE]; get_serial_number( gpu_idx, devSerialNum ); #endif FPRINTF("ERROR: The unit serial number is %s\n", devSerialNum); emsg += sprintf(emsg, "ERROR: The unit serial number is %s\n", devSerialNum); FPRINTF("ERROR: (%s) %d errors found in block %d\n", msg, err, blockidx); emsg += sprintf(emsg, "ERROR: (%s) %d errors found in block %d\n", msg, err, blockidx); FPRINTF("ERROR: the last %d error addresses are:\t", MIN(MAX_ERR_RECORD_COUNT, err)); emsg += sprintf(emsg, "ERROR: the last %d error addresses are:\t", MIN(MAX_ERR_RECORD_COUNT, err)); for (i =0;i < MIN(MAX_ERR_RECORD_COUNT, err); i++){ fprintf(stderr, "%p\t", 
(void*)host_err_addr[i]); emsg += sprintf(emsg, "%p\t", (void*)host_err_addr[i]); } fprintf(stderr, "\n"); emsg += sprintf(emsg, "\n"); for (i =0; i < MIN(MAX_ERR_RECORD_COUNT, err); i++){ FPRINTF("ERROR: %dth error, expected value=0x%lx, current value=0x%lx, diff=0x%lx (second_read=0x%lx, expect=0x%lx, diff with expected value=0x%lx)\n", i, host_err_expect[i], host_err_current[i], host_err_expect[i] ^ host_err_current[i], host_err_second_read[i], host_err_expect[i] , host_err_expect[i] ^ host_err_second_read[i]); emsg += sprintf(emsg, "ERROR: %dth error, expected value=0x%lx, current value=0x%lx, diff=0x%lx (second_read=0x%lx, expect=0x%lx, diff with expected value=0x%lx)\n", i, host_err_expect[i], host_err_current[i], host_err_expect[i] ^ host_err_current[i], host_err_second_read[i], host_err_expect[i], host_err_expect[i] ^ host_err_second_read[i]); } if (email_notification){ struct timeval tv; gettimeofday(&tv, NULL); if ( firsttime || TDIFF(tv, last_report_time) > report_interval) { FPRINTF("ERROR: reporting this error to %s\n", emails); #define CMD_LENGTH (ERR_MSG_LENGTH + 256) char cmd[CMD_LENGTH]; error_msg[ERR_MSG_LENGTH -1] = 0; snprintf(cmd, CMD_LENGTH, "echo \"%s cuda_memtest errors found in %s[%d]\n%s\" |%s -s \" cuda_memtest errors found in %s[%d]\" %s", time_string(), hostname,gpu_idx, error_msg, MAILFILE, hostname,gpu_idx, emails ); int sendMailResult = system(cmd); if(sendMailResult != 0){ FPRINTF("ERROR: can not send the error report via mail to %s, '%s' returned an error\n", emails, MAILFILE); } firsttime = 0; unreported_errors = 0; last_report_time = tv; }else{ FPRINTF("ERROR: this error is not email reported\n"); unreported_errors ++; } } cudaMemset(err_count, 0, sizeof(unsigned int));CUERR; cudaMemset((void*)&err_addr[0], 0, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT);CUERR; cudaMemset((void*)&err_expect[0], 0, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT);CUERR; cudaMemset((void*)&err_current[0], 0, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT);CUERR; if (exit_on_error){ cudaDeviceReset(); exit(ERR_BAD_STATE); } } return err; } unsigned int get_random_num(void) { struct timeval t0; if (gettimeofday(&t0, NULL) !=0){ fprintf(stderr, "ERROR: gettimeofday() failed\n"); exit(ERR_GENERAL); } unsigned int seed= (unsigned int)t0.tv_sec; srand(seed); return rand_r(&seed); } uint64_t get_random_num_long(void) { struct timeval t0; if (gettimeofday(&t0, NULL) !=0){ fprintf(stderr, "ERROR: gettimeofday() failed\n"); exit(ERR_GENERAL); } unsigned int seed= (unsigned int)t0.tv_sec; srand(seed); unsigned int a = rand_r(&seed); unsigned int b = rand_r(&seed); uint64_t ret = ((uint64_t)a) << 32; ret |= ((uint64_t)b); return ret; } __global__ void kernel_move_inv_write(char* _ptr, char* end_ptr, unsigned int pattern) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned int*) end_ptr) { return; } for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){ ptr[i] = pattern; } return; } __global__ void kernel_move_inv_readwrite(char* _ptr, char* end_ptr, unsigned int p1, unsigned int p2, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned int*) end_ptr) { return; } for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){ if (ptr[i] != p1){ RECORD_ERR(err, &ptr[i], p1, ptr[i]); } ptr[i] = p2; } return; } __global__ void kernel_move_inv_read(char* _ptr, char* end_ptr, 
unsigned int pattern, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read ) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned int*) end_ptr) { return; } for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){ if (ptr[i] != pattern){ RECORD_ERR(err, &ptr[i], pattern, ptr[i]); } } return; } unsigned int move_inv_test(char* ptr, unsigned int tot_num_blocks, unsigned int p1, unsigned p2) { unsigned int i; unsigned int err = 0; char* end_ptr = ptr + tot_num_blocks* BLOCKSIZE; for (i= 0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_move_inv_write<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, p1); SYNC_CUERR; SHOW_PROGRESS("move_inv_write", i, tot_num_blocks); } for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_move_inv_readwrite<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, p1, p2, err_count, err_addr, err_expect, err_current, err_second_read); SYNC_CUERR; err += error_checking("move_inv_readwrite", i); SHOW_PROGRESS("move_inv_readwrite", i, tot_num_blocks); } for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_move_inv_read<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, p2, err_count, err_addr, err_expect, err_current, err_second_read); SYNC_CUERR; err += error_checking("move_inv_read", i); SHOW_PROGRESS("move_inv_read", i, tot_num_blocks); } return err; } /* * Test0 [Walking 1 bit] * This test changes one bit a time in memory address to see it * goes to a different memory location. It is designed to test * the address wires. */ /* __global__ void kernel_test0_write(char* _ptr, char* end_ptr, unsigned int pattern, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned int*) end_ptr) { return; } for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){ ptr[i] = pattern; } return; } __global__ void kernel_test0_readwrite(char* _ptr, char* end_ptr, unsigned int pattern, unsigned int prev_pattern, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned int*) end_ptr) { return; } for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){ if (ptr[i] != prev_pattern){ RECORD_ERR(err, &ptr[i], prev_pattern, ptr[i]); } ptr[i] = pattern; } return; } void test0(char* ptr, unsigned int tot_num_blocks) { unsigned int i,j; char* end_ptr = ptr + tot_num_blocks* BLOCKSIZE; for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; unsigned int prev_pattern = 0; //the first iteration unsigned int pattern = 1; kernel_test0_write<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, pattern, err_count, err_addr, err_expect, err_current, err_second_read); CUERR; prev_pattern =pattern; for (j =1; j < 32; j++){ pattern = 1 << j; kernel_test0_readwrite<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, pattern, prev_pattern, err_count, err_addr, err_expect, err_current, err_second_read); CUERR; prev_pattern = pattern; } error_checking(__FUNCTION__, i); SHOW_PROGRESS(__FUNCTION__, i, tot_num_blocks); } return; } */ __global__ void kernel_test0_global_write(char* _ptr, char* _end_ptr) { unsigned int* ptr = (unsigned int*)_ptr; unsigned int* end_ptr = (unsigned int*)_end_ptr; unsigned int* 
orig_ptr = ptr; unsigned int pattern = 1; unsigned long mask = 4; *ptr = pattern; while(ptr < end_ptr){ ptr = (unsigned int*) ( ((unsigned long)orig_ptr) | mask); if (ptr == orig_ptr){ mask = mask <<1; continue; } if (ptr >= end_ptr){ break; } *ptr = pattern; pattern = pattern << 1; mask = mask << 1; } return; } __global__ void kernel_test0_global_read(char* _ptr, char* _end_ptr, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read) { unsigned int* ptr = (unsigned int*)_ptr; unsigned int* end_ptr = (unsigned int*)_end_ptr; unsigned int* orig_ptr = ptr; unsigned int pattern = 1; unsigned long mask = 4; if (*ptr != pattern){ RECORD_ERR(err, ptr, pattern, *ptr); } while(ptr < end_ptr){ ptr = (unsigned int*) ( ((unsigned long)orig_ptr) | mask); if (ptr == orig_ptr){ mask = mask <<1; continue; } if (ptr >= end_ptr){ break; } if (*ptr != pattern){ RECORD_ERR(err, ptr, pattern, *ptr); } pattern = pattern << 1; mask = mask << 1; } return; } __global__ void kernel_test0_write(char* _ptr, char* end_ptr) { unsigned int* orig_ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);; unsigned int* ptr = orig_ptr; if (ptr >= (unsigned int*) end_ptr) { return; } unsigned int* block_end = orig_ptr + BLOCKSIZE/sizeof(unsigned int); unsigned int pattern = 1; unsigned long mask = 4; *ptr = pattern; while(ptr < block_end){ ptr = (unsigned int*) ( ((unsigned long)orig_ptr) | mask); if (ptr == orig_ptr){ mask = mask <<1; continue; } if (ptr >= block_end){ break; } *ptr = pattern; pattern = pattern << 1; mask = mask << 1; } return; } __global__ void kernel_test0_read(char* _ptr, char* end_ptr, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read) { unsigned int* orig_ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);; unsigned int* ptr = orig_ptr; if (ptr >= (unsigned int*) end_ptr) { return; } unsigned int* block_end = orig_ptr + BLOCKSIZE/sizeof(unsigned int); unsigned int pattern = 1; unsigned long mask = 4; if (*ptr != pattern){ RECORD_ERR(err, ptr, pattern, *ptr); } while(ptr < block_end){ ptr = (unsigned int*) ( ((unsigned long)orig_ptr) | mask); if (ptr == orig_ptr){ mask = mask <<1; continue; } if (ptr >= block_end){ break; } if (*ptr != pattern){ RECORD_ERR(err, ptr, pattern, *ptr); } pattern = pattern << 1; mask = mask << 1; } return; } void test0(char* ptr, unsigned int tot_num_blocks) { unsigned int i; char* end_ptr = ptr + tot_num_blocks* BLOCKSIZE; //test global address kernel_test0_global_write<<<1, 1>>>(ptr, end_ptr); SYNC_CUERR; kernel_test0_global_read<<<1, 1>>>(ptr, end_ptr, err_count, err_addr, err_expect, err_current, err_second_read); SYNC_CUERR; error_checking("test0 on global address", 0); for(unsigned int ite = 0;ite < num_iterations; ite++){ for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_test0_write<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr); SYNC_CUERR; SHOW_PROGRESS("test0 on writing", i, tot_num_blocks); } for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_test0_read<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, err_count, err_addr, err_expect, err_current, err_second_read); SYNC_CUERR; error_checking(__FUNCTION__, i); SHOW_PROGRESS("test0 on reading", i, tot_num_blocks); } } return; } /********************************************************************************* * test1 * Each Memory location is filled with its own address. 
The next kernel checks if the * value in each memory location still agrees with the address. * ********************************************************************************/ __global__ void kernel_test1_write(char* _ptr, char* end_ptr, unsigned int* err) { unsigned int i; unsigned long* ptr = (unsigned long*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned long*) end_ptr) { return; } for (i = 0;i < BLOCKSIZE/sizeof(unsigned long); i++){ ptr[i] =(unsigned long) & ptr[i]; } return; } __global__ void kernel_test1_read(char* _ptr, char* end_ptr, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read) { unsigned int i; unsigned long* ptr = (unsigned long*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned long*) end_ptr) { return; } for (i = 0;i < BLOCKSIZE/sizeof(unsigned long); i++){ if (ptr[i] != (unsigned long)& ptr[i]){ RECORD_ERR(err, &ptr[i], (unsigned long)&ptr[i], ptr[i]); } } return; } void test1(char* ptr, unsigned int tot_num_blocks) { unsigned int i; char* end_ptr = ptr + tot_num_blocks* BLOCKSIZE; for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_test1_write<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, err_count); SYNC_CUERR; SHOW_PROGRESS("test1 on writing", i, tot_num_blocks); } for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_test1_read<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, err_count, err_addr, err_expect, err_current, err_second_read); SYNC_CUERR; error_checking("test1 on reading", i); SHOW_PROGRESS("test1 on reading", i, tot_num_blocks); } return; } /****************************************************************************** * Test 2 [Moving inversions, ones&zeros] * This test uses the moving inversions algorithm with patterns of all * ones and zeros. * ****************************************************************************/ void test2(char* ptr, unsigned int tot_num_blocks) { unsigned int p1 = 0; unsigned int p2 = ~p1; DEBUG_PRINTF("Test2: Moving inversions test, with pattern 0x%x and 0x%x\n", p1, p2); move_inv_test(ptr, tot_num_blocks, p1, p2); DEBUG_PRINTF("Test2: Moving inversions test, with pattern 0x%x and 0x%x\n", p2, p1); move_inv_test(ptr, tot_num_blocks, p2, p1); } /************************************************************************* * * Test 3 [Moving inversions, 8 bit pat] * This is the same as test 1 but uses a 8 bit wide pattern of * "walking" ones and zeros. This test will better detect subtle errors * in "wide" memory chips. * **************************************************************************/ void test3(char* ptr, unsigned int tot_num_blocks) { unsigned int p0=0x80; unsigned int p1 = p0 | (p0 << 8) | (p0 << 16) | (p0 << 24); unsigned int p2 = ~p1; DEBUG_PRINTF("Test3: Moving inversions test, with pattern 0x%x and 0x%x\n", p1, p2); move_inv_test(ptr, tot_num_blocks, p1, p2); DEBUG_PRINTF("Test3: Moving inversions test, with pattern 0x%x and 0x%x\n", p2, p1); move_inv_test(ptr, tot_num_blocks, p2, p1); } /************************************************************************************ * Test 4 [Moving inversions, random pattern] * Test 4 uses the same algorithm as test 1 but the data pattern is a * random number and it's complement. This test is particularly effective * in finding difficult to detect data sensitive errors. A total of 60 * patterns are used. The random number sequence is different with each pass * so multiple passes increase effectiveness. 
* *************************************************************************************/ void test4(char* ptr, unsigned int tot_num_blocks) { unsigned int p1; if (global_pattern == 0){ p1 = get_random_num(); }else{ p1 = global_pattern; } unsigned int p2 = ~p1; unsigned int err = 0; unsigned int iteration = 0; DEBUG_PRINTF("Test4: Moving inversions test, with random pattern 0x%x and 0x%x\n", p1, p2); repeat: err += move_inv_test(ptr, tot_num_blocks, p1, p2); if (err == 0 && iteration == 0){ return; } if (iteration < MAX_ITERATION){ PRINTF("%dth repeating test4 because there are %d errors found in last run\n", iteration, err); iteration++; err = 0; goto repeat; } } /************************************************************************************ * Test 5 [Block move, 64 moves] * This test stresses memory by moving block memories. Memory is initialized * with shifting patterns that are inverted every 8 bytes. Then blocks * of memory are moved around. After the moves * are completed the data patterns are checked. Because the data is checked * only after the memory moves are completed it is not possible to know * where the error occurred. The addresses reported are only for where the * bad pattern was found. * * *************************************************************************************/ __global__ void kernel_test5_init(char* _ptr, char* end_ptr) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned int*) end_ptr) { return; } unsigned int p1 = 1; for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i+=16){ unsigned int p2 = ~p1; ptr[i] = p1; ptr[i+1] = p1; ptr[i+2] = p2; ptr[i+3] = p2; ptr[i+4] = p1; ptr[i+5] = p1; ptr[i+6] = p2; ptr[i+7] = p2; ptr[i+8] = p1; ptr[i+9] = p1; ptr[i+10] = p2; ptr[i+11] = p2; ptr[i+12] = p1; ptr[i+13] = p1; ptr[i+14] = p2; ptr[i+15] = p2; p1 = p1<<1; if (p1 == 0){ p1 = 1; } } return; } __global__ void kernel_test5_move(char* _ptr, char* end_ptr) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned int*) end_ptr) { return; } unsigned int half_count = BLOCKSIZE/sizeof(unsigned int)/2; unsigned int* ptr_mid = ptr + half_count; for (i = 0;i < half_count; i++){ ptr_mid[i] = ptr[i]; } for (i=0;i < half_count - 8; i++){ ptr[i + 8] = ptr_mid[i]; } for (i=0;i < 8; i++){ ptr[i] = ptr_mid[half_count - 8 + i]; } return; } __global__ void kernel_test5_check(char* _ptr, char* end_ptr, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned int*) end_ptr) { return; } for (i=0;i < BLOCKSIZE/sizeof(unsigned int); i+=2){ if (ptr[i] != ptr[i+1]){ RECORD_ERR(err, &ptr[i], ptr[i+1], ptr[i]); } } return; } void test5(char* ptr, unsigned int tot_num_blocks) { unsigned int i; char* end_ptr = ptr + tot_num_blocks* BLOCKSIZE; for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_test5_init<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr); SYNC_CUERR; SHOW_PROGRESS("test5[init]", i, tot_num_blocks); } for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_test5_move<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr); SYNC_CUERR; SHOW_PROGRESS("test5[move]", i, tot_num_blocks); } for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_test5_check<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, err_count, err_addr, err_expect, err_current, 
err_second_read); SYNC_CUERR; error_checking("test5[check]", i); SHOW_PROGRESS("test5[check]", i, tot_num_blocks); } return; } /***************************************************************************************** * Test 6 [Moving inversions, 32 bit pat] * This is a variation of the moving inversions algorithm that shifts the data * pattern left one bit for each successive address. The starting bit position * is shifted left for each pass. To use all possible data patterns 32 passes * are required. This test is quite effective at detecting data sensitive * errors but the execution time is long. * ***************************************************************************************/ __global__ void kernel_movinv32_write(char* _ptr, char* end_ptr, unsigned int pattern, unsigned int lb, unsigned int sval, unsigned int offset) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned int*) end_ptr) { return; } unsigned int k = offset; unsigned pat = pattern; for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){ ptr[i] = pat; k++; if (k >= 32){ k=0; pat = lb; }else{ pat = pat << 1; pat |= sval; } } return; } __global__ void kernel_movinv32_readwrite(char* _ptr, char* end_ptr, unsigned int pattern, unsigned int lb, unsigned int sval, unsigned int offset, unsigned int * err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned int*) end_ptr) { return; } unsigned int k = offset; unsigned pat = pattern; for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){ if (ptr[i] != pat){ RECORD_ERR(err, &ptr[i], pat, ptr[i]); } ptr[i] = ~pat; k++; if (k >= 32){ k=0; pat = lb; }else{ pat = pat << 1; pat |= sval; } } return; } __global__ void kernel_movinv32_read(char* _ptr, char* end_ptr, unsigned int pattern, unsigned int lb, unsigned int sval, unsigned int offset, unsigned int * err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned int*) end_ptr) { return; } unsigned int k = offset; unsigned pat = pattern; for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){ if (ptr[i] != ~pat){ RECORD_ERR(err, &ptr[i], ~pat, ptr[i]); } k++; if (k >= 32){ k=0; pat = lb; }else{ pat = pat << 1; pat |= sval; } } return; } void movinv32(char* ptr, unsigned int tot_num_blocks, unsigned int pattern, unsigned int lb, unsigned int sval, unsigned int offset) { unsigned int i; char* end_ptr = ptr + tot_num_blocks* BLOCKSIZE; for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_movinv32_write<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, pattern, lb,sval, offset); SYNC_CUERR; SHOW_PROGRESS("test6[moving inversion 32 write]", i, tot_num_blocks); } for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_movinv32_readwrite<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, pattern, lb,sval, offset, err_count, err_addr, err_expect, err_current, err_second_read); SYNC_CUERR; error_checking("test6[moving inversion 32 readwrite]", i); SHOW_PROGRESS("test6[moving inversion 32 readwrite]", i, tot_num_blocks); } for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_movinv32_read<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, pattern, lb,sval, offset, err_count, err_addr, err_expect, err_current, err_second_read); 
SYNC_CUERR; error_checking("test6[moving inversion 32 read]", i); SHOW_PROGRESS("test6[moving inversion 32 read]", i, tot_num_blocks); } return; } void test6(char* ptr, unsigned int tot_num_blocks) { unsigned int i; unsigned int pattern; for (i= 0, pattern = 1;i < 32; pattern = pattern << 1, i++){ DEBUG_PRINTF("Test6[move inversion 32 bits test]: pattern =0x%x, offset=%d\n", pattern, i); movinv32(ptr, tot_num_blocks, pattern, 1, 0, i); DEBUG_PRINTF("Test6[move inversion 32 bits test]: pattern =0x%x, offset=%d\n", ~pattern, i); movinv32(ptr, tot_num_blocks, ~pattern, 0xfffffffe, 1, i); } } /****************************************************************************** * Test 7 [Random number sequence] * * This test writes a series of random numbers into memory. A block (1 MB) of memory * is initialized with random patterns. These patterns and their complements are * used in moving inversions test with rest of memory. * * *******************************************************************************/ __global__ void kernel_test7_write(char* _ptr, char* end_ptr, char* _start_ptr, unsigned int* err) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); unsigned int* start_ptr = (unsigned int*) _start_ptr; if (ptr >= (unsigned int*) end_ptr) { return; } for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){ ptr[i] = start_ptr[i]; } return; } __global__ void kernel_test7_readwrite(char* _ptr, char* end_ptr, char* _start_ptr, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); unsigned int* start_ptr = (unsigned int*) _start_ptr; if (ptr >= (unsigned int*) end_ptr) { return; } for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){ if (ptr[i] != start_ptr[i]){ RECORD_ERR(err, &ptr[i], start_ptr[i], ptr[i]); } ptr[i] = ~(start_ptr[i]); } return; } __global__ void kernel_test7_read(char* _ptr, char* end_ptr, char* _start_ptr, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); unsigned int* start_ptr = (unsigned int*) _start_ptr; if (ptr >= (unsigned int*) end_ptr) { return; } for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){ if (ptr[i] != ~(start_ptr[i])){ RECORD_ERR(err, &ptr[i], ~(start_ptr[i]), ptr[i]); } } return; } void test7(char* ptr, unsigned int tot_num_blocks) { unsigned int* host_buf; host_buf = (unsigned int*)malloc(BLOCKSIZE); unsigned int err = 0; unsigned int i; unsigned int iteration = 0; for (i = 0;i < BLOCKSIZE/sizeof(unsigned int);i++){ host_buf[i] = get_random_num(); } cudaMemcpy(ptr, host_buf, BLOCKSIZE, cudaMemcpyHostToDevice); char* end_ptr = ptr + tot_num_blocks* BLOCKSIZE; repeat: for (i=1;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_test7_write<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, ptr, err_count); SYNC_CUERR; SHOW_PROGRESS("test7_write", i, tot_num_blocks); } for (i=1;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_test7_readwrite<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, ptr, err_count, err_addr, err_expect, err_current, err_second_read); SYNC_CUERR; err += error_checking("test7_readwrite", i); SHOW_PROGRESS("test7_readwrite", i, tot_num_blocks); } for (i=1;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_test7_read<<<grid, 1>>>(ptr + i*BLOCKSIZE, 
end_ptr, ptr, err_count, err_addr, err_expect, err_current, err_second_read); SYNC_CUERR; err += error_checking("test7_read", i); SHOW_PROGRESS("test7_read", i, tot_num_blocks); } if (err == 0 && iteration == 0){ return; } if (iteration < MAX_ITERATION){ PRINTF("%dth repeating test7 because there are %d errors found in last run\n", iteration, err); iteration++; err = 0; goto repeat; } } /*********************************************************************************** * Test 8 [Modulo 20, random pattern] * * A random pattern is generated. This pattern is used to set every 20th memory location * in memory. The rest of the memory location is set to the complimemnt of the pattern. * Repeat this for 20 times and each time the memory location to set the pattern is shifted right. * * **********************************************************************************/ __global__ void kernel_modtest_write(char* _ptr, char* end_ptr, unsigned int offset, unsigned int p1, unsigned int p2) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned int*) end_ptr) { return; } for (i = offset;i < BLOCKSIZE/sizeof(unsigned int); i+=MOD_SZ){ ptr[i] =p1; } for (i = 0;i < BLOCKSIZE/sizeof(unsigned int); i++){ if (i % MOD_SZ != offset){ ptr[i] =p2; } } return; } __global__ void kernel_modtest_read(char* _ptr, char* end_ptr, unsigned int offset, unsigned int p1, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read) { unsigned int i; unsigned int* ptr = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE); if (ptr >= (unsigned int*) end_ptr) { return; } for (i = offset;i < BLOCKSIZE/sizeof(unsigned int); i+=MOD_SZ){ if (ptr[i] !=p1){ RECORD_ERR(err, &ptr[i], p1, ptr[i]); } } return; } unsigned int modtest(char* ptr, unsigned int tot_num_blocks, unsigned int offset, unsigned int p1, unsigned int p2) { unsigned int i; char* end_ptr = ptr + tot_num_blocks* BLOCKSIZE; unsigned int err = 0; for (i= 0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_modtest_write<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, offset, p1, p2); SYNC_CUERR; SHOW_PROGRESS("test8[mod test, write]", i, tot_num_blocks); } for (i= 0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_modtest_read<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, offset, p1, err_count, err_addr, err_expect, err_current, err_second_read); SYNC_CUERR; err += error_checking("test8[mod test, read", i); SHOW_PROGRESS("test8[mod test, read]", i, tot_num_blocks); } return err; } void test8(char* ptr, unsigned int tot_num_blocks) { unsigned int i; unsigned int err = 0; unsigned int iteration = 0; unsigned int p1; if (global_pattern){ p1 = global_pattern; }else{ p1= get_random_num(); } unsigned int p2 = ~p1; repeat: PRINTF("test8[mod test]: p1=0x%x, p2=0x%x\n", p1,p2); for (i = 0;i < MOD_SZ; i++){ err += modtest(ptr, tot_num_blocks,i, p1, p2); } if (err == 0 && iteration == 0){ return; } if (iteration < MAX_ITERATION){ PRINTF("%dth repeating test8 because there are %d errors found in last run, p1=%x, p2=%x\n", iteration, err, p1, p2); iteration++; err = 0; goto repeat; } } /************************************************************************************ * * Test 9 [Bit fade test, 90 min, 2 patterns] * The bit fade test initializes all of memory with a pattern and then * sleeps for 90 minutes. Then memory is examined to see if any memory bits * have changed. All ones and all zero patterns are used. 
This test takes * 3 hours to complete. The Bit Fade test is disabled by default * **********************************************************************************/ void test9(char* ptr, unsigned int tot_num_blocks) { unsigned int p1 = 0; unsigned int p2 = ~p1; unsigned int i; char* end_ptr = ptr + tot_num_blocks* BLOCKSIZE; for (i= 0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_move_inv_write<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, p1); SYNC_CUERR; SHOW_PROGRESS("test9[bit fade test, write]", i, tot_num_blocks); } DEBUG_PRINTF("sleeping for 90 minutes\n"); sleep(60*90); for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_move_inv_readwrite<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, p1, p2, err_count, err_addr, err_expect, err_current, err_second_read); SYNC_CUERR; error_checking("test9[bit fade test, readwrite]", i); SHOW_PROGRESS("test9[bit fade test, readwrite]", i, tot_num_blocks); } DEBUG_PRINTF("sleeping for 90 minutes\n"); sleep(60*90); for (i=0;i < tot_num_blocks; i+= GRIDSIZE){ dim3 grid; grid.x= GRIDSIZE; kernel_move_inv_read<<<grid, 1>>>(ptr + i*BLOCKSIZE, end_ptr, p2, err_count, err_addr, err_expect, err_current, err_second_read); SYNC_CUERR; error_checking("test9[bit fade test, read]", i); SHOW_PROGRESS("test9[bit fade test, read]", i, tot_num_blocks); } return; } /************************************************************************************** * Test10 [memory stress test] * * Stress memory as much as we can. A random pattern is generated and a kernel of large grid size * and block size is launched to set all memory to the pattern. A new read and write kernel is launched * immediately after the previous write kernel to check if there is any errors in memory and set the * memory to the compliment. This process is repeated for 1000 times for one pattern. The kernel is * written as to achieve the maximum bandwidth between the global memory and GPU. * This will increase the chance of catching software error. In practice, we found this test quite useful * to flush hardware errors as well. 
* */ #define TYPE unsigned long __global__ void test10_kernel_write(char* ptr, int memsize, TYPE p1) { int i; int avenumber = memsize/(gridDim.x*gridDim.y); TYPE* mybuf = (TYPE*)(ptr + blockIdx.x* avenumber); int n = avenumber/(blockDim.x*sizeof(TYPE)); for(i=0;i < n;i++){ int index = i*blockDim.x + threadIdx.x; mybuf[index]= p1; } int index = n*blockDim.x + threadIdx.x; if (index*sizeof(TYPE) < avenumber){ mybuf[index] = p1; } return; } __global__ void test10_kernel_readwrite(char* ptr, int memsize, TYPE p1, TYPE p2, unsigned int* err, unsigned long* err_addr, unsigned long* err_expect, unsigned long* err_current, unsigned long* err_second_read) { int i; int avenumber = memsize/(gridDim.x*gridDim.y); TYPE* mybuf = (TYPE*)(ptr + blockIdx.x* avenumber); int n = avenumber/(blockDim.x*sizeof(TYPE)); TYPE localp; for(i=0;i < n;i++){ int index = i*blockDim.x + threadIdx.x; localp = mybuf[index]; if (localp != p1){ RECORD_ERR(err, &mybuf[index], p1, localp); } mybuf[index] = p2; } int index = n*blockDim.x + threadIdx.x; if (index*sizeof(TYPE) < avenumber){ localp = mybuf[index]; if (localp!= p1){ RECORD_ERR(err, &mybuf[index], p1, localp); } mybuf[index] = p2; } return; } //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } #define STRESS_BLOCKSIZE 64 #define STRESS_GRIDSIZE (1024*32) void test10(char* ptr, unsigned int tot_num_blocks) { TYPE p1; if (global_pattern_long){ p1 = global_pattern_long; }else{ p1 = get_random_num_long(); } TYPE p2 = ~p1; cudaStream_t stream; cudaEvent_t start, stop; checkCudaErrors(cudaStreamCreate(&stream)); checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); int n = num_iterations; float elapsedtime; dim3 gridDim(STRESS_GRIDSIZE); dim3 blockDim(STRESS_BLOCKSIZE); checkCudaErrors(cudaEventRecord(start, stream)); PRINTF("Test10 with pattern=0x%lx\n", p1); test10_kernel_write<<<gridDim, blockDim, 0, stream>>>(ptr, tot_num_blocks*BLOCKSIZE, p1); SYNC_CUERR; for(int i =0;i < n ;i ++){ test10_kernel_readwrite<<<gridDim, blockDim, 0, stream>>>(ptr, tot_num_blocks*BLOCKSIZE, p1, p2, err_count, err_addr, err_expect, err_current, err_second_read); SYNC_CUERR; p1 = ~p1; p2 = ~p2; } cudaEventRecord(stop, stream); cudaEventSynchronize(stop); error_checking("test10[Memory stress test]", 0); cudaEventElapsedTime(&elapsedtime, start, stop); DEBUG_PRINTF("test10: elapsedtime=%f, bandwidth=%f GB/s\n", elapsedtime, (2*n+1)*tot_num_blocks/elapsedtime); cudaEventDestroy(start); cudaEventDestroy(stop); cudaStreamDestroy(stream); #if 0 TYPE* host_buf = (TYPE*)malloc(tot_num_blocks*BLOCKSIZE); if (host_buf == NULL){ printf("ERROR: malloc failed for host_buf\n"); exit(ERR_GENERAL); } memset(host_buf, 0, tot_num_blocks* BLOCKSIZE); cudaMemcpy(host_buf, ptr, tot_num_blocks*BLOCKSIZE, cudaMemcpyDeviceToHost); for(unsigned long i=0;i < (tot_num_blocks*BLOCKSIZE)/sizeof(TYPE) ;i ++){ if (host_buf[i] != p1){ PRINTF("ERROR: data not match for i=%d, expecting 0x%x, current value=0x%x\n", i, p1, host_buf[i]); free(host_buf); exit(ERR_GENERAL); } } printf("all data match\n"); free(host_buf); #endif } 
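/* Illustrative helper (not in the original file) making the bandwidth arithmetic of
 * test10 explicit: the initial write pass touches every byte once and each of the n
 * read+write iterations touches every byte twice, so the total traffic is
 * (2*n + 1) * tot_num_blocks * BLOCKSIZE bytes. cudaEventElapsedTime() reports
 * milliseconds, hence the 1.0e6 divisor to convert bytes per millisecond into GB/s.
 * BLOCKSIZE is the per-block byte count already used throughout this file. */
static double test10_bandwidth_gb_per_s(unsigned int tot_num_blocks, int n,
                                        float elapsed_ms)
{
    double bytes = (2.0 * n + 1.0) * (double)tot_num_blocks * (double)BLOCKSIZE;
    return bytes / ((double)elapsed_ms * 1.0e6);
}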
cuda_memtest_t cuda_memtests[]={ {test0, (char*)"Test0 [Walking 1 bit]", 1}, {test1, (char*)"Test1 [Own address test]", 1}, {test2, (char*)"Test2 [Moving inversions, ones&zeros]", 1}, {test3, (char*)"Test3 [Moving inversions, 8 bit pat]", 1}, {test4, (char*)"Test4 [Moving inversions, random pattern]",1}, {test5, (char*)"Test5 [Block move, 64 moves]", 1}, {test6, (char*)"Test6 [Moving inversions, 32 bit pat]", 1}, {test7, (char*)"Test7 [Random number sequence]", 1}, {test8, (char*)"Test8 [Modulo 20, random pattern]", 1}, {test9, (char*)"Test9 [Bit fade test]", 0}, {test10, (char*)"Test10 [Memory stress test]", 1}, }; void allocate_small_mem(void) { cudaMalloc((void**)&err_count, sizeof(unsigned int)); CUERR; cudaMemset(err_count, 0, sizeof(unsigned int)); CUERR; cudaMalloc((void**)&err_addr, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT);CUERR; cudaMemset(err_addr, 0, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT); cudaMalloc((void**)&err_expect, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT);CUERR; cudaMemset(err_expect, 0, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT); cudaMalloc((void**)&err_current, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT);CUERR; cudaMemset(err_current, 0, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT); cudaMalloc((void**)&err_second_read, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT);CUERR; cudaMemset(err_second_read, 0, sizeof(unsigned long)*MAX_ERR_RECORD_COUNT); } void run_tests(char* ptr, unsigned int tot_num_blocks) { struct timeval t0, t1; unsigned int i; unsigned int pass = 0; while(1){ for (i = 0;i < DIM(cuda_memtests); i++){ if (cuda_memtests[i].enabled){ PRINTF("%s\n", cuda_memtests[i].desc); gettimeofday(&t0, NULL); cuda_memtests[i].func(ptr, tot_num_blocks); gettimeofday(&t1, NULL); PRINTF("Test%d finished in %.1f seconds\n", i, TDIFF(t1, t0)); }//if }//for if (num_passes <= 0){ continue; } pass ++; if (pass >= num_passes){ break; } }//while }
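/*
 * Each entry in cuda_memtests[] above couples a test function with a description and an
 * `enabled` flag; run_tests() skips entries whose flag is 0, which is why test9 (the
 * ~3-hour bit fade test) does not run by default. A minimal sketch of selecting a single
 * test before calling run_tests(); enable_only() is a hypothetical helper, not part of
 * cuda_memtest.
 */
static void enable_only(unsigned int idx) {
    for (unsigned int i = 0; i < DIM(cuda_memtests); i++)
        cuda_memtests[i].enabled = (i == idx) ? 1 : 0;
}
/* usage sketch: enable_only(9); run_tests(ptr, tot_num_blocks);  -- runs only the bit fade test */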
namespace mn { struct mgsp_benchmark { using streamIdx = Cuda::StreamIndex; using eventIdx = Cuda::EventIndex; using host_allocator = heap_allocator; struct device_allocator { // hide the global one void *allocate(std::size_t bytes) { void *ret; checkCudaErrors(cudaMalloc(&ret, bytes)); return ret; } void deallocate(void *p, std::size_t) { checkCudaErrors(cudaFree(p)); } }; struct temp_allocator { explicit temp_allocator(int did) : did{did} {} void *allocate(std::size_t bytes) { return Cuda::ref_cuda_context(did).borrow(bytes); } void deallocate(void *p, std::size_t) {} int did; }; template <std::size_t I> void initParticles() { auto &cuDev = Cuda::ref_cuda_context(I); cuDev.setContext(); tmps[I].alloc(config::g_max_active_block); for (int copyid = 0; copyid < 2; copyid++) { gridBlocks[copyid].emplace_back(device_allocator{}); particleBins[copyid].emplace_back( ParticleBuffer<get_material_type(I)>{device_allocator{}}); partitions[copyid].emplace_back(device_allocator{}, config::g_max_active_block); } cuDev.syncStream<streamIdx::Compute>(); inputHaloGridBlocks.emplace_back(g_device_cnt); outputHaloGridBlocks.emplace_back(g_device_cnt); particles[I] = spawn<particle_array_, orphan_signature>(device_allocator{}); checkedCnts[I][0] = 0; checkedCnts[I][1] = 0; curNumActiveBlocks[I] = config::g_max_active_block; curNumActiveBins[I] = config::g_max_particle_bin; /// tail-recursion optimization if constexpr (I + 1 < config::g_device_cnt) initParticles<I + 1>(); } mgsp_benchmark() : dtDefault{1e-4}, curTime{0.f}, rollid{0}, curFrame{0}, curStep{0}, fps{24}, bRunning{true} { // data _hostData = spawn<signed_distance_field_, orphan_signature>(host_allocator{}); collisionObjs.resize(config::g_device_cnt); initParticles<0>(); fmt::print("{} -vs- {}\n", match(particleBins[0][0])([&](auto &pb) { return pb.size; }), match(particleBins[0][1])([&](auto &pb) { return pb.size; })); // tasks for (int did = 0; did < config::g_device_cnt; ++did) { ths[did] = std::thread([this](int did) { this->gpu_worker(did); }, did); } } ~mgsp_benchmark() { auto is_empty = [this]() { for (int did = 0; did < config::g_device_cnt; ++did) if (!jobs[did].empty()) return false; return true; }; do { cv_slave.notify_all(); } while (!is_empty()); bRunning = false; for (auto &th : ths) th.join(); } void initModel(int devid, const std::vector<std::array<float, 3>> &model) { auto &cuDev = Cuda::ref_cuda_context(devid); cuDev.setContext(); pcnt[devid] = model.size(); fmt::print("init model[{}] with {} particles\n", devid, pcnt[devid]); cudaMemcpyAsync((void *)&particles[devid].val_1d(_0, 0), model.data(), sizeof(std::array<float, 3>) * model.size(), cudaMemcpyDefault, cuDev.stream_compute()); cuDev.syncStream<streamIdx::Compute>(); std::string fn = std::string{"model"} + "_dev[" + std::to_string(devid) + "]_frame[0].bgeo"; IO::insert_job([fn, model]() { write_partio<float, 3>(fn, model); }); IO::flush(); } void initBoundary(std::string fn) { initFromSignedDistanceFile(fn, vec<std::size_t, 3>{(std::size_t)1024, (std::size_t)1024, (std::size_t)512}, _hostData); for (int did = 0; did < config::g_device_cnt; ++did) { auto &cuDev = Cuda::ref_cuda_context(did); cuDev.setContext(); collisionObjs[did] = SignedDistanceGrid{device_allocator{}}; collisionObjs[did]->init(_hostData, cuDev.stream_compute()); cuDev.syncStream<streamIdx::Compute>(); } } template <typename CudaContext> void exclScan(std::size_t cnt, int const *const in, int *out, CudaContext &cuDev) { #if 1 auto policy = thrust::cuda::par.on((cudaStream_t)cuDev.stream_compute()); 
thrust::exclusive_scan(policy, getDevicePtr(in), getDevicePtr(in) + cnt, getDevicePtr(out)); #else std::size_t temp_storage_bytes = 0; auto plus_op = [] __device__(const int &a, const int &b) { return a + b; }; checkCudaErrors(cub::DeviceScan::ExclusiveScan(nullptr, temp_storage_bytes, in, out, plus_op, 0, cnt, cuDev.stream_compute())); void *d_tmp = tmps[cuDev.getDevId()].d_tmp; checkCudaErrors(cub::DeviceScan::ExclusiveScan(d_tmp, temp_storage_bytes, in, out, plus_op, 0, cnt, cuDev.stream_compute())); #endif } float getMass(int did) { return match(particleBins[rollid][did])( [&](const auto &particleBuffer) { return particleBuffer.mass; }); } void checkCapacity(int did) { if (ebcnt[did] > curNumActiveBlocks[did] * 3 / 4 && checkedCnts[did][0] == 0) { curNumActiveBlocks[did] = curNumActiveBlocks[did] * 3 / 2; checkedCnts[did][0] = 2; fmt::print(fmt::emphasis::bold, "resizing blocks {} -> {}\n", ebcnt[did], curNumActiveBlocks[did]); } if (bincnt[did] > curNumActiveBins[did] * 3 / 4 && checkedCnts[did][1] == 0) { curNumActiveBins[did] = curNumActiveBins[did] * 3 / 2; checkedCnts[did][1] = 2; fmt::print(fmt::emphasis::bold, "resizing bins {} -> {}\n", bincnt[did], curNumActiveBins[did]); } } /// thread local ctrl flow void gpu_worker(int did) { auto wait = [did, this]() { std::unique_lock<std::mutex> lk{this->mut_slave}; this->cv_slave.wait(lk, [did, this]() { return !this->bRunning || !this->jobs[did].empty(); }); }; auto signal = [this]() { std::unique_lock<std::mutex> lk{this->mut_ctrl}; this->idleCnt.fetch_add(1); lk.unlock(); this->cv_ctrl.notify_one(); }; auto &cuDev = Cuda::ref_cuda_context(did); cuDev.setContext(); fmt::print(fg(fmt::color::light_blue), "{}-th gpu worker operates on GPU {}\n", did, cuDev.getDevId()); while (this->bRunning) { wait(); auto job = this->jobs[did].try_pop(); if (job) (*job)(did); signal(); } fmt::print(fg(fmt::color::light_blue), "{}-th gpu worker exits\n", did); } void sync() { std::unique_lock<std::mutex> lk{mut_ctrl}; cv_ctrl.wait(lk, [this]() { return this->idleCnt == config::g_device_cnt; }); fmt::print(fmt::emphasis::bold, "-----------------------------------------------------------" "-----\n"); } void issue(std::function<void(int)> job) { std::unique_lock<std::mutex> lk{mut_slave}; for (int did = 0; did < config::g_device_cnt; ++did) jobs[did].push(job); idleCnt = 0; lk.unlock(); cv_slave.notify_all(); } void main_loop() { /// initial float nextTime = 1.f / fps; dt = compute_dt(0.f, curTime, nextTime, dtDefault); fmt::print(fmt::emphasis::bold, "{} --{}--> {}, defaultDt: {}\n", curTime, dt, nextTime, dtDefault); initial_setup(); curTime = dt; for (curFrame = 1; curFrame <= config::g_total_frame_cnt; ++curFrame) { for (; curTime < nextTime; curTime += dt, curStep++) { /// max grid vel issue([this](int did) { auto &cuDev = Cuda::ref_cuda_context(did); /// check capacity checkCapacity(did); float *d_maxVel = tmps[did].d_maxVel; CudaTimer timer{cuDev.stream_compute()}; timer.tick(); checkCudaErrors(cudaMemsetAsync(d_maxVel, 0, sizeof(float), cuDev.stream_compute())); if (collisionObjs[did]) cuDev.compute_launch( {(nbcnt[did] + g_num_grid_blocks_per_cuda_block - 1) / g_num_grid_blocks_per_cuda_block, g_num_warps_per_cuda_block * 32, g_num_warps_per_cuda_block}, update_grid_velocity_query_max, (uint32_t)nbcnt[did], // gridBlocks[0][did], partitions[rollid][did], dt, d_maxVel); gridBlocks[0][did], partitions[rollid][did], dt, (const SignedDistanceGrid)(*collisionObjs[did]), d_maxVel); else cuDev.compute_launch( {(nbcnt[did] + 
g_num_grid_blocks_per_cuda_block - 1) / g_num_grid_blocks_per_cuda_block, g_num_warps_per_cuda_block * 32, g_num_warps_per_cuda_block}, update_grid_velocity_query_max, (uint32_t)nbcnt[did], // gridBlocks[0][did], partitions[rollid][did], dt, d_maxVel); gridBlocks[0][did], partitions[rollid][did], dt, d_maxVel); checkCudaErrors(cudaMemcpyAsync(&maxVels[did], d_maxVel, sizeof(float), cudaMemcpyDefault, cuDev.stream_compute())); timer.tock(fmt::format("GPU[{}] frame {} step {} grid_update_query", did, curFrame, curStep)); }); sync(); /// host: compute maxvel & next dt float maxVel = 0.f; for (int did = 0; did < g_device_cnt; ++did) if (maxVels[did] > maxVel) maxVel = maxVels[did]; maxVel = std::sqrt(maxVel); nextDt = compute_dt(maxVel, curTime, nextTime, dtDefault); fmt::print(fmt::emphasis::bold, "{} --{}--> {}, defaultDt: {}, maxVel: {}\n", curTime, nextDt, nextTime, dtDefault, maxVel); /// g2p2g issue([this](int did) { auto &cuDev = Cuda::ref_cuda_context(did); CudaTimer timer{cuDev.stream_compute()}; /// check capacity if (checkedCnts[did][1] > 0) { match(particleBins[rollid ^ 1][did])([&](auto &pb) { pb.resize(device_allocator{}, curNumActiveBins[did]); }); checkedCnts[did][1]--; } timer.tick(); // grid gridBlocks[1][did].reset(nbcnt[did], cuDev); // adv map checkCudaErrors( cudaMemsetAsync(partitions[rollid][did]._ppcs, 0, sizeof(int) * ebcnt[did] * g_blockvolume, cuDev.stream_compute())); // g2p2g match(particleBins[rollid][did])([&](const auto &pb) { if (partitions[rollid][did].h_count) cuDev.compute_launch( {partitions[rollid][did].h_count, 128, (512 * 3 * 4) + (512 * 4 * 4)}, g2p2g, dt, nextDt, (const ivec3 *)partitions[rollid][did]._haloBlocks, pb, get<typename std::decay_t<decltype(pb)>>( particleBins[rollid ^ 1][did]), partitions[rollid ^ 1][did], partitions[rollid][did], gridBlocks[0][did], gridBlocks[1][did]); }); cuDev.syncStream<streamIdx::Compute>(); timer.tock(fmt::format("GPU[{}] frame {} step {} halo_g2p2g", did, curFrame, curStep)); }); sync(); collect_halo_grid_blocks(); issue([this](int did) { auto &cuDev = Cuda::ref_cuda_context(did); CudaTimer timer{cuDev.stream_compute()}; timer.tick(); match(particleBins[rollid][did])([&](const auto &pb) { cuDev.compute_launch( {pbcnt[did], 128, (512 * 3 * 4) + (512 * 4 * 4)}, g2p2g, dt, nextDt, (const ivec3 *)nullptr, pb, get<typename std::decay_t<decltype(pb)>>( particleBins[rollid ^ 1][did]), partitions[rollid ^ 1][did], partitions[rollid][did], gridBlocks[0][did], gridBlocks[1][did]); }); timer.tock(fmt::format("GPU[{}] frame {} step {} non_halo_g2p2g", did, curFrame, curStep)); if (checkedCnts[did][0] > 0) { partitions[rollid ^ 1][did].resizePartition( device_allocator{}, curNumActiveBlocks[did]); checkedCnts[did][0]--; } }); sync(); reduce_halo_grid_blocks(); issue([this](int did) { auto &cuDev = Cuda::ref_cuda_context(did); CudaTimer timer{cuDev.stream_compute()}; timer.tick(); /// mark particle blocks partitions[rollid][did].buildParticleBuckets(cuDev, ebcnt[did]); int *activeBlockMarks = tmps[did].activeBlockMarks, *destinations = tmps[did].destinations, *sources = tmps[did].sources; checkCudaErrors(cudaMemsetAsync(activeBlockMarks, 0, sizeof(int) * nbcnt[did], cuDev.stream_compute())); /// mark grid blocks cuDev.compute_launch({(nbcnt[did] * g_blockvolume + 127) / 128, 128}, mark_active_grid_blocks, (uint32_t)nbcnt[did], gridBlocks[1][did], activeBlockMarks); cuDev.compute_launch({(ebcnt[did] + 1 + 127) / 128, 128}, mark_active_particle_blocks, ebcnt[did] + 1, partitions[rollid][did]._ppbs, sources); exclScan(ebcnt[did] + 
1, sources, destinations, cuDev); /// building new partition // block count checkCudaErrors(cudaMemcpyAsync( partitions[rollid ^ 1][did]._cnt, destinations + ebcnt[did], sizeof(int), cudaMemcpyDefault, cuDev.stream_compute())); checkCudaErrors(cudaMemcpyAsync( &pbcnt[did], destinations + ebcnt[did], sizeof(int), cudaMemcpyDefault, cuDev.stream_compute())); cuDev.compute_launch({(ebcnt[did] + 255) / 256, 256}, exclusive_scan_inverse, ebcnt[did], (const int *)destinations, sources); // indextable, activeKeys, ppb, buckets partitions[rollid ^ 1][did].resetTable(cuDev.stream_compute()); cuDev.syncStream<streamIdx::Compute>(); cuDev.compute_launch({pbcnt[did], 128}, update_partition, (uint32_t)pbcnt[did], (const int *)sources, partitions[rollid][did], partitions[rollid ^ 1][did]); // binsts { int *binpbs = tmps[did].binpbs; cuDev.compute_launch({(pbcnt[did] + 1 + 127) / 128, 128}, compute_bin_capacity, pbcnt[did] + 1, (const int *)partitions[rollid ^ 1][did]._ppbs, binpbs); exclScan(pbcnt[did] + 1, binpbs, partitions[rollid ^ 1][did]._binsts, cuDev); checkCudaErrors(cudaMemcpyAsync( &bincnt[did], partitions[rollid ^ 1][did]._binsts + pbcnt[did], sizeof(int), cudaMemcpyDefault, cuDev.stream_compute())); cuDev.syncStream<streamIdx::Compute>(); } timer.tock(fmt::format("GPU[{}] frame {} step {} update_partition", did, curFrame, curStep)); /// neighboring blocks timer.tick(); cuDev.compute_launch({(pbcnt[did] + 127) / 128, 128}, register_neighbor_blocks, (uint32_t)pbcnt[did], partitions[rollid ^ 1][did]); auto prev_nbcnt = nbcnt[did]; checkCudaErrors(cudaMemcpyAsync( &nbcnt[did], partitions[rollid ^ 1][did]._cnt, sizeof(int), cudaMemcpyDefault, cuDev.stream_compute())); cuDev.syncStream<streamIdx::Compute>(); timer.tock( fmt::format("GPU[{}] frame {} step {} build_partition_for_grid", did, curFrame, curStep)); /// check capacity if (checkedCnts[did][0] > 0) { gridBlocks[0][did].resize(device_allocator{}, curNumActiveBlocks[did]); } /// rearrange grid blocks timer.tick(); gridBlocks[0][did].reset(ebcnt[did], cuDev); cuDev.compute_launch( {prev_nbcnt, g_blockvolume}, copy_selected_grid_blocks, (const ivec3 *)partitions[rollid][did]._activeKeys, partitions[rollid ^ 1][did], (const int *)activeBlockMarks, gridBlocks[1][did], gridBlocks[0][did]); cuDev.syncStream<streamIdx::Compute>(); timer.tock(fmt::format("GPU[{}] frame {} step {} copy_grid_blocks", did, curFrame, curStep)); /// check capacity if (checkedCnts[did][0] > 0) { gridBlocks[1][did].resize(device_allocator{}, curNumActiveBlocks[did]); tmps[did].resize(curNumActiveBlocks[did]); } }); sync(); /// halo tag halo_tagging(); issue([this](int did) { auto &cuDev = Cuda::ref_cuda_context(did); CudaTimer timer{cuDev.stream_compute()}; timer.tick(); /// exterior blocks cuDev.compute_launch({(pbcnt[did] + 127) / 128, 128}, register_exterior_blocks, (uint32_t)pbcnt[did], partitions[rollid ^ 1][did]); checkCudaErrors(cudaMemcpyAsync( &ebcnt[did], partitions[rollid ^ 1][did]._cnt, sizeof(int), cudaMemcpyDefault, cuDev.stream_compute())); cuDev.syncStream<streamIdx::Compute>(); fmt::print(fmt::emphasis::bold | fg(fmt::color::yellow), "block count on device {}: {}, {}, {} [{}]; {} [{}]\n", did, pbcnt[did], nbcnt[did], ebcnt[did], curNumActiveBlocks[did], bincnt[did], curNumActiveBins[did]); timer.tock(fmt::format( "GPU[{}] frame {} step {} build_partition_for_particles", did, curFrame, curStep)); }); sync(); rollid ^= 1; dt = nextDt; } issue([this](int did) { IO::flush(); output_model(did); }); sync(); nextTime = 1.f * (curFrame + 1) / fps; 
fmt::print(fmt::emphasis::bold | fg(fmt::color::red), "-----------------------------------------------------------" "-----\n"); } } void output_model(int did) { auto &cuDev = Cuda::ref_cuda_context(did); cuDev.setContext(); CudaTimer timer{cuDev.stream_compute()}; timer.tick(); int parcnt, *d_parcnt = (int *)cuDev.borrow(sizeof(int)); checkCudaErrors( cudaMemsetAsync(d_parcnt, 0, sizeof(int), cuDev.stream_compute())); match(particleBins[rollid][did])([&](const auto &pb) { cuDev.compute_launch({pbcnt[did], 128}, retrieve_particle_buffer, partitions[rollid][did], partitions[rollid ^ 1][did], pb, particles[did], d_parcnt); }); checkCudaErrors(cudaMemcpyAsync(&parcnt, d_parcnt, sizeof(int), cudaMemcpyDefault, cuDev.stream_compute())); cuDev.syncStream<streamIdx::Compute>(); fmt::print(fg(fmt::color::red), "total number of particles {}\n", parcnt); models[did].resize(parcnt); checkCudaErrors(cudaMemcpyAsync(models[did].data(), (void *)&particles[did].val_1d(_0, 0), sizeof(std::array<float, 3>) * (parcnt), cudaMemcpyDefault, cuDev.stream_compute())); cuDev.syncStream<streamIdx::Compute>(); std::string fn = std::string{"model"} + "_dev[" + std::to_string(did) + "]_frame[" + std::to_string(curFrame) + "].bgeo"; IO::insert_job( [fn, model = models[did]]() { write_partio<float, 3>(fn, model); }); timer.tock(fmt::format("GPU[{}] frame {} step {} retrieve_particles", did, curFrame, curStep)); } void initial_setup() { issue([this](int did) { auto &cuDev = Cuda::ref_cuda_context(did); cuDev.setContext(); CudaTimer timer{cuDev.stream_compute()}; timer.tick(); cuDev.compute_launch({(pcnt[did] + 255) / 256, 256}, activate_blocks, pcnt[did], particles[did], partitions[rollid ^ 1][did]); checkCudaErrors(cudaMemcpyAsync( &pbcnt[did], partitions[rollid ^ 1][did]._cnt, sizeof(int), cudaMemcpyDefault, cuDev.stream_compute())); timer.tock(fmt::format("GPU[{}] step {} init_table", did, curStep)); timer.tick(); cuDev.resetMem(); // particle block cuDev.compute_launch({(pcnt[did] + 255) / 256, 256}, build_particle_cell_buckets, pcnt[did], particles[did], partitions[rollid ^ 1][did]); // bucket, binsts cuDev.syncStream<streamIdx::Compute>(); partitions[rollid ^ 1][did].buildParticleBuckets(cuDev, pbcnt[did]); { int *binpbs = tmps[did].binpbs; cuDev.compute_launch({(pbcnt[did] + 1 + 127) / 128, 128}, compute_bin_capacity, pbcnt[did] + 1, (const int *)partitions[rollid ^ 1][did]._ppbs, binpbs); exclScan(pbcnt[did] + 1, binpbs, partitions[rollid ^ 1][did]._binsts, cuDev); checkCudaErrors(cudaMemcpyAsync( &bincnt[did], partitions[rollid ^ 1][did]._binsts + pbcnt[did], sizeof(int), cudaMemcpyDefault, cuDev.stream_compute())); cuDev.syncStream<streamIdx::Compute>(); } match(particleBins[rollid][did])([&](const auto &pb) { cuDev.compute_launch({pbcnt[did], 128}, array_to_buffer, particles[did], pb, partitions[rollid ^ 1][did]); }); // grid block cuDev.compute_launch({(pbcnt[did] + 127) / 128, 128}, register_neighbor_blocks, (uint32_t)pbcnt[did], partitions[rollid ^ 1][did]); checkCudaErrors(cudaMemcpyAsync( &nbcnt[did], partitions[rollid ^ 1][did]._cnt, sizeof(int), cudaMemcpyDefault, cuDev.stream_compute())); cuDev.syncStream<streamIdx::Compute>(); cuDev.compute_launch({(pbcnt[did] + 127) / 128, 128}, register_exterior_blocks, (uint32_t)pbcnt[did], partitions[rollid ^ 1][did]); checkCudaErrors(cudaMemcpyAsync( &ebcnt[did], partitions[rollid ^ 1][did]._cnt, sizeof(int), cudaMemcpyDefault, cuDev.stream_compute())); cuDev.syncStream<streamIdx::Compute>(); timer.tock(fmt::format("GPU[{}] step {} init_partition", did, 
curStep)); fmt::print(fmt::emphasis::bold | fg(fmt::color::yellow), "block count on device {}: {}, {}, {} [{}]; {} [{}]\n", did, pbcnt[did], nbcnt[did], ebcnt[did], curNumActiveBlocks[did], bincnt[did], curNumActiveBins[did]); }); sync(); halo_tagging(); issue([this](int did) { auto &cuDev = Cuda::ref_cuda_context(did); CudaTimer timer{cuDev.stream_compute()}; /// need to copy halo tag info as well partitions[rollid ^ 1][did].copy_to(partitions[rollid][did], ebcnt[did], cuDev.stream_compute()); checkCudaErrors(cudaMemcpyAsync( partitions[rollid][did]._activeKeys, partitions[rollid ^ 1][did]._activeKeys, sizeof(ivec3) * ebcnt[did], cudaMemcpyDefault, cuDev.stream_compute())); cuDev.syncStream<streamIdx::Compute>(); timer.tick(); gridBlocks[0][did].reset(nbcnt[did], cuDev); cuDev.compute_launch({(pcnt[did] + 255) / 256, 256}, rasterize, pcnt[did], particles[did], gridBlocks[0][did], partitions[rollid][did], dt, getMass(did)); cuDev.compute_launch({pbcnt[did], 128}, init_adv_bucket, (const int *)partitions[rollid][did]._ppbs, partitions[rollid][did]._blockbuckets); cuDev.syncStream<streamIdx::Compute>(); timer.tock(fmt::format("GPU[{}] step {} init_grid", did, curStep)); }); sync(); collect_halo_grid_blocks(0); reduce_halo_grid_blocks(0); } void halo_tagging() { issue([this](int did) { auto &cuDev = Cuda::ref_cuda_context(did); cuDev.resetMem(); for (int otherdid = 0; otherdid < config::g_device_cnt; otherdid++) if (otherdid != did) haloBlockIds[did][otherdid] = (ivec3 *)cuDev.borrow(sizeof(ivec3) * nbcnt[otherdid]); /// init halo blockids outputHaloGridBlocks[did].initBlocks(temp_allocator{did}, nbcnt[did]); inputHaloGridBlocks[did].initBlocks(temp_allocator{did}, nbcnt[did]); }); sync(); issue([this](int did) { auto &cuDev = Cuda::ref_cuda_context(did); /// prepare counts outputHaloGridBlocks[did].resetCounts(cuDev.stream_compute()); cuDev.syncStream<streamIdx::Compute>(); /// sharing local active blocks for (int otherdid = 0; otherdid < config::g_device_cnt; otherdid++) if (otherdid != did) { checkCudaErrors( cudaMemcpyAsync(haloBlockIds[otherdid][did], partitions[rollid ^ 1][did]._activeKeys, sizeof(ivec3) * nbcnt[did], cudaMemcpyDefault, cuDev.stream_spare(otherdid))); cuDev.spare_event_record(otherdid); } }); sync(); issue([this](int did) { auto &cuDev = Cuda::ref_cuda_context(did); CudaTimer timer{cuDev.stream_compute()}; timer.tick(); /// init overlap marks partitions[rollid ^ 1][did].resetOverlapMarks(nbcnt[did], cuDev.stream_compute()); cuDev.syncStream<streamIdx::Compute>(); /// receiving active blocks from other devices for (int otherdid = 0; otherdid < config::g_device_cnt; otherdid++) if (otherdid != did) { cuDev.spareStreamWaitForEvent( otherdid, Cuda::ref_cuda_context(otherdid).event_spare(did)); cuDev.spare_launch(otherdid, {(nbcnt[otherdid] + 127) / 128, 128}, mark_overlapping_blocks, (uint32_t)nbcnt[otherdid], otherdid, (const ivec3 *)haloBlockIds[did][otherdid], partitions[rollid ^ 1][did], outputHaloGridBlocks[did]._counts + otherdid, outputHaloGridBlocks[did]._buffers[otherdid]); cuDev.spare_event_record(otherdid); cuDev.computeStreamWaitForEvent(cuDev.event_spare(otherdid)); } // self halo particle block partitions[rollid ^ 1][did].resetHaloCount(cuDev.stream_compute()); cuDev.compute_launch( {(pbcnt[did] + 127) / 128, 128}, collect_blockids_for_halo_reduction, (uint32_t)pbcnt[did], did, partitions[rollid ^ 1][did]); /// retrieve counts partitions[rollid ^ 1][did].retrieveHaloCount(cuDev.stream_compute()); 
outputHaloGridBlocks[did].retrieveCounts(cuDev.stream_compute()); cuDev.syncStream<streamIdx::Compute>(); timer.tock(fmt::format("GPU[{}] step {} halo_tagging", did, curStep)); fmt::print(fg(fmt::color::green), "halo particle blocks[{}]: {}\n", did, partitions[rollid ^ 1][did].h_count); for (int otherdid = 0; otherdid < config::g_device_cnt; otherdid++) fmt::print(fg(fmt::color::green), "halo grid blocks[{}][{}]: {}\n", did, otherdid, outputHaloGridBlocks[did].h_counts[otherdid]); }); sync(); } void collect_halo_grid_blocks(int gid = 1) { /// init halo grid blocks issue([this](int did) { std::vector<uint32_t> counts(config::g_device_cnt); outputHaloGridBlocks[did].initBuffer(temp_allocator{did}, outputHaloGridBlocks[did].h_counts); for (int otherdid = 0; otherdid < config::g_device_cnt; otherdid++) counts[otherdid] = (otherdid != did) ? outputHaloGridBlocks[otherdid].h_counts[did] : 0; inputHaloGridBlocks[did].initBuffer(temp_allocator{did}, counts); }); sync(); issue([this, gid](int did) { auto &cuDev = Cuda::ref_cuda_context(did); CppTimer timer{}; timer.tick(); /// sharing local active blocks for (int otherdid = 0; otherdid < config::g_device_cnt; otherdid++) if (otherdid != did) { if (outputHaloGridBlocks[did].h_counts[otherdid] > 0) { auto &cnt = outputHaloGridBlocks[did].h_counts[otherdid]; cuDev.spare_launch(otherdid, {cnt, config::g_blockvolume}, collect_grid_blocks, gridBlocks[gid][did], partitions[rollid][did], outputHaloGridBlocks[did]._buffers[otherdid]); outputHaloGridBlocks[did].send(inputHaloGridBlocks[otherdid], did, otherdid, cuDev.stream_spare(otherdid)); cuDev.spare_event_record(otherdid); } else inputHaloGridBlocks[otherdid].h_counts[did] = 0; } timer.tock( fmt::format("GPU[{}] step {} collect_send_halo_grid", did, curStep)); }); sync(); } void reduce_halo_grid_blocks(int gid = 1) { issue([this, gid](int did) { auto &cuDev = Cuda::ref_cuda_context(did); CppTimer timer{}; timer.tick(); /// receiving active blocks from other devices for (int otherdid = 0; otherdid < config::g_device_cnt; otherdid++) if (otherdid != did) { if (inputHaloGridBlocks[did].h_counts[otherdid] > 0) { cuDev.spareStreamWaitForEvent( otherdid, Cuda::ref_cuda_context(otherdid).event_spare(did)); cuDev.spare_launch(otherdid, {inputHaloGridBlocks[did].h_counts[otherdid], config::g_blockvolume}, reduce_grid_blocks, gridBlocks[gid][did], partitions[rollid][did], inputHaloGridBlocks[did]._buffers[otherdid]); cuDev.spare_event_record(otherdid); cuDev.computeStreamWaitForEvent(cuDev.event_spare(otherdid)); } } cuDev.syncStream<streamIdx::Compute>(); timer.tock(fmt::format("GPU[{}] step {} receive_reduce_halo_grid", did, curStep)); }); sync(); } /// /// animation runtime settings float dt, nextDt, dtDefault, curTime, maxVel; uint64_t curFrame, curStep, fps; /// data on device, double buffering std::vector<optional<SignedDistanceGrid>> collisionObjs; std::vector<GridBuffer> gridBlocks[2]; std::vector<particle_buffer_t> particleBins[2]; std::vector<Partition<1>> partitions[2]; ///< with halo info std::vector<HaloGridBlocks> inputHaloGridBlocks, outputHaloGridBlocks; // std::vector<HaloParticleBlocks> inputHaloParticleBlocks, // outputHaloParticleBlocks; vec<ParticleArray, config::g_device_cnt> particles; struct { void *base; float *d_maxVel; int *d_tmp; int *activeBlockMarks; int *destinations; int *sources; int *binpbs; void alloc(int maxBlockCnt) { checkCudaErrors(cudaMalloc(&base, sizeof(int) * (maxBlockCnt * 5 + 1))); d_maxVel = (float *)((char *)base + sizeof(int) * maxBlockCnt * 5); d_tmp = (int 
*)((uintptr_t)base); activeBlockMarks = (int *)((char *)base + sizeof(int) * maxBlockCnt); destinations = (int *)((char *)base + sizeof(int) * maxBlockCnt * 2); sources = (int *)((char *)base + sizeof(int) * maxBlockCnt * 3); binpbs = (int *)((char *)base + sizeof(int) * maxBlockCnt * 4); } void dealloc() { cudaDeviceSynchronize(); checkCudaErrors(cudaFree(base)); } void resize(int maxBlockCnt) { dealloc(); alloc(maxBlockCnt); } } tmps[config::g_device_cnt]; // halo data vec<ivec3 *, config::g_device_cnt, config::g_device_cnt> haloBlockIds; /// data on host static_assert(std::is_same<GridBufferDomain::index_type, int>::value, "block index type is not int"); char rollid; std::size_t curNumActiveBlocks[config::g_device_cnt], curNumActiveBins[config::g_device_cnt], checkedCnts[config::g_device_cnt][2]; vec<float, config::g_device_cnt> maxVels; vec<int, config::g_device_cnt> pbcnt, nbcnt, ebcnt, bincnt; ///< num blocks vec<uint32_t, config::g_device_cnt> pcnt; ///< num particles std::vector<float> durations[config::g_device_cnt + 1]; std::vector<std::array<float, 3>> models[config::g_device_cnt]; Instance<signed_distance_field_> _hostData; /// control bool bRunning; threadsafe_queue<std::function<void(int)>> jobs[config::g_device_cnt]; std::thread ths[config::g_device_cnt]; ///< thread is not trivial std::mutex mut_slave, mut_ctrl; std::condition_variable cv_slave, cv_ctrl; std::atomic_uint idleCnt{0}; /// computations per substep std::vector<std::function<void(int)>> init_tasks; std::vector<std::function<void(int)>> loop_tasks; }; } // namespace mn #endif
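/*
 * The mgsp_benchmark above coordinates one worker thread per GPU with a simple fork-join
 * protocol: issue() enqueues the same job for every device and zeros idleCnt, each worker
 * pops and runs the job on its device, and sync() blocks until idleCnt reaches the device
 * count. Below is a self-contained sketch of that protocol using only standard C++; the
 * name MiniPool is hypothetical, and the real class additionally manages per-device CUDA
 * contexts, timers and a lock-free job queue.
 */
#include <atomic>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

struct MiniPool {
  explicit MiniPool(int n) : n{n}, jobs(n) {
    for (int did = 0; did < n; ++did)
      ths.emplace_back([this, did] { worker(did); });
  }
  ~MiniPool() {
    running = false;
    cv_slave.notify_all();
    for (auto &t : ths) t.join();
  }
  void issue(std::function<void(int)> job) {          // fork: same job to every device
    { std::lock_guard<std::mutex> lk{mut_slave};
      for (auto &q : jobs) q.push(job);
      idle = 0; }
    cv_slave.notify_all();
  }
  void sync() {                                       // join: wait until all workers are idle
    std::unique_lock<std::mutex> lk{mut_ctrl};
    cv_ctrl.wait(lk, [this] { return idle == n; });
  }
 private:
  void worker(int did) {
    while (running) {
      std::function<void(int)> job;
      { std::unique_lock<std::mutex> lk{mut_slave};
        cv_slave.wait(lk, [&] { return !running || !jobs[did].empty(); });
        if (!running) break;
        job = jobs[did].front(); jobs[did].pop(); }
      job(did);                                       // e.g. launch kernels on device `did`
      { std::lock_guard<std::mutex> lk{mut_ctrl}; ++idle; }
      cv_ctrl.notify_one();
    }
  }
  int n;
  std::vector<std::queue<std::function<void(int)>>> jobs;
  std::vector<std::thread> ths;
  std::mutex mut_slave, mut_ctrl;
  std::condition_variable cv_slave, cv_ctrl;
  std::atomic<int> idle{0};
  std::atomic<bool> running{true};
};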
#include <thrust/reduce.h> #include <cstdio> #include <cstring> #include <fstream> #include <sstream> #include "chrono_fsi/utils/ChUtilsDevice.cuh" #include "chrono_fsi/utils/ChUtilsPrintSph.cuh" namespace chrono { namespace fsi { namespace utils { void PrintToFile(const thrust::device_vector<Real4>& posRadD, const thrust::device_vector<Real3>& velMasD, const thrust::device_vector<Real4>& rhoPresMuD, const thrust::device_vector<Real4>& sr_tau_I_mu_i, const thrust::host_vector<int4>& referenceArray, const thrust::host_vector<int4>& referenceArrayFEA, const std::string& out_dir, bool printToParaview) { thrust::host_vector<Real4> posRadH = posRadD; thrust::host_vector<Real3> velMasH = velMasD; thrust::host_vector<Real4> rhoPresMuH = rhoPresMuD; thrust::host_vector<Real4> h_sr_tau_I_mu_i = sr_tau_I_mu_i; bool short_out = true; // if output with less information, set to true bool haveHelper = (referenceArray[0].z == -3) ? true : false; bool haveGhost = (referenceArray[0].z == -2 || referenceArray[1].z == -2) ? true : false; char fileCounter[5]; static int dumNumChar = -1; dumNumChar++; double eps = 1e-20; sprintf(fileCounter, "%d", dumNumChar); if (haveHelper || haveGhost) { const std::string nameOthers = out_dir + std::string("/others") + std::string(fileCounter) + std::string(".csv"); std::ofstream fileNameOtherParticles; fileNameOtherParticles.open(nameOthers); std::stringstream ssotherParticles; if (printToParaview) { if (short_out) { ssotherParticles << "x,y,z,v_x,v_y,v_z,|U|,rho,pressure\n"; } else { ssotherParticles << "x,y,z,h,v_x,v_y,v_z,|U|,rho(rpx),p(rpy),mu(rpz),sr,tau,I,mu_i,type(rpw)\n"; } } for (size_t i = referenceArray[0].x; i < referenceArray[haveHelper + haveGhost].y; i++) { Real4 rP = rhoPresMuH[i]; if (rP.w > -2) continue; Real4 pos = posRadH[i]; Real3 vel = velMasH[i]; Real4 stIm = h_sr_tau_I_mu_i[i] + mR4(1e-20); Real velMag = length(vel); if (short_out) { ssotherParticles << pos.x << ", " << pos.y << ", " << pos.z << ", " << vel.x + eps << ", " << vel.y + eps << ", " << vel.z + eps << ", " << velMag + eps << ", " << rP.x << ", " << rP.y + eps << std::endl; } else { ssotherParticles << pos.x << ", " << pos.y << ", " << pos.z << ", " << pos.w << ", " << vel.x + eps << ", " << vel.y + eps << ", " << vel.z + eps << ", " << velMag + eps << ", " << rP.x << ", " << rP.y + eps << ", " << rP.z << ", " << stIm.x << ", " << stIm.y << ", " << stIm.z << ", " << stIm.w << ", " << rP.w << std::endl; } } fileNameOtherParticles << ssotherParticles.str(); fileNameOtherParticles.close(); } //***************************************************** const std::string nameFluid = out_dir + std::string("/fluid") + std::string(fileCounter) + std::string(".csv"); std::ofstream fileNameFluidParticles; fileNameFluidParticles.open(nameFluid); std::stringstream ssFluidParticles; if (printToParaview) { if (short_out) { ssFluidParticles << "x,y,z,v_x,v_y,v_z,|U|,rho,pressure\n"; } else { ssFluidParticles << "x,y,z,h,v_x,v_y,v_z,|U|,rho(rpx),p(rpy),mu(rpz),sr,tau,I,mu_i,type(rpw)\n"; } } // int startFluid = haveHelper + haveGhost; for (size_t i = referenceArray[haveHelper + haveGhost].x; i < referenceArray[haveHelper + haveGhost].y; i++) { Real4 rP = rhoPresMuH[i]; if (rP.w != -1) continue; Real4 pos = posRadH[i]; Real3 vel = velMasH[i] + mR3(1e-20); Real4 stIm = h_sr_tau_I_mu_i[i] + mR4(1e-20); Real velMag = length(vel); if (short_out) { ssFluidParticles << pos.x << ", " << pos.y << ", " << pos.z << ", " << vel.x + eps << ", " << vel.y + eps << ", " << vel.z + eps << ", " << velMag + eps << ", " << rP.x 
<< ", " << rP.y + eps << std::endl; } else { ssFluidParticles << pos.x << ", " << pos.y << ", " << pos.z << ", " << pos.w << ", " << vel.x + eps << ", " << vel.y + eps << ", " << vel.z + eps << ", " << velMag + eps << ", " << rP.x << ", " << rP.y + eps << ", " << rP.z << ", " << stIm.x << ", " << stIm.y + eps << ", " << stIm.z << ", " << stIm.w << ", " << rP.w << std::endl; } } fileNameFluidParticles << ssFluidParticles.str(); fileNameFluidParticles.close(); //***************************************************** if (dumNumChar == 0) { const std::string nameFluidBoundaries = out_dir + std::string("/boundary") + std::string(fileCounter) + std::string(".csv"); std::ofstream fileNameFluidBoundaries; fileNameFluidBoundaries.open(nameFluidBoundaries); std::stringstream ssFluidBoundaryParticles; if (printToParaview) { if (short_out) { ssFluidBoundaryParticles << "x,y,z,v_x,v_y,v_z,|U|,rho,pressure\n"; } else { ssFluidBoundaryParticles << "x,y,z,h,v_x,v_y,v_z,|U|,rho(rpx),p(rpy),mu(rpz),sr,tau,I,mu_i,type(rpw)\n"; } } // ssFluidBoundaryParticles.precision(20); for (size_t i = referenceArray[haveHelper + haveGhost + 1].x; i < referenceArray[haveHelper + haveGhost + 1].y; i++) { Real4 rP = rhoPresMuH[i]; if (rP.w != 0) continue; Real4 pos = posRadH[i]; Real3 vel = velMasH[i] + mR3(1e-20); Real4 stIm = h_sr_tau_I_mu_i[i] + mR4(1e-20); Real velMag = length(vel); if (short_out) { ssFluidBoundaryParticles << pos.x << ", " << pos.y << ", " << pos.z << ", " << vel.x + eps << ", " << vel.y + eps << ", " << vel.z + eps << ", " << velMag + eps << ", " << rP.x << ", " << rP.y + eps << std::endl; } else { ssFluidBoundaryParticles << pos.x << ", " << pos.y << ", " << pos.z << ", " << pos.w << ", " << vel.x + eps << ", " << vel.y + eps << ", " << vel.z + eps << ", " << velMag + eps << ", " << rP.x << ", " << rP.y + eps << ", " << rP.z << ", " << stIm.x << ", " << stIm.y + eps << ", " << stIm.z << ", " << stIm.w << ", " << rP.w << std::endl; } } fileNameFluidBoundaries << ssFluidBoundaryParticles.str(); fileNameFluidBoundaries.close(); } //***************************************************** int refSize = (int)referenceArray.size(); if (refSize > haveHelper + haveGhost + 2) { const std::string nameBCE = out_dir + std::string("/BCE_Rigid") + std::string(fileCounter) + std::string(".csv"); std::ofstream fileNameBCE; fileNameBCE.open(nameBCE); std::stringstream ssBCE; // ssFluidBoundaryParticles.precision(20); if (printToParaview) { if (short_out) { ssBCE << "x,y,z,v_x,v_y,v_z,|U|,rho,pressure\n"; } else { ssBCE << "x,y,z,h,v_x,v_y,v_z,|U|,rho(rpx),p(rpy),mu(rpz),sr,tau,I,mu_i,type(rpw)\n"; } } for (size_t i = referenceArray[haveHelper + haveGhost + 2].x; i < referenceArray[refSize - 1].y; i++) { // if (referenceArray[haveHelper + haveGhost + 2].w) Real4 pos = posRadH[i]; Real3 vel = velMasH[i] + mR3(1e-20); Real4 rP = rhoPresMuH[i]; Real velMag = length(vel); Real4 stIm = h_sr_tau_I_mu_i[i] + mR4(1e-20); if (rP.w == 1.0) { if (short_out) { ssBCE << pos.x << ", " << pos.y << ", " << pos.z << ", " << vel.x + eps << ", " << vel.y + eps << ", " << vel.z + eps << ", " << velMag + eps << ", " << rP.x << ", " << rP.y + eps << std::endl; } else { ssBCE << pos.x << ", " << pos.y << ", " << pos.z << ", " << pos.w << ", " << vel.x + eps << ", " << vel.y + eps << ", " << vel.z + eps << ", " << velMag + eps << ", " << rP.x << ", " << rP.y + eps << ", " << rP.z << ", " << stIm.x << ", " << stIm.y + eps << ", " << stIm.z << ", " << stIm.w << ", " << rP.w << std::endl; } } } fileNameBCE << ssBCE.str(); fileNameBCE.close(); } 
//***************************************************** int refSize_Flex = (int)referenceArrayFEA.size(); if (refSize_Flex > 0) { const std::string nameBCE_Flex = out_dir + std::string("/BCE_Flex") + std::string(fileCounter) + std::string(".csv"); std::ofstream fileNameBCE_Flex; fileNameBCE_Flex.open(nameBCE_Flex); std::stringstream ssBCE_Flex; // ssFluidBoundaryParticles.precision(20); if (printToParaview) { if (short_out) { ssBCE_Flex << "x,y,z,v_x,v_y,v_z,|U|,rho,pressure\n"; } else { ssBCE_Flex << "x,y,z,h,v_x,v_y,v_z,|U|,rho(rpx),p(rpy),mu(rpz),sr,tau,I,mu_i,type(rpw)\n"; } } for (size_t i = referenceArrayFEA[0].x; i < referenceArrayFEA[refSize_Flex - 1].y; i++) { Real4 pos = posRadH[i]; Real3 vel = velMasH[i] + mR3(1e-20); Real4 rP = rhoPresMuH[i]; Real4 stIm = h_sr_tau_I_mu_i[i] + mR4(1e-20); Real velMag = length(vel); if (short_out) { ssBCE_Flex << pos.x << ", " << pos.y << ", " << pos.z << ", " << vel.x + eps << ", " << vel.y + eps << ", " << vel.z + eps << ", " << velMag + eps << ", " << rP.x << ", " << rP.y + eps << std::endl; } else { ssBCE_Flex << pos.x << ", " << pos.y << ", " << pos.z << ", " << pos.w << ", " << vel.x + eps << ", " << vel.y + eps << ", " << vel.z + eps << ", " << velMag + eps << ", " << rP.x << ", " << rP.y + 1e-20 << ", " << rP.z << ", " << stIm.x << ", " << stIm.y + eps << ", " << stIm.z << ", " << stIm.w << ", " << rP.w << std::endl; } } fileNameBCE_Flex << ssBCE_Flex.str(); fileNameBCE_Flex.close(); } //***************************************************** posRadH.clear(); velMasH.clear(); rhoPresMuH.clear(); } void WriteCsvParticlesToFile(thrust::device_vector<Real4>& posRadD, thrust::device_vector<Real3>& velMasD, thrust::device_vector<Real4>& rhoPresMuD, thrust::host_vector<int4>& referenceArray, const std::string& outfilename) { thrust::host_vector<Real4> posRadH = posRadD; thrust::host_vector<Real3> velMasH = velMasD; thrust::host_vector<Real4> rhoPresMuH = rhoPresMuD; double eps = 1e-20; // ====================================================== bool haveHelper = (referenceArray[0].z == -3) ? true : false; bool haveGhost = (referenceArray[0].z == -2 || referenceArray[1].z == -2) ? true : false; // ====================================================== std::ofstream fileNameFluidParticles; fileNameFluidParticles.open(outfilename); std::stringstream ssFluidParticles; ssFluidParticles << "x,y,z,v_x,v_y,v_z,|U|,rho,pressure\n"; for (size_t i = referenceArray[haveHelper + haveGhost].x; i < referenceArray[haveHelper + haveGhost].y; i++) { Real4 rP = rhoPresMuH[i]; if (rP.w != -1) continue; Real4 pos = posRadH[i]; Real3 vel = velMasH[i] + mR3(1e-20); Real velMag = length(vel); ssFluidParticles << pos.x << ", " << pos.y << ", " << pos.z << ", " << vel.x + eps << ", " << vel.y + eps << ", " << vel.z + eps << ", " << velMag + eps << ", " << rP.x << ", " << rP.y + eps << std::endl; } fileNameFluidParticles << ssFluidParticles.str(); fileNameFluidParticles.close(); } void WriteChPFParticlesToFile(thrust::device_vector<Real4>& posRadD, thrust::host_vector<int4>& referenceArray, const std::string& outfilename) { std::ofstream ptFile(outfilename, std::ios::out | std::ios::binary); ParticleFormatWriter pw; thrust::host_vector<Real4> posRadH = posRadD; std::vector<float> pos_x(posRadH.size()); std::vector<float> pos_y(posRadH.size()); std::vector<float> pos_z(posRadH.size()); // ====================================================== bool haveHelper = (referenceArray[0].z == -3) ? 
true : false; bool haveGhost = (referenceArray[0].z == -2 || referenceArray[1].z == -2) ? true : false; // ====================================================== for (size_t i = referenceArray[haveHelper + haveGhost].x; i < referenceArray[haveHelper + haveGhost].y; i++) { pos_x[i] = (float)posRadH[i].x; pos_y[i] = (float)posRadH[i].y; pos_z[i] = (float)posRadH[i].z; } pw.write(ptFile, ParticleFormatWriter::CompressionType::NONE, pos_x, pos_y, pos_z); } } // end namespace utils } // end namespace fsi } // end namespace chrono
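/*
 * PrintToFile() above partitions the SPH markers by type (the .w component of rhoPresMu)
 * using the referenceArray ranges and writes one CSV per group; in the short format every
 * row is "x,y,z,v_x,v_y,v_z,|U|,rho,pressure". The sketch below writes one such CSV from
 * plain host data to make the row layout explicit. It is illustrative only: write_short_csv(),
 * Vec3 and Vec4 are hypothetical stand-ins for the Chrono::FSI Real3/Real4 types.
 */
#include <cmath>
#include <fstream>
#include <string>
#include <vector>

struct Vec3 { double x, y, z; };
struct Vec4 { double x, y, z, w; };   // stands in for rhoPresMu: rho, p, mu, type

static void write_short_csv(const std::string& fname,
                            const std::vector<Vec3>& pos,
                            const std::vector<Vec3>& vel,
                            const std::vector<Vec4>& rhoPresMu) {
    std::ofstream out(fname);
    out << "x,y,z,v_x,v_y,v_z,|U|,rho,pressure\n";
    for (size_t i = 0; i < pos.size(); i++) {
        const Vec3& p = pos[i];
        const Vec3& v = vel[i];
        double velMag = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
        out << p.x << ", " << p.y << ", " << p.z << ", "
            << v.x << ", " << v.y << ", " << v.z << ", "
            << velMag << ", " << rhoPresMu[i].x << ", " << rhoPresMu[i].y << "\n";
    }
}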
/*---Main---*/ int main( int argc, char** argv ) { Arguments args; memset( (void*)&args, 0, sizeof(Arguments) ); args.argc = argc; args.argv_unconsumed = (char**) malloc( argc * sizeof( char* ) ); args.argstring = 0; for( int i=0; i<argc; ++i ) { if ( argv[i] == NULL) { printf("Null command line argument encountered"); return -1; } args.argv_unconsumed[i] = argv[i]; } Dimensions dims_g; /*---dims for entire problem---*/ Dimensions dims; /*---dims for the part on this MPI proc---*/ Sweeper sweeper ; memset( (void*)&sweeper, 0, sizeof(Sweeper) ); int niterations = 0; /*---Define problem specs---*/ dims_g.ncell_x = Arguments_consume_int_or_default( &args, "--ncell_x", 5 ); dims_g.ncell_y = Arguments_consume_int_or_default( &args, "--ncell_y", 5 ); dims_g.ncell_z = Arguments_consume_int_or_default( &args, "--ncell_z", 5 ); dims_g.ne = Arguments_consume_int_or_default( &args, "--ne", 30 ); dims_g.na = Arguments_consume_int_or_default( &args, "--na", 33 ); niterations = Arguments_consume_int_or_default( &args, "--niterations", 1 ); dims_g.nm = NM; if (dims_g.ncell_x <= 0) { printf("Invalid ncell_x supplied."); return -1; } if (dims_g.ncell_y <= 0) { printf("Invalid ncell_y supplied."); return -1; } if (dims_g.ncell_z <= 0) { printf("Invalid ncell_z supplied."); return -1; } if (dims_g.ne <= 0 ) { printf("Invalid ne supplied."); return -1; } if (dims_g.nm <= 0 ) { printf("Invalid nm supplied."); return -1; } if (dims_g.na <= 0 ) { printf("Invalid na supplied."); return -1; } if (niterations < 1 ) { printf("Invalid iteration count supplied."); return -1; } /*---Initialize (local) dimensions - no domain decomposition---*/ dims.ncell_x = dims_g.ncell_x; dims.ncell_y = dims_g.ncell_y; dims.ncell_z = dims_g.ncell_z; dims.ne = dims_g.ne; dims.nm = dims_g.nm; dims.na = dims_g.na; /*---Initialize quantities---*/ size_t n = dims.nm * dims.na * NOCTANT * sizeof(P); P* a_from_m = (P*) malloc (n); P* m_from_a = (P*) malloc (n); /*---First set to zero---*/ for( int octant=0; octant<NOCTANT; ++octant ) for( int im=0; im<dims.nm; ++im ) for( int ia=0; ia<dims.na; ++ia ) a_from_m[A_FROM_M_ADDR(dims.na, im, ia, octant)] = (P)0; for( int octant=0; octant<NOCTANT; ++octant ) for( int i=0; i<dims.na; ++i ) { const int quot = ( i + 1 ) / dims.nm; const int rem = ( i + 1 ) % dims.nm; a_from_m[A_FROM_M_ADDR(dims.na, dims.nm-1, i, octant)] += quot; if( rem != 0 ) { a_from_m[A_FROM_M_ADDR(dims.na, 0, i, octant)] += (P)-1; a_from_m[A_FROM_M_ADDR(dims.na, rem, i, octant)] += (P)1; } } /*---Fill matrix with entries that leave linears unaffected---*/ /*---This is to create a more dense, nontrivial matrix, with additions to the rows that are guaranteed to send affine functions to zero. 
---*/ for(int octant=0; octant<NOCTANT; ++octant ) for(int im=0; im<dims.nm-2; ++im ) for(int ia=0; ia<dims.na; ++ia ) { const int randvalue = 21 + ( im + dims.nm * ia ) % 17; a_from_m[A_FROM_M_ADDR(dims.na, im, ia, octant)] += -randvalue; a_from_m[A_FROM_M_ADDR(dims.na, im+1, ia, octant)] += 2*randvalue; a_from_m[A_FROM_M_ADDR(dims.na, im+2, ia, octant)] += -randvalue; } #ifdef DEBUG for (int i = 0; i < n/sizeof(P); i++) printf("a_from_m %d %f\n", i, a_from_m[i]); #endif P* d_a_from_m = NULL; hipMalloc((void**) &d_a_from_m, n); hipMemcpy(d_a_from_m, a_from_m, n, hipMemcpyHostToDevice); // m from a for( int octant=0; octant<NOCTANT; ++octant ) for( int im=0; im<dims.nm; ++im ) for( int ia=0; ia<dims.na; ++ia ) m_from_a[M_FROM_A_ADDR(dims.na, im, ia, octant)] = (P)0; for( int octant=0; octant<NOCTANT; ++octant ) for( int i=0; i<dims.nm; ++i ) { const int quot = ( i + 1 ) / dims.na; const int rem = ( i + 1 ) % dims.na; m_from_a[M_FROM_A_ADDR(dims.na, i, dims.na-1, octant)] += quot; if( rem != 0 ) { m_from_a[M_FROM_A_ADDR(dims.na, i, 0, octant)] += (P)-1; m_from_a[M_FROM_A_ADDR(dims.na, i, rem, octant)] += (P)1; } } /*---Fill matrix with entries that leave linears unaffected---*/ /*---This is to create a more dense, nontrivial matrix, with additions to the rows that are guaranteed to send affine functions to zero. ---*/ for(int octant=0; octant<NOCTANT; ++octant ) for(int im=0; im<dims.nm; ++im ) for(int ia=0; ia<dims.na-2; ++ia ) { const int randvalue = 37 + ( im + dims.nm * ia ) % 19; m_from_a[M_FROM_A_ADDR(dims.na, im, ia, octant)] += -randvalue; m_from_a[M_FROM_A_ADDR(dims.na, im, ia+1, octant)] += 2*randvalue; m_from_a[M_FROM_A_ADDR(dims.na, im, ia+2, octant)] += -randvalue; } /*---Scale matrix to compensate for 8 octants and also angle scale factor---*/ for(int octant=0; octant<NOCTANT; ++octant ) for(int im=0; im<dims.nm; ++im ) for(int ia=0; ia<dims.na; ++ia ) { m_from_a[M_FROM_A_ADDR(dims.na, im, ia, octant)] /= NOCTANT; // scale factor angle m_from_a[M_FROM_A_ADDR(dims.na, im, ia, octant)] /= 1 << ( ia & ( (1<<3) - 1 ) ); } #ifdef DEBUG for (int i = 0; i < n/sizeof(P); i++) printf("m_from_a %d %f\n", i, m_from_a[i]); #endif P* d_m_from_a = NULL; hipMalloc((void**) &d_m_from_a, n); hipMemcpy(d_m_from_a, m_from_a, n, hipMemcpyHostToDevice); n = Dimensions_size_state( dims, NU ) * sizeof(P); P* vi = (P*) malloc( n ); /*---Initialize input state array ---*/ initialize_input_state( vi, dims, NU ); #ifdef DEBUG for (int i = 0; i < n/sizeof(P); i++) printf("vi %d %f\n", i, vi[i]); #endif P* d_vi = NULL; hipMalloc((void**) &d_vi, n); hipMemcpy(d_vi, vi, n, hipMemcpyHostToDevice); P* vo = (P*) malloc( n ); P* d_vo = NULL; hipMalloc((void**) &d_vo, n ); /*---This is not strictly required for the output state array, but might have a performance effect from pre-touching pages */ //for (int i = 0; i < Dimensions_size_state( dims, NU ); i++) vo[i] = (P)0; /*---Initialize sweeper---*/ sweeper.nblock_z = 1; //NOTE: will not work efficiently in parallel. 
sweeper.noctant_per_block = NOCTANT; sweeper.nblock_octant = 1; sweeper.dims = dims; sweeper.dims_b = dims; sweeper.dims_b.ncell_z = dims.ncell_z / sweeper.nblock_z; // step scheduler sweeper.stepscheduler.nblock_z_ = sweeper.nblock_z; sweeper.stepscheduler.nproc_x_ = 1; //Env_nproc_x( env ); sweeper.stepscheduler.nproc_y_ = 1; //Env_nproc_y( env ); sweeper.stepscheduler.nblock_octant_ = sweeper.nblock_octant; sweeper.stepscheduler.noctant_per_block_ = NOCTANT / sweeper.nblock_octant; //sweeper.faces.noctant_per_block_ = sweeper.noctant_per_block; const int noctant_per_block = sweeper.noctant_per_block; n = Dimensions_size_facexy( sweeper.dims_b, NU, noctant_per_block ) * sizeof(P); P* facexy = (P*) malloc ( n ); P* d_facexy = NULL; hipMalloc((void**)&d_facexy, n); n = Dimensions_size_facexz( sweeper.dims_b, NU, noctant_per_block) * sizeof(P); P* facexz = (P*) malloc ( n ); P* d_facexz = NULL; hipMalloc((void**) &d_facexz, n); n = Dimensions_size_faceyz( sweeper.dims_b, NU, noctant_per_block) * sizeof(P); P* faceyz = (P*) malloc ( n ); P* d_faceyz = NULL; hipMalloc((void**) &d_faceyz, n); n = dims.na * NU * dims.ne * NOCTANT * dims.ncell_x * dims.ncell_y * sizeof(P); P* vslocal = (P*) malloc ( n ); P* d_vslocal = NULL; hipMalloc((void**)&d_vslocal, n ); double t1 = get_time(); for(int iteration=0; iteration<niterations; ++iteration ) { // compute the value of next step const int nstep = StepScheduler_nstep( &(sweeper.stepscheduler) ); #ifdef DEBUG printf("iteration %d next step = %d\n", iteration, nstep); #endif for (int step = 0; step < nstep; ++step) { Dimensions dims = sweeper.dims; Dimensions dims_b = sweeper.dims_b; int dims_b_ncell_x = dims_b.ncell_x; int dims_b_ncell_y = dims_b.ncell_y; int dims_b_ncell_z = dims_b.ncell_z; int dims_ncell_z = dims.ncell_z; int dims_b_ne = dims_b.ne; int dims_b_na = dims_b.na; //int dims_b_nm = dims_b.nm; #ifdef DEBUG int facexy_size = dims_b.ncell_x * dims_b.ncell_y * dims_b.ne * dims_b.na * NU * NOCTANT; int facexz_size = dims_b.ncell_x * dims_b.ncell_z * dims_b.ne * dims_b.na * NU * NOCTANT; int faceyz_size = dims_b.ncell_y * dims_b.ncell_z * dims_b.ne * dims_b.na * NU * NOCTANT; #endif int v_size = sizeof(P) * dims.ncell_x * dims.ncell_y * dims.ncell_z * dims.ne * dims.nm * NU; //int a_from_m_size = sizeof(P) * dims_b.nm * dims_b.na * NOCTANT; //int m_from_a_size = sizeof(P) * dims_b.nm * dims_b.na * NOCTANT; //int vs_local_size = sizeof(P) * dims_b.na * NU * dims_b.ne * NOCTANT * dims_b.ncell_x * dims_b.ncell_y; int v_b_size = dims_b.ncell_x * dims_b.ncell_y * dims_b.ncell_z * dims_b.ne * dims_b.nm * NU; StepInfoAll stepinfoall; /*---But only use noctant_per_block values---*/ for(int octant_in_block=0; octant_in_block<noctant_per_block; ++octant_in_block ) { stepinfoall.stepinfo[octant_in_block] = StepScheduler_stepinfo( &(sweeper.stepscheduler), step, octant_in_block, 0, //proc_x, 0 //proc_y ); } const int ix_base = 0; const int iy_base = 0; const int num_wavefronts = dims_b_ncell_z + dims_b_ncell_y + dims_b_ncell_x - 2; const int is_first_step = 0 == step; const int is_last_step = nstep - 1 == step; dim3 xy_grid((dims_b_ncell_x+7)/8, (dims_b_ncell_y+7)/8, (NOCTANT+3)/4); dim3 xy_block(8, 8, 4); dim3 xz_grid((dims_b_ncell_x+7)/8, (dims_b_ncell_z+7)/8, (NOCTANT+3)/4); dim3 xz_block(8, 8, 4); dim3 yz_grid((dims_b_ncell_y+7)/8, (dims_b_ncell_z+7)/8, (NOCTANT+3)/4); dim3 yz_block(8, 8, 4); dim3 wave_grid((NOCTANT+3)/4, (dims_b_ne+63)/64); dim3 wave_block(4, 64); if (is_first_step) { hipMemset(d_vo, 0, v_size); // must reset output for each iteration 
hipLaunchKernelGGL(init_facexy, dim3(xy_grid), dim3(xy_block), 0, 0, ix_base, iy_base, dims_b_ne, dims_b_na, dims_b_ncell_x, dims_b_ncell_y, dims_b_ncell_z, dims_ncell_z, d_facexy); #ifdef DEBUG hipMemcpy(facexy, d_facexy, facexy_size * sizeof(P), hipMemcpyDeviceToHost); for (int i = 0; i < facexy_size; i++) printf("facexy: %d %f\n", i, facexy[i]); #endif } hipLaunchKernelGGL(init_facexz, dim3(xz_grid), dim3(xz_block), 0, 0, ix_base, iy_base, dims_b_ne, dims_b_na, dims_b_ncell_x, dims_b_ncell_y, dims_ncell_z, 1, //proc_x_min, 1, //proc_x_max, stepinfoall, d_facexz); #ifdef DEBUG hipMemcpy(facexz, d_facexz, facexz_size * sizeof(P), hipMemcpyDeviceToHost); for (int i = 0; i < facexz_size; i++) printf("facexz: %d %f\n", i, facexz[i]); #endif hipLaunchKernelGGL(init_faceyz, dim3(yz_grid), dim3(yz_block), 0, 0, ix_base, iy_base, dims_b_ne, dims_b_na, dims_b_ncell_x, dims_b_ncell_y, dims_ncell_z, 1, //proc_y_min, 1, //proc_y_max, stepinfoall, d_faceyz); #ifdef DEBUG hipMemcpy(faceyz, d_faceyz, faceyz_size * sizeof(P), hipMemcpyDeviceToHost); for (int i = 0; i < faceyz_size; i++) printf("faceyz: %d %f\n", i, faceyz[i]); #endif hipLaunchKernelGGL(wavefronts, dim3(wave_grid), dim3(wave_block), 0, 0, num_wavefronts, ix_base, iy_base, v_b_size, noctant_per_block, dims_b, stepinfoall, d_facexy, d_facexz, d_faceyz, d_a_from_m, d_m_from_a, d_vi, d_vo, d_vslocal ); if (is_last_step) { hipMemcpy(vo, d_vo, v_size, hipMemcpyDeviceToHost); #ifdef DEBUG for (int i = 0; i < v_size/sizeof(P); i++) printf("vo %d %f\n", i, vo[i]); #endif } } // step P* tmp = vo; vo = vi; vi = tmp; } double t2 = get_time(); double time = t2 - t1; // Verification (input and output vectors are equal) P normsq = (P)0; P normsqdiff = (P)0; for (size_t i = 0; i < Dimensions_size_state( dims, NU ); i++) { normsq += vo[i] * vo[i]; normsqdiff += (vi[i] - vo[i]) * (vi[i] - vo[i]); } double flops = niterations * ( Dimensions_size_state( dims, NU ) * NOCTANT * 2. * dims.na + Dimensions_size_state_angles( dims, NU ) * Quantities_flops_per_solve( dims ) + Dimensions_size_state( dims, NU ) * NOCTANT * 2. * dims.na ); double floprate = (time <= 0) ? 0 : flops / time / 1e9; printf( "Normsq result: %.8e diff: %.3e %s time: %.3f GF/s: %.3f\n", normsq, normsqdiff, normsqdiff== (P)0 ? "PASS" : "FAIL", time, floprate ); /*---Deallocations---*/ Arguments_destroy( &args ); hipFree(d_vo); hipFree(d_vi); hipFree(d_a_from_m); hipFree(d_m_from_a); hipFree(d_facexy); hipFree(d_facexz); hipFree(d_faceyz); hipFree(d_vslocal); free(vi); free(vo); free(m_from_a); free(a_from_m); free(facexy); free(facexz); free(faceyz); free(vslocal); return 0; } /*---main---*/
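/*
 * The host loop above launches one `wavefronts` kernel per step. The sweep exploits the fact
 * that, for a fixed octant, cells lying on the same diagonal plane ix+iy+iz == w depend only
 * on cells from earlier planes, which is why the code uses
 * num_wavefronts = ncell_x + ncell_y + ncell_z - 2. The serial sketch below shows that
 * traversal order on the host; sweep_cell() is a hypothetical placeholder for the per-cell
 * work done in the real kernel.
 */
static void sweep_wavefronts_host(int ncell_x, int ncell_y, int ncell_z) {
    const int num_wavefronts = ncell_x + ncell_y + ncell_z - 2;
    for (int w = 0; w < num_wavefronts; ++w) {        /* planes processed sequentially      */
        for (int iz = 0; iz < ncell_z; ++iz)
            for (int iy = 0; iy < ncell_y; ++iy) {
                const int ix = w - iy - iz;           /* cells belonging to plane w         */
                if (ix < 0 || ix >= ncell_x) continue;
                /* all (ix,iy,iz) visited in this inner loop are mutually independent and
                   can be processed in parallel on the GPU */
                /* sweep_cell(ix, iy, iz); */
            }
    }
}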
Bicubic filtering See GPU Gems 2: "Fast Third-Order Texture Filtering", Sigg & Hadwiger https://developer.nvidia.com/gpugems/gpugems2/part-iii-high-quality-rendering/chapter-20-fast-third-order-texture-filtering Reformulation thanks to Keenan Crane */ #ifndef _BICUBICTEXTURE_KERNEL_CUH_ #define _BICUBICTEXTURE_KERNEL_CUH_ enum Mode { MODE_NEAREST, MODE_BILINEAR, MODE_BICUBIC, MODE_FAST_BICUBIC, MODE_CATROM }; cudaTextureObject_t texObjPoint, texObjLinear; // w0, w1, w2, and w3 are the four cubic B-spline basis functions __host__ __device__ float w0(float a) { // return (1.0f/6.0f)*(-a*a*a + 3.0f*a*a - 3.0f*a + 1.0f); return (1.0f / 6.0f) * (a * (a * (-a + 3.0f) - 3.0f) + 1.0f); // optimized } __host__ __device__ float w1(float a) { // return (1.0f/6.0f)*(3.0f*a*a*a - 6.0f*a*a + 4.0f); return (1.0f / 6.0f) * (a * a * (3.0f * a - 6.0f) + 4.0f); } __host__ __device__ float w2(float a) { // return (1.0f/6.0f)*(-3.0f*a*a*a + 3.0f*a*a + 3.0f*a + 1.0f); return (1.0f / 6.0f) * (a * (a * (-3.0f * a + 3.0f) + 3.0f) + 1.0f); } __host__ __device__ float w3(float a) { return (1.0f / 6.0f) * (a * a * a); } // g0 and g1 are the two amplitude functions __device__ float g0(float a) { return w0(a) + w1(a); } __device__ float g1(float a) { return w2(a) + w3(a); } // h0 and h1 are the two offset functions __device__ float h0(float a) { // note +0.5 offset to compensate for CUDA linear filtering convention return -1.0f + w1(a) / (w0(a) + w1(a)) + 0.5f; } __device__ float h1(float a) { return 1.0f + w3(a) / (w2(a) + w3(a)) + 0.5f; } // filter 4 values using cubic splines template <class T> __device__ T cubicFilter(float x, T c0, T c1, T c2, T c3) { T r; r = c0 * w0(x); r += c1 * w1(x); r += c2 * w2(x); r += c3 * w3(x); return r; } // slow but precise bicubic lookup using 16 texture lookups template <class T, class R> // texture data type, return type __device__ R tex2DBicubic(const cudaTextureObject_t tex, float x, float y) { x -= 0.5f; y -= 0.5f; float px = floorf(x); float py = floorf(y); float fx = x - px; float fy = y - py; return cubicFilter<R>( fy, cubicFilter<R>( fx, tex2D<R>(tex, px - 1, py - 1), tex2D<R>(tex, px, py - 1), tex2D<R>(tex, px + 1, py - 1), tex2D<R>(tex, px + 2, py - 1)), cubicFilter<R>(fx, tex2D<R>(tex, px - 1, py), tex2D<R>(tex, px, py), tex2D<R>(tex, px + 1, py), tex2D<R>(tex, px + 2, py)), cubicFilter<R>(fx, tex2D<R>(tex, px - 1, py + 1), tex2D<R>(tex, px, py + 1), tex2D<R>(tex, px + 1, py + 1), tex2D<R>(tex, px + 2, py + 1)), cubicFilter<R>(fx, tex2D<R>(tex, px - 1, py + 2), tex2D<R>(tex, px, py + 2), tex2D<R>(tex, px + 1, py + 2), tex2D<R>(tex, px + 2, py + 2))); } // fast bicubic texture lookup using 4 bilinear lookups // assumes texture is set to non-normalized coordinates, point sampling template <class T, class R> // texture data type, return type __device__ R tex2DFastBicubic(const cudaTextureObject_t tex, float x, float y) { x -= 0.5f; y -= 0.5f; float px = floorf(x); float py = floorf(y); float fx = x - px; float fy = y - py; // note: we could store these functions in a lookup table texture, but maths // is cheap float g0x = g0(fx); float g1x = g1(fx); float h0x = h0(fx); float h1x = h1(fx); float h0y = h0(fy); float h1y = h1(fy); R r = g0(fy) * (g0x * tex2D<R>(tex, px + h0x, py + h0y) + g1x * tex2D<R>(tex, px + h1x, py + h0y)) + g1(fy) * (g0x * tex2D<R>(tex, px + h0x, py + h1y) + g1x * tex2D<R>(tex, px + h1x, py + h1y)); return r; } // higher-precision 2D bilinear lookup template <class T, class R> // texture data type, return type __device__ R tex2DBilinear(const 
cudaTextureObject_t tex, float x, float y) { x -= 0.5f; y -= 0.5f; float px = floorf(x); // integer position float py = floorf(y); float fx = x - px; // fractional position float fy = y - py; px += 0.5f; py += 0.5f; return lerp(lerp(tex2D<R>(tex, px, py), tex2D<R>(tex, px + 1.0f, py), fx), lerp(tex2D<R>(tex, px, py + 1.0f), tex2D<R>(tex, px + 1.0f, py + 1.0f), fx), fy); } #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 200 /* bilinear 2D texture lookup using tex2Dgather function - tex2Dgather() returns the four neighbouring samples in a single texture lookup - it is only supported on the Fermi architecture - you can select which component to fetch using the "comp" parameter - it can be used to efficiently implement custom texture filters The samples are returned in a 4-vector in the following order: x: (0, 1) y: (1, 1) z: (1, 0) w: (0, 0) */ template <class T, class R> // texture data type, return type __device__ float tex2DBilinearGather(const cudaTextureObject_t tex, float x, float y, int comp = 0) { x -= 0.5f; y -= 0.5f; float px = floorf(x); // integer position float py = floorf(y); float fx = x - px; // fractional position float fy = y - py; R samples = tex2Dgather<R>(tex, px + 0.5f, py + 0.5f, comp); return lerp(lerp((float)samples.w, (float)samples.z, fx), lerp((float)samples.x, (float)samples.y, fx), fy); } #endif // Catmull-Rom interpolation __host__ __device__ float catrom_w0(float a) { // return -0.5f*a + a*a - 0.5f*a*a*a; return a * (-0.5f + a * (1.0f - 0.5f * a)); } __host__ __device__ float catrom_w1(float a) { // return 1.0f - 2.5f*a*a + 1.5f*a*a*a; return 1.0f + a * a * (-2.5f + 1.5f * a); } __host__ __device__ float catrom_w2(float a) { // return 0.5f*a + 2.0f*a*a - 1.5f*a*a*a; return a * (0.5f + a * (2.0f - 1.5f * a)); } __host__ __device__ float catrom_w3(float a) { // return -0.5f*a*a + 0.5f*a*a*a; return a * a * (-0.5f + 0.5f * a); } template <class T> __device__ T catRomFilter(float x, T c0, T c1, T c2, T c3) { T r; r = c0 * catrom_w0(x); r += c1 * catrom_w1(x); r += c2 * catrom_w2(x); r += c3 * catrom_w3(x); return r; } // Note - can't use bilinear trick here because of negative lobes template <class T, class R> // texture data type, return type __device__ R tex2DCatRom(const cudaTextureObject_t tex, float x, float y) { x -= 0.5f; y -= 0.5f; float px = floorf(x); float py = floorf(y); float fx = x - px; float fy = y - py; return catRomFilter<R>( fy, catRomFilter<R>( fx, tex2D<R>(tex, px - 1, py - 1), tex2D<R>(tex, px, py - 1), tex2D<R>(tex, px + 1, py - 1), tex2D<R>(tex, px + 2, py - 1)), catRomFilter<R>(fx, tex2D<R>(tex, px - 1, py), tex2D<R>(tex, px, py), tex2D<R>(tex, px + 1, py), tex2D<R>(tex, px + 2, py)), catRomFilter<R>(fx, tex2D<R>(tex, px - 1, py + 1), tex2D<R>(tex, px, py + 1), tex2D<R>(tex, px + 1, py + 1), tex2D<R>(tex, px + 2, py + 1)), catRomFilter<R>(fx, tex2D<R>(tex, px - 1, py + 2), tex2D<R>(tex, px, py + 2), tex2D<R>(tex, px + 1, py + 2), tex2D<R>(tex, px + 2, py + 2))); } // test functions // render image using normal bilinear texture lookup __global__ void d_render(uchar4 *d_output, uint width, uint height, float tx, float ty, float scale, float cx, float cy, cudaTextureObject_t texObj) { uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; uint i = __umul24(y, width) + x; float u = (x - cx) * scale + cx + tx; float v = (y - cy) * scale + cy + ty; if ((x < width) && (y < height)) { // write output color float c = tex2D<float>(texObj, u, v); // float c = tex2DBilinear<uchar, float>(tex, 
u, v); // float c = tex2DBilinearGather<uchar, uchar4>(tex2, u, v, 0) / 255.0f; d_output[i] = make_uchar4(c * 0xff, c * 0xff, c * 0xff, 0); } } // render image using bicubic texture lookup __global__ void d_renderBicubic(uchar4 *d_output, uint width, uint height, float tx, float ty, float scale, float cx, float cy, cudaTextureObject_t texObj) { uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; uint i = __umul24(y, width) + x; float u = (x - cx) * scale + cx + tx; float v = (y - cy) * scale + cy + ty; if ((x < width) && (y < height)) { // write output color float c = tex2DBicubic<uchar, float>(texObj, u, v); d_output[i] = make_uchar4(c * 0xff, c * 0xff, c * 0xff, 0); } } // render image using fast bicubic texture lookup __global__ void d_renderFastBicubic(uchar4 *d_output, uint width, uint height, float tx, float ty, float scale, float cx, float cy, cudaTextureObject_t texObj) { uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; uint i = __umul24(y, width) + x; float u = (x - cx) * scale + cx + tx; float v = (y - cy) * scale + cy + ty; if ((x < width) && (y < height)) { // write output color float c = tex2DFastBicubic<uchar, float>(texObj, u, v); d_output[i] = make_uchar4(c * 0xff, c * 0xff, c * 0xff, 0); } } // render image using Catmull-Rom texture lookup __global__ void d_renderCatRom(uchar4 *d_output, uint width, uint height, float tx, float ty, float scale, float cx, float cy, cudaTextureObject_t texObj) { uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; uint i = __umul24(y, width) + x; float u = (x - cx) * scale + cx + tx; float v = (y - cy) * scale + cy + ty; if ((x < width) && (y < height)) { // write output color float c = tex2DCatRom<uchar, float>(texObj, u, v); d_output[i] = make_uchar4(c * 0xff, c * 0xff, c * 0xff, 0); } } #endif // _BICUBICTEXTURE_KERNEL_CUH_
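/* Why the tex2DFastBicubic path above works: the four cubic weights fold into
   two hardware linear fetches, g0(fx) * tex(px + h0) + g1(fx) * tex(px + h1).
   The host-side sketch below is illustrative only -- linearFetch, bw0..bw3 and
   the sample values are my own assumptions, not part of this header -- and it
   checks the 1D identity directly. The +0.5 texel-centre offset used in
   h0()/h1() above is dropped here because the array is indexed directly
   instead of going through the texture unit. */
#include <cmath>
#include <cstdio>

// Same cubic B-spline weights as w0()..w3() above.
static float bw0(float a) { return (1.0f / 6.0f) * (a * (a * (-a + 3.0f) - 3.0f) + 1.0f); }
static float bw1(float a) { return (1.0f / 6.0f) * (a * a * (3.0f * a - 6.0f) + 4.0f); }
static float bw2(float a) { return (1.0f / 6.0f) * (a * (a * (-3.0f * a + 3.0f) + 3.0f) + 1.0f); }
static float bw3(float a) { return (1.0f / 6.0f) * (a * a * a); }

// Emulates a 1D linear texture fetch on an integer-indexed array.
static float linearFetch(const float *c, float p) {
  int i = (int)std::floor(p);
  float f = p - (float)i;
  return (1.0f - f) * c[i] + f * c[i + 1];
}

int main() {
  float samples[4] = {0.2f, 0.9f, 0.4f, 0.7f};  // c[-1], c[0], c[1], c[2]
  const float *c = samples + 1;                 // so c[-1] is a valid read
  float fx = 0.37f;                             // arbitrary fractional position

  // Reference: the full 4-tap cubic filter (cubicFilter<> above).
  float ref = bw0(fx) * c[-1] + bw1(fx) * c[0] + bw2(fx) * c[1] + bw3(fx) * c[2];

  // Fast path: two linear fetches weighted by the amplitude functions.
  float g0 = bw0(fx) + bw1(fx), g1 = bw2(fx) + bw3(fx);
  float h0 = -1.0f + bw1(fx) / g0;              // lands in [-1, 0)
  float h1 =  1.0f + bw3(fx) / g1;              // lands in [ 1, 2)
  float fast = g0 * linearFetch(c, h0) + g1 * linearFetch(c, h1);

  printf("reference = %f  fast = %f  diff = %g\n", ref, fast, std::fabs(ref - fast));
  return 0;
}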
#include <thrust/device_ptr.h> #include <thrust/scan.h> #include <cuComplex.h> #include "../cuspreadinterp.h" #include "../memtransfer.h" #include "../precision_independent.h" using namespace std; int CUFINUFFT_SPREAD3D(int nf1, int nf2, int nf3, CUCPX* d_fw, int M, FLT *d_kx, FLT *d_ky, FLT* d_kz, CUCPX *d_c, CUFINUFFT_PLAN d_plan) /* This c function is written for only doing 3D spreading. See test/spread3d_test.cu for usage. Melody Shih 07/25/19 not allocate,transfer and free memories on gpu. Shih 09/24/20 */ { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int ier; d_plan->kx = d_kx; d_plan->ky = d_ky; d_plan->kz = d_kz; d_plan->c = d_c; d_plan->fw = d_fw; //ier = setup_spreader_for_nufft(d_plan->spopts, eps, d_plan->opts); d_plan->nf1 = nf1; d_plan->nf2 = nf2; d_plan->nf3 = nf3; d_plan->M = M; d_plan->maxbatchsize = 1; cudaEventRecord(start); ier = ALLOCGPUMEM3D_PLAN(d_plan); ier = ALLOCGPUMEM3D_NUPTS(d_plan); cudaEventRecord(start); if(d_plan->opts.gpu_method == 1){ ier = CUSPREAD3D_NUPTSDRIVEN_PROP(nf1,nf2,nf3,M,d_plan); if(ier != 0 ){ printf("error: cuspread3d_nuptsdriven_prop, method(%d)\n", d_plan->opts.gpu_method); return ier; } } if(d_plan->opts.gpu_method == 2){ ier = CUSPREAD3D_SUBPROB_PROP(nf1,nf2,nf3,M,d_plan); if(ier != 0 ){ printf("error: cuspread3d_subprob_prop, method(%d)\n", d_plan->opts.gpu_method); return ier; } } if(d_plan->opts.gpu_method == 4){ ier = CUSPREAD3D_BLOCKGATHER_PROP(nf1,nf2,nf3,M,d_plan); if(ier != 0 ){ printf("error: cuspread3d_blockgather_prop, method(%d)\n", d_plan->opts.gpu_method); return ier; } } #ifdef TIME float milliseconds; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] Obtain Spread Prop\t %.3g ms\n", milliseconds); #endif cudaEventRecord(start); ier = CUSPREAD3D(d_plan, 1); #ifdef TIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] Spread (%d)\t\t %.3g ms\n", d_plan->opts.gpu_method, milliseconds); #endif cudaEventRecord(start); FREEGPUMEMORY3D(d_plan); #ifdef TIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] Free GPU memory\t %.3g ms\n", milliseconds); #endif return ier; } int CUSPREAD3D(CUFINUFFT_PLAN d_plan, int blksize) /* A wrapper for different spreading methods. 
Methods available: (1) Non-uniform points driven (2) Subproblem (4) Block gather Melody Shih 07/25/19 */ { int nf1 = d_plan->nf1; int nf2 = d_plan->nf2; int nf3 = d_plan->nf3; int M = d_plan->M; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int ier = 0; switch(d_plan->opts.gpu_method) { case 1: { cudaEventRecord(start); ier = CUSPREAD3D_NUPTSDRIVEN(nf1, nf2, nf3, M, d_plan, blksize); if(ier != 0 ){ cout<<"error: cnufftspread3d_gpu_subprob"<<endl; return 1; } } break; case 2: { cudaEventRecord(start); ier = CUSPREAD3D_SUBPROB(nf1, nf2, nf3, M, d_plan, blksize); if(ier != 0 ){ cout<<"error: cnufftspread3d_gpu_subprob"<<endl; return 1; } } break; case 4: { cudaEventRecord(start); ier = CUSPREAD3D_BLOCKGATHER(nf1, nf2, nf3, M, d_plan, blksize); if(ier != 0 ){ cout<<"error: cnufftspread3d_gpu_subprob"<<endl; return 1; } } break; default: cerr<<"error: incorrect method, should be 1,2,4"<<endl; return 2; } return ier; } int CUSPREAD3D_NUPTSDRIVEN_PROP(int nf1, int nf2, int nf3, int M, CUFINUFFT_PLAN d_plan) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); if(d_plan->opts.gpu_sort){ int bin_size_x=d_plan->opts.gpu_binsizex; int bin_size_y=d_plan->opts.gpu_binsizey; int bin_size_z=d_plan->opts.gpu_binsizez; if(bin_size_x < 0 || bin_size_y < 0 || bin_size_z < 0){ cout<<"error: invalid binsize (binsizex, binsizey, binsizez) = ("; cout<<bin_size_x<<","<<bin_size_y<<","<<bin_size_z<<")"<<endl; return 1; } int numbins[3]; numbins[0] = ceil((FLT) nf1/bin_size_x); numbins[1] = ceil((FLT) nf2/bin_size_y); numbins[2] = ceil((FLT) nf3/bin_size_z); #ifdef DEBUG cout<<"[debug ] Dividing the uniform grids to bin size[" <<d_plan->opts.gpu_binsizex<<"x"<<d_plan->opts.gpu_binsizey<<"x"<< d_plan->opts.gpu_binsizez<<"]"<<endl; cout<<"[debug ] numbins = ["<<numbins[0]<<"x"<<numbins[1]<<"x"<<numbins[2] <<"]"<<endl; #endif FLT* d_kx = d_plan->kx; FLT* d_ky = d_plan->ky; FLT* d_kz = d_plan->kz; #ifdef DEBUG FLT *h_kx; FLT *h_ky; FLT *h_kz; h_kx = (FLT*)malloc(M*sizeof(FLT)); h_ky = (FLT*)malloc(M*sizeof(FLT)); h_kz = (FLT*)malloc(M*sizeof(FLT)); checkCudaErrors(cudaMemcpy(h_kx,d_kx,M*sizeof(FLT), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_ky,d_ky,M*sizeof(FLT), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_kz,d_kz,M*sizeof(FLT), cudaMemcpyDeviceToHost)); for(int i=0; i<10; i++){ cout<<"[debug ] "; cout <<"("<<setw(3)<<h_kx[i]<<","<<setw(3)<<h_ky[i]<<","<<setw(3) <<h_kz[i]<<")"<<endl; } #endif int *d_binsize = d_plan->binsize; int *d_binstartpts = d_plan->binstartpts; int *d_sortidx = d_plan->sortidx; int *d_idxnupts = d_plan->idxnupts; int pirange = d_plan->spopts.pirange; cudaEventRecord(start); checkCudaErrors(cudaMemset(d_binsize,0,numbins[0]*numbins[1]*numbins[2]* sizeof(int))); CalcBinSize_noghost_3d<<<(M+1024-1)/1024, 1024>>>(M,nf1,nf2,nf3, bin_size_x,bin_size_y,bin_size_z,numbins[0],numbins[1],numbins[2], d_binsize,d_kx,d_ky,d_kz,d_sortidx,pirange); #ifdef SPREADTIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel CalcBinSize_noghost_3d \t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int *h_binsize;// For debug h_binsize = (int*)malloc(numbins[0]*numbins[1]*numbins[2]* sizeof(int)); checkCudaErrors(cudaMemcpy(h_binsize,d_binsize,numbins[0]*numbins[1]* numbins[2]*sizeof(int),cudaMemcpyDeviceToHost)); cout<<"[debug ] bin size:"<<endl; for(int k=0; k<numbins[2]; k++){ for(int j=0; j<numbins[1]; j++){ cout<<"[debug ] "; for(int i=0; 
i<numbins[0]; i++){ if(i!=0) cout<<" "; cout <<" bin["<<setw(1)<<i<<","<<setw(1)<<j<<","<<setw(1) <<k<<"]="<<h_binsize[i+j*numbins[0]+k*numbins[0]* numbins[1]]; } cout<<endl; } } free(h_binsize); cout<<"[debug ] ------------------------------------------------"<<endl; #endif #ifdef DEBUG int *h_sortidx; h_sortidx = (int*)malloc(M*sizeof(int)); checkCudaErrors(cudaMemcpy(h_sortidx,d_sortidx,M*sizeof(int), cudaMemcpyDeviceToHost)); for(int i=0; i<M; i++){ cout<<"[debug ] "; cout <<"point["<<setw(3)<<i<<"]="<<setw(3)<<h_sortidx[i]<<endl; } #endif cudaEventRecord(start); int n=numbins[0]*numbins[1]*numbins[2]; thrust::device_ptr<int> d_ptr(d_binsize); thrust::device_ptr<int> d_result(d_binstartpts); thrust::exclusive_scan(d_ptr, d_ptr + n, d_result); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel BinStartPts_3d \t\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int *h_binstartpts; h_binstartpts = (int*)malloc((numbins[0]*numbins[1]*numbins[2])* sizeof(int)); checkCudaErrors(cudaMemcpy(h_binstartpts,d_binstartpts,(numbins[0]* numbins[1]*numbins[2])*sizeof(int),cudaMemcpyDeviceToHost)); cout<<"[debug ] Result of scan bin_size array:"<<endl; for(int k=0; k<numbins[2]; k++){ for(int j=0; j<numbins[1]; j++){ cout<<"[debug ] "; for(int i=0; i<numbins[0]; i++){ if(i!=0) cout<<" "; cout <<" bin["<<setw(1)<<i<<","<<setw(1)<<j<<","<<setw(1)<< k<<"]="<<h_binstartpts[i+j*numbins[0]+k*numbins[0]*numbins[1]]; } cout<<endl; } } free(h_binstartpts); cout<<"[debug ] ------------------------------------------------"<<endl; #endif cudaEventRecord(start); CalcInvertofGlobalSortIdx_3d<<<(M+1024-1)/1024,1024>>>(M,bin_size_x, bin_size_y,bin_size_z,numbins[0],numbins[1],numbins[2],d_binstartpts, d_sortidx,d_kx,d_ky,d_kz,d_idxnupts, pirange, nf1, nf2, nf3); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel CalcInvertofGlobalSortIdx_3d \t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int *h_idxnupts; h_idxnupts = (int*)malloc(M*sizeof(int)); checkCudaErrors(cudaMemcpy(h_idxnupts,d_idxnupts,M*sizeof(int), cudaMemcpyDeviceToHost)); for (int i=0; i<10; i++){ cout <<"[debug ] idx="<< h_idxnupts[i]<<endl; } free(h_idxnupts); #endif }else{ int *d_idxnupts = d_plan->idxnupts; cudaEventRecord(start); TrivialGlobalSortIdx_3d<<<(M+1024-1)/1024, 1024>>>(M,d_idxnupts); #ifdef SPREADTIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel TrivialGlobalSortIDx_3d \t\t%.3g ms\n", milliseconds); #endif } return 0; } int CUSPREAD3D_NUPTSDRIVEN(int nf1, int nf2, int nf3, int M, CUFINUFFT_PLAN d_plan, int blksize) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); dim3 threadsPerBlock; dim3 blocks; int ns=d_plan->spopts.nspread; // psi's support in terms of number of cells FLT sigma=d_plan->spopts.upsampfac; FLT es_c=d_plan->spopts.ES_c; FLT es_beta=d_plan->spopts.ES_beta; int pirange=d_plan->spopts.pirange; int* d_idxnupts = d_plan->idxnupts; FLT* d_kx = d_plan->kx; FLT* d_ky = d_plan->ky; FLT* d_kz = d_plan->kz; CUCPX* d_c = d_plan->c; CUCPX* d_fw = d_plan->fw; threadsPerBlock.x = 16; threadsPerBlock.y = 1; blocks.x = (M + threadsPerBlock.x - 1)/threadsPerBlock.x; blocks.y = 1; cudaEventRecord(start); if(d_plan->opts.gpu_kerevalmeth==1){ for(int t=0; t<blksize; t++){ Spread_3d_NUptsdriven_Horner<<<blocks, 
threadsPerBlock>>>(d_kx, d_ky, d_kz, d_c+t*M, d_fw+t*nf1*nf2*nf3, M, ns, nf1, nf2, nf3, sigma, d_idxnupts,pirange); } }else{ for(int t=0; t<blksize; t++){ Spread_3d_NUptsdriven<<<blocks, threadsPerBlock>>>(d_kx, d_ky, d_kz, d_c+t*M, d_fw+t*nf1*nf2*nf3, M, ns, nf1, nf2, nf3, es_c, es_beta, d_idxnupts,pirange); } } #ifdef SPREADTIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel Spread_3d_NUptsdriven (%d)\t%.3g ms\n", milliseconds,d_plan->opts.gpu_kerevalmeth); #endif return 0; } int CUSPREAD3D_BLOCKGATHER_PROP(int nf1, int nf2, int nf3, int M, CUFINUFFT_PLAN d_plan) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); dim3 threadsPerBlock; dim3 blocks; int pirange = d_plan->spopts.pirange; int maxsubprobsize=d_plan->opts.gpu_maxsubprobsize; int o_bin_size_x = d_plan->opts.gpu_obinsizex; int o_bin_size_y = d_plan->opts.gpu_obinsizey; int o_bin_size_z = d_plan->opts.gpu_obinsizez; int numobins[3]; if (nf1 % o_bin_size_x != 0 || nf2 % o_bin_size_y != 0 || nf3 % o_bin_size_z != 0){ cout<<"error: mod(nf1, opts.gpu_obinsizex) != 0"<<endl; cout<<" mod(nf2, opts.gpu_obinsizey) != 0"<<endl; cout<<" mod(nf3, opts.gpu_obinsizez) != 0"<<endl; cout<<"error: (nf1, nf2, nf3) = ("<<nf1<<", "<<nf2<<", "<<nf3<<")"<<endl; cout<<"error: (obinsizex, obinsizey, obinsizez) = (" <<o_bin_size_x<<", "<<o_bin_size_y<<", "<<o_bin_size_z<<")"<<endl; return 1; } numobins[0] = ceil((FLT) nf1/o_bin_size_x); numobins[1] = ceil((FLT) nf2/o_bin_size_y); numobins[2] = ceil((FLT) nf3/o_bin_size_z); int bin_size_x=d_plan->opts.gpu_binsizex; int bin_size_y=d_plan->opts.gpu_binsizey; int bin_size_z=d_plan->opts.gpu_binsizez; if (o_bin_size_x % bin_size_x != 0 || o_bin_size_y % bin_size_y != 0 || o_bin_size_z % bin_size_z != 0){ cout<<"error: mod(ops.gpu_obinsizex, opts.gpu_binsizex) != 0"<<endl; cout<<" mod(ops.gpu_obinsizey, opts.gpu_binsizey) != 0"<<endl; cout<<" mod(ops.gpu_obinsizez, opts.gpu_binsizez) != 0"<<endl; cout<<"error: (binsizex, binsizey, binsizez) = (" <<bin_size_x<<", "<<bin_size_y<<", "<<bin_size_z<<")"<<endl; cout<<"error: (obinsizex, obinsizey, obinsizez) = (" <<o_bin_size_x<<", "<<o_bin_size_y<<", "<<o_bin_size_z<<")"<<endl; return 1; } int binsperobinx, binsperobiny, binsperobinz; int numbins[3]; binsperobinx = o_bin_size_x/bin_size_x+2; binsperobiny = o_bin_size_y/bin_size_y+2; binsperobinz = o_bin_size_z/bin_size_z+2; numbins[0] = numobins[0]*(binsperobinx); numbins[1] = numobins[1]*(binsperobiny); numbins[2] = numobins[2]*(binsperobinz); #ifdef DEBUG cout<<"[debug ] Dividing the uniform grids to bin size[" <<d_plan->opts.gpu_binsizex<<"x"<<d_plan->opts.gpu_binsizey<<"x"<< d_plan->opts.gpu_binsizez<<"]"<<endl; cout<<"[debug ] numobins = ["<<numobins[0]<<"x"<<numobins[1]<<"x"<< numobins[2]<<"]"<<endl; cout<<"[debug ] numbins = ["<<numbins[0]<<"x"<<numbins[1]<<"x"<< numbins[2]<<"]"<<endl; #endif FLT* d_kx = d_plan->kx; FLT* d_ky = d_plan->ky; FLT* d_kz = d_plan->kz; #ifdef DEBUG FLT *h_kx, *h_ky, *h_kz; h_kx = (FLT*)malloc(M*sizeof(FLT)); h_ky = (FLT*)malloc(M*sizeof(FLT)); h_kz = (FLT*)malloc(M*sizeof(FLT)); checkCudaErrors(cudaMemcpy(h_kx,d_kx,M*sizeof(FLT),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_ky,d_ky,M*sizeof(FLT),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_kz,d_kz,M*sizeof(FLT),cudaMemcpyDeviceToHost)); for(int i=0; i<M; i++){ cout<<"[debug ] "; cout <<"("<<setw(3)<<h_kx[i]<<","<<setw(3)<<h_ky[i]<<","<<h_kz[i]<<")" <<endl; } #endif int *d_binsize = 
d_plan->binsize; int *d_sortidx = d_plan->sortidx; int *d_binstartpts = d_plan->binstartpts; int *d_numsubprob = d_plan->numsubprob; void*d_temp_storage = NULL; int *d_idxnupts = NULL; int *d_subprobstartpts = d_plan->subprobstartpts; int *d_subprob_to_bin = NULL; cudaEventRecord(start); checkCudaErrors(cudaMemset(d_binsize,0,numbins[0]*numbins[1]*numbins[2]* sizeof(int))); LocateNUptstoBins_ghost<<<(M+1024-1)/1024, 1024>>>(M,bin_size_x, bin_size_y,bin_size_z,numobins[0],numobins[1],numobins[2],binsperobinx, binsperobiny, binsperobinz,d_binsize,d_kx, d_ky,d_kz,d_sortidx,pirange,nf1,nf2,nf3); #ifdef SPREADTIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel LocateNUptstoBins_ghost \t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int *h_binsize;// For debug h_binsize = (int*)malloc(numbins[0]*numbins[1]*numbins[2]*sizeof(int)); checkCudaErrors(cudaMemcpy(h_binsize,d_binsize,numbins[0]*numbins[1]* numbins[2]*sizeof(int),cudaMemcpyDeviceToHost)); cout<<"[debug ] bin size:"<<endl; for(int k=0; k<numbins[2]; k++){ cout<<"[debug ]"<<endl; for(int j=0; j<numbins[1]; j++){ if(j%binsperobinx == 0 && j!=0) cout<<"[debug ] -----------------"<<endl; cout<<"[debug ] "; for(int i=0; i<numbins[0]; i++){ if(i%binsperobinx == 0 && i!=0) cout<<"|"; if(i!=0) cout<<" "; int binidx = CalcGlobalIdx(i,j,k,numobins[0],numobins[1], numobins[2],binsperobinx,binsperobiny,binsperobinz); cout<<h_binsize[binidx]; } cout<<endl; } } cout<<"[debug ] ---------------------------------------------------"<<endl; #endif #ifdef DEBUG int *h_sortidx; h_sortidx = (int*)malloc(M*sizeof(int)); checkCudaErrors(cudaMemcpy(h_sortidx,d_sortidx,M*sizeof(int), cudaMemcpyDeviceToHost)); for(int i=0; i<M; i++){ cout <<"[debug ] point["<<setw(3)<<i<<"]="<<setw(3)<<h_sortidx[i]<<endl; } #endif cudaEventRecord(start); threadsPerBlock.x=8; threadsPerBlock.y=8; threadsPerBlock.z=8; blocks.x = (threadsPerBlock.x+numbins[0]-1)/threadsPerBlock.x; blocks.y = (threadsPerBlock.y+numbins[1]-1)/threadsPerBlock.y; blocks.z = (threadsPerBlock.z+numbins[2]-1)/threadsPerBlock.z; FillGhostBins<<<blocks, threadsPerBlock>>>(binsperobinx, binsperobiny, binsperobinz, numobins[0], numobins[1], numobins[2], d_binsize); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel FillGhostBins \t\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG checkCudaErrors(cudaMemcpy(h_binsize,d_binsize,numbins[0]*numbins[1]* numbins[2]*sizeof(int),cudaMemcpyDeviceToHost)); cout<<"[debug ] Filled ghost bins:"<<endl; for(int k=0; k<numbins[2]; k++){ cout<<"[debug ] "<<endl; cout<<"[debug ] "<<endl; for(int j=0; j<numbins[1]; j++){ if(j%binsperobinx == 0 && j!=0) cout<<"[debug ] -----------------"<<endl; cout<<"[debug ] "; for(int i=0; i<numbins[0]; i++){ if(i%binsperobinx == 0 && i!=0) cout<<"|"; int binidx = CalcGlobalIdx(i,j,k,numobins[0],numobins[1], numobins[2],binsperobinx,binsperobiny,binsperobinz); if(i!=0) cout<<" "; cout<<h_binsize[binidx]; } cout<<endl; } } cout<<"[debug ] ---------------------------------------------------"<<endl; #endif cudaEventRecord(start); int n=numbins[0]*numbins[1]*numbins[2]; thrust::device_ptr<int> d_ptr(d_binsize); thrust::device_ptr<int> d_result(d_binstartpts+1); thrust::inclusive_scan(d_ptr, d_ptr + n, d_result); checkCudaErrors(cudaMemset(d_binstartpts,0,sizeof(int))); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); 
cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel BinStartPts_3d \t\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int *h_binstartpts; h_binstartpts = (int*)malloc((numbins[0]*numbins[1]*numbins[2])*sizeof(int)); checkCudaErrors(cudaMemcpy(h_binstartpts,d_binstartpts,(numbins[0]* numbins[1]*numbins[2])*sizeof(int),cudaMemcpyDeviceToHost)); cout<<"[debug ] Result of scan bin_size array:"<<endl; for(int k=0; k<numbins[2]; k++){ cout<<"[debug ] "<<endl; for(int j=0; j<numbins[1]; j++){ cout<<"[debug ] "; for(int i=0; i<numbins[0]; i++){ if(i!=0) cout<<" "; int binidx = CalcGlobalIdx(i,j,k,numobins[0],numobins[1], numobins[2],binsperobinx,binsperobiny,binsperobinz); cout<<h_binstartpts[binidx]; } cout<<endl; } } cout<<"[debug ] ----------------------------------------------------"<<endl; #endif cudaEventRecord(start); int totalNUpts; checkCudaErrors(cudaMemcpy(&totalNUpts,&d_binstartpts[n], sizeof(int),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMalloc(&d_idxnupts,totalNUpts*sizeof(int))); #ifdef DEBUG checkCudaErrors(cudaMemset(d_idxnupts,-1,totalNUpts*sizeof(int))); #endif cudaEventRecord(start); CalcInvertofGlobalSortIdx_ghost<<<(M+1024-1)/1024,1024>>>(M,bin_size_x, bin_size_y,bin_size_z,numobins[0],numobins[1],numobins[2],binsperobinx, binsperobiny,binsperobinz,d_binstartpts,d_sortidx,d_kx,d_ky,d_kz, d_idxnupts,pirange,nf1,nf2,nf3); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel CalcInvertofGlobalIdx_ghost \t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int *h_idxnupts; h_idxnupts = (int*)malloc(totalNUpts*sizeof(int)); checkCudaErrors(cudaMemcpy(h_idxnupts,d_idxnupts,totalNUpts*sizeof(int), cudaMemcpyDeviceToHost)); int pts = 0; for(int b=0; b<numbins[0]*numbins[1]*numbins[1]; b++){ if(h_binsize[b] > 0) cout <<"[debug ] Bin "<<b<<endl; for (int i=h_binstartpts[b]; i<h_binstartpts[b]+h_binsize[b]; i++){ cout <<"[debug ] NUpts-index= "<< h_idxnupts[i]<<endl; pts++; } } cout<<"[debug ] totalpts = "<<pts<<endl; #endif cudaEventRecord(start); threadsPerBlock.x=2; threadsPerBlock.y=2; threadsPerBlock.z=2; blocks.x = (threadsPerBlock.x+numbins[0]-1)/threadsPerBlock.x; blocks.y = (threadsPerBlock.y+numbins[1]-1)/threadsPerBlock.y; blocks.z = (threadsPerBlock.z+numbins[2]-1)/threadsPerBlock.z; GhostBinPtsIdx<<<blocks, threadsPerBlock>>>(binsperobinx, binsperobiny, binsperobinz, numobins[0], numobins[1], numobins[2], d_binsize, d_idxnupts, d_binstartpts, M); if (d_plan->idxnupts != NULL) cudaFree(d_plan->idxnupts); d_plan->idxnupts = d_idxnupts; #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel GhostBinPtsIdx \t\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG checkCudaErrors(cudaMemcpy(h_idxnupts,d_idxnupts,totalNUpts*sizeof(int), cudaMemcpyDeviceToHost)); pts = 0; for(int b=0; b<numbins[0]*numbins[1]*numbins[1]; b++){ if(h_binsize[b] > 0) cout <<"[debug ] Bin "<<b<<endl; for (int i=h_binstartpts[b]; i<h_binstartpts[b]+h_binsize[b]; i++){ cout <<"[debug ] NUpts-index= "<< h_idxnupts[i]<<endl; pts++; } } cout<<"[debug ] totalpts = "<<pts<<endl; free(h_idxnupts); free(h_binstartpts); free(h_binsize); #endif /* --------------------------------------------- */ // Determining Subproblem properties // /* --------------------------------------------- */ cudaEventRecord(start); n = numobins[0]*numobins[1]*numobins[2]; cudaEventRecord(start); CalcSubProb_3d_v1<<<(n+1024-1)/1024, 
1024>>>(binsperobinx, binsperobiny, binsperobinz, d_binsize, d_numsubprob, maxsubprobsize, numobins[0]* numobins[1]*numobins[2]); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel CalcSubProb_3d_v1\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int* h_numsubprob; h_numsubprob = (int*) malloc(n*sizeof(int)); checkCudaErrors(cudaMemcpy(h_numsubprob,d_numsubprob,numobins[0]*numobins[1]* numobins[2]*sizeof(int),cudaMemcpyDeviceToHost)); for(int k=0; k<numobins[2]; k++){ cout<<"[debug ] "<<endl; for(int j=0; j<numobins[1]; j++){ cout<<"[debug ] "; for(int i=0; i<numobins[0]; i++){ if(i!=0) cout<<" "; cout <<"s["<<setw(1)<<i<<","<<setw(1)<<j<<","<<setw(1)<<k <<"]= "<<setw(3)<<h_numsubprob[i+j*numobins[0]+k* numobins[1]*numobins[2]]; } cout<<endl; } } free(h_numsubprob); #endif cudaEventRecord(start); n = numobins[0]*numobins[1]*numobins[2]; d_ptr = thrust::device_pointer_cast(d_numsubprob); d_result = thrust::device_pointer_cast(d_subprobstartpts+1); thrust::inclusive_scan(d_ptr, d_ptr + n, d_result); checkCudaErrors(cudaMemset(d_subprobstartpts,0,sizeof(int))); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tScan numsubprob\t\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG printf("[debug ] Subproblem start points\n"); int* h_subprobstartpts; h_subprobstartpts = (int*) malloc((n+1)*sizeof(int)); checkCudaErrors(cudaMemcpy(h_subprobstartpts,d_subprobstartpts,(numobins[0]* numobins[1]*numobins[2]+1)*sizeof(int),cudaMemcpyDeviceToHost)); for(int k=0; k<numobins[2]; k++){ if(k!=0) cout<<"[debug ] "<<endl; for(int j=0; j<numobins[1]; j++){ cout<<"[debug ] "; for(int i=0; i<numobins[0]; i++){ if(i!=0) cout<<" "; cout <<"s["<<setw(1)<<i<<","<<setw(1)<<j<<","<<setw(1)<<k <<"]= "<<setw(3)<<h_subprobstartpts[i+j*numobins[0]+k* numobins[1]*numobins[2]]; } cout<<endl; } } printf("[debug ] Total number of subproblems (%d) = %d\n", n, h_subprobstartpts[n]); free(h_subprobstartpts); cout<<"[debug ] ---------------------------------------------------"<<endl; #endif cudaEventRecord(start); int totalnumsubprob; checkCudaErrors(cudaMemcpy(&totalnumsubprob,&d_subprobstartpts[n], sizeof(int),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMalloc(&d_subprob_to_bin,totalnumsubprob*sizeof(int))); MapBintoSubProb_3d_v1<<<(n+1024-1)/1024, 1024>>>(d_subprob_to_bin, d_subprobstartpts,d_numsubprob,n); assert(d_subprob_to_bin != NULL); if (d_plan->subprob_to_bin != NULL) cudaFree(d_plan->subprob_to_bin); d_plan->subprob_to_bin = d_subprob_to_bin; d_plan->totalnumsubprob = totalnumsubprob; #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel Subproblem to Bin map\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG printf("[debug ] Map Subproblem to Bins\n"); int* h_subprob_to_bin; h_subprob_to_bin = (int*) malloc((totalnumsubprob)*sizeof(int)); checkCudaErrors(cudaMemcpy(h_subprob_to_bin,d_subprob_to_bin, (totalnumsubprob)*sizeof(int),cudaMemcpyDeviceToHost)); for(int j=0; j<totalnumsubprob; j++){ cout<<"[debug ] "; cout <<"s["<<j<<"] = "<<setw(2)<<"b["<<h_subprob_to_bin[j]<<"]"; cout<<endl; } free(h_subprob_to_bin); #endif cudaFree(d_temp_storage); return 0; } int CUSPREAD3D_BLOCKGATHER(int nf1, int nf2, int nf3, int M, CUFINUFFT_PLAN d_plan, int blksize) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int 
ns=d_plan->spopts.nspread; FLT es_c=d_plan->spopts.ES_c; FLT es_beta=d_plan->spopts.ES_beta; FLT sigma=d_plan->spopts.upsampfac; int pirange=d_plan->spopts.pirange; int maxsubprobsize=d_plan->opts.gpu_maxsubprobsize; int obin_size_x=d_plan->opts.gpu_obinsizex; int obin_size_y=d_plan->opts.gpu_obinsizey; int obin_size_z=d_plan->opts.gpu_obinsizez; int bin_size_x=d_plan->opts.gpu_binsizex; int bin_size_y=d_plan->opts.gpu_binsizey; int bin_size_z=d_plan->opts.gpu_binsizez; int numobins[3]; numobins[0] = ceil((FLT) nf1/obin_size_x); numobins[1] = ceil((FLT) nf2/obin_size_y); numobins[2] = ceil((FLT) nf3/obin_size_z); int binsperobinx, binsperobiny, binsperobinz; binsperobinx = obin_size_x/bin_size_x+2; binsperobiny = obin_size_y/bin_size_y+2; binsperobinz = obin_size_z/bin_size_z+2; #ifdef INFO cout<<"[info ] Dividing the uniform grids to bin size[" <<obin_size_x<<"x"<<obin_size_y<<"x"<<obin_size_z<<"]"<<endl; cout<<"[info ] numbins = ["<<numobins[0]<<"x"<<numobins[1]<<"x"<< numobins[2]<<"]"<<endl; cout<<"[info ] ns = "<< ns<<endl; #endif FLT* d_kx = d_plan->kx; FLT* d_ky = d_plan->ky; FLT* d_kz = d_plan->kz; CUCPX* d_c = d_plan->c; CUCPX* d_fw = d_plan->fw; int *d_binstartpts = d_plan->binstartpts; int *d_subprobstartpts = d_plan->subprobstartpts; int *d_idxnupts = d_plan->idxnupts; int totalnumsubprob=d_plan->totalnumsubprob; int *d_subprob_to_bin = d_plan->subprob_to_bin; cudaEventRecord(start); for(int t=0; t<blksize; t++){ if(d_plan->opts.gpu_kerevalmeth == 1){ size_t sharedplanorysize = obin_size_x*obin_size_y*obin_size_z *sizeof(CUCPX); if(sharedplanorysize > 49152){ cout<<"error: not enough shared memory"<<endl; return 1; } Spread_3d_BlockGather_Horner<<<totalnumsubprob, 64, sharedplanorysize >>>(d_kx, d_ky, d_kz, d_c+t*M, d_fw+t*nf1*nf2*nf3, M, ns, nf1, nf2, nf3, es_c, es_beta, sigma, d_binstartpts, obin_size_x, obin_size_y, obin_size_z, binsperobinx*binsperobiny*binsperobinz,d_subprob_to_bin, d_subprobstartpts, maxsubprobsize, numobins[0], numobins[1], numobins[2], d_idxnupts,pirange); }else{ size_t sharedplanorysize = obin_size_x*obin_size_y*obin_size_z *sizeof(CUCPX); if(sharedplanorysize > 49152){ cout<<"error: not enough shared memory"<<endl; return 1; } Spread_3d_BlockGather<<<totalnumsubprob, 64, sharedplanorysize>>>( d_kx, d_ky, d_kz, d_c+t*M, d_fw+t*nf1*nf2*nf3, M, ns, nf1, nf2, nf3, es_c, es_beta, sigma, d_binstartpts, obin_size_x, obin_size_y, obin_size_z, binsperobinx*binsperobiny*binsperobinz,d_subprob_to_bin, d_subprobstartpts, maxsubprobsize, numobins[0], numobins[1], numobins[2], d_idxnupts,pirange); } } #ifdef SPREADTIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel Spread_3d_BlockGather (%d)\t%.3g ms\n", milliseconds, d_plan->opts.gpu_kerevalmeth); #endif return 0; } int CUSPREAD3D_SUBPROB_PROP(int nf1, int nf2, int nf3, int M, CUFINUFFT_PLAN d_plan) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int maxsubprobsize=d_plan->opts.gpu_maxsubprobsize; int bin_size_x=d_plan->opts.gpu_binsizex; int bin_size_y=d_plan->opts.gpu_binsizey; int bin_size_z=d_plan->opts.gpu_binsizez; if(bin_size_x < 0 || bin_size_y < 0 || bin_size_z < 0){ cout<<"error: invalid binsize (binsizex, binsizey, binsizez) = ("; cout<<bin_size_x<<","<<bin_size_y<<","<<bin_size_z<<")"<<endl; return 1; } int numbins[3]; numbins[0] = ceil((FLT) nf1/bin_size_x); numbins[1] = ceil((FLT) nf2/bin_size_y); numbins[2] = ceil((FLT) nf3/bin_size_z); #ifdef DEBUG cout<<"[debug ] 
Dividing the uniform grids to bin size[" <<d_plan->opts.gpu_binsizex<<"x"<<d_plan->opts.gpu_binsizey<<"x"<<d_plan->opts.gpu_binsizez<<"]"<<endl; cout<<"[debug ] numbins = ["<<numbins[0]<<"x"<<numbins[1]<<"x"<<numbins[2] <<"]"<<endl; #endif FLT* d_kx = d_plan->kx; FLT* d_ky = d_plan->ky; FLT* d_kz = d_plan->kz; #ifdef DEBUG FLT *h_kx; FLT *h_ky; FLT *h_kz; h_kx = (FLT*)malloc(M*sizeof(FLT)); h_ky = (FLT*)malloc(M*sizeof(FLT)); h_kz = (FLT*)malloc(M*sizeof(FLT)); checkCudaErrors(cudaMemcpy(h_kx,d_kx,M*sizeof(FLT),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_ky,d_ky,M*sizeof(FLT),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(h_kz,d_kz,M*sizeof(FLT),cudaMemcpyDeviceToHost)); for(int i=M-10; i<M; i++){ cout<<"[debug ] "; cout <<"("<<setw(3)<<h_kx[i]<<","<<setw(3)<<h_ky[i]<<","<<setw(3)<<h_kz[i] <<")"<<endl; } #endif int *d_binsize = d_plan->binsize; int *d_binstartpts = d_plan->binstartpts; int *d_sortidx = d_plan->sortidx; int *d_numsubprob = d_plan->numsubprob; int *d_subprobstartpts = d_plan->subprobstartpts; int *d_idxnupts = d_plan->idxnupts; int *d_subprob_to_bin = NULL; void *d_temp_storage = NULL; int pirange = d_plan->spopts.pirange; cudaEventRecord(start); checkCudaErrors(cudaMemset(d_binsize,0,numbins[0]*numbins[1]*numbins[2]* sizeof(int))); CalcBinSize_noghost_3d<<<(M+1024-1)/1024, 1024>>>(M,nf1,nf2,nf3,bin_size_x, bin_size_y,bin_size_z,numbins[0],numbins[1],numbins[2],d_binsize,d_kx, d_ky,d_kz,d_sortidx,pirange); #ifdef SPREADTIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel CalcBinSize_noghost_3d \t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int *h_binsize;// For debug h_binsize = (int*)malloc(numbins[0]*numbins[1]*numbins[2]*sizeof(int)); checkCudaErrors(cudaMemcpy(h_binsize,d_binsize,numbins[0]*numbins[1]* numbins[2]*sizeof(int),cudaMemcpyDeviceToHost)); cout<<"[debug ] bin size:"<<endl; for(int k=0; k<numbins[2]; k++){ for(int j=0; j<numbins[1]; j++){ cout<<"[debug ] "; for(int i=0; i<numbins[0]; i++){ if(i!=0) cout<<" "; cout <<h_binsize[i+j*numbins[0]+k*numbins[0]*numbins[1]]; } cout<<endl; } } free(h_binsize); cout<<"[debug ] ----------------------------------------------------"<<endl; #endif #ifdef DEBUG int *h_sortidx; h_sortidx = (int*)malloc(M*sizeof(int)); checkCudaErrors(cudaMemcpy(h_sortidx,d_sortidx,M*sizeof(int), cudaMemcpyDeviceToHost)); for(int i=0; i<10; i++){ cout<<"[debug ] "; cout <<"point["<<setw(3)<<i<<"]="<<setw(3)<<h_sortidx[i]<<endl; } #endif cudaEventRecord(start); int n=numbins[0]*numbins[1]*numbins[2]; thrust::device_ptr<int> d_ptr(d_binsize); thrust::device_ptr<int> d_result(d_binstartpts); thrust::exclusive_scan(d_ptr, d_ptr + n, d_result); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel BinStartPts_3d \t\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int *h_binstartpts; h_binstartpts = (int*)malloc((numbins[0]*numbins[1]*numbins[2])*sizeof(int)); checkCudaErrors(cudaMemcpy(h_binstartpts,d_binstartpts,(numbins[0]* numbins[1]*numbins[2])*sizeof(int),cudaMemcpyDeviceToHost)); cout<<"[debug ] Result of scan bin_size array:"<<endl; for(int k=0; k<numbins[2]; k++){ for(int j=0; j<numbins[1]; j++){ cout<<"[debug ] "; for(int i=0; i<numbins[0]; i++){ if(i!=0) cout<<" "; cout <<h_binstartpts[i+j*numbins[0]+k*numbins[0]*numbins[1]]; } cout<<endl; } } free(h_binstartpts); cout<<"[debug ] 
---------------------------------------------------"<<endl; #endif cudaEventRecord(start); CalcInvertofGlobalSortIdx_3d<<<(M+1024-1)/1024,1024>>>(M,bin_size_x, bin_size_y,bin_size_z,numbins[0],numbins[1],numbins[2], d_binstartpts, d_sortidx,d_kx,d_ky,d_kz,d_idxnupts,pirange,nf1,nf2,nf3); #ifdef DEBUG int *h_idxnupts; h_idxnupts = (int*)malloc(M*sizeof(int)); checkCudaErrors(cudaMemcpy(h_idxnupts,d_idxnupts,M*sizeof(int), cudaMemcpyDeviceToHost)); for (int i=0; i<4; i++){ cout <<"[debug ] idx="<< h_idxnupts[i]<<endl; } free(h_idxnupts); #endif /* --------------------------------------------- */ // Determining Subproblem properties // /* --------------------------------------------- */ cudaEventRecord(start); CalcSubProb_3d_v2<<<(M+1024-1)/1024, 1024>>>(d_binsize,d_numsubprob, maxsubprobsize,numbins[0]*numbins[1]*numbins[2]); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel CalcSubProb_3d_v2\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG int* h_numsubprob; h_numsubprob = (int*) malloc(n*sizeof(int)); checkCudaErrors(cudaMemcpy(h_numsubprob,d_numsubprob,numbins[0]*numbins[1]* numbins[2]*sizeof(int),cudaMemcpyDeviceToHost)); for(int k=0; k<numbins[2]; k++){ for(int j=0; j<numbins[1]; j++){ cout<<"[debug ] "; for(int i=0; i<numbins[0]; i++){ if(i!=0) cout<<" "; cout <<h_numsubprob[i+j*numbins[0]+k*numbins[0]*numbins[1]]; } cout<<endl; } } free(h_numsubprob); #endif d_ptr = thrust::device_pointer_cast(d_numsubprob); d_result = thrust::device_pointer_cast(d_subprobstartpts+1); thrust::inclusive_scan(d_ptr, d_ptr + n, d_result); checkCudaErrors(cudaMemset(d_subprobstartpts,0,sizeof(int))); #ifdef SPREADTIME cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel Scan Subprob array\t\t%.3g ms\n", milliseconds); #endif #ifdef DEBUG printf("[debug ] Subproblem start points\n"); int* h_subprobstartpts; h_subprobstartpts = (int*) malloc((n+1)*sizeof(int)); checkCudaErrors(cudaMemcpy(h_subprobstartpts,d_subprobstartpts, (n+1)*sizeof(int),cudaMemcpyDeviceToHost)); for(int k=0; k<numbins[2]; k++){ for(int j=0; j<numbins[1]; j++){ cout<<"[debug ] "; for(int i=0; i<numbins[0]; i++){ if(i!=0) cout<<" "; cout <<h_subprobstartpts[i+j*numbins[0]+k*numbins[0]*numbins[1]]; } cout<<endl; } } printf("[debug ] Total number of subproblems = %d\n", h_subprobstartpts[n]); free(h_subprobstartpts); #endif int totalnumsubprob; checkCudaErrors(cudaMemcpy(&totalnumsubprob,&d_subprobstartpts[n], sizeof(int),cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMalloc(&d_subprob_to_bin,totalnumsubprob*sizeof(int))); MapBintoSubProb_3d_v2<<<(numbins[0]*numbins[1]+1024-1)/1024, 1024>>>( d_subprob_to_bin,d_subprobstartpts,d_numsubprob,numbins[0]*numbins[1]* numbins[2]); assert(d_subprob_to_bin != NULL); if (d_plan->subprob_to_bin != NULL) cudaFree(d_plan->subprob_to_bin); d_plan->subprob_to_bin = d_subprob_to_bin; assert(d_plan->subprob_to_bin != NULL); d_plan->totalnumsubprob = totalnumsubprob; #ifdef DEBUG printf("[debug ] Map Subproblem to Bins\n"); int* h_subprob_to_bin; h_subprob_to_bin = (int*) malloc((totalnumsubprob)*sizeof(int)); checkCudaErrors(cudaMemcpy(h_subprob_to_bin,d_subprob_to_bin, (totalnumsubprob)*sizeof(int),cudaMemcpyDeviceToHost)); cout << totalnumsubprob << endl; for(int j=0; j<min(totalnumsubprob,10); j++){ cout<<"[debug ] "; cout <<"nsub["<<j<<"] = "<<setw(2)<<h_subprob_to_bin[j]; cout<<endl; } free(h_subprob_to_bin); #endif #ifdef SPREADTIME 
cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel Subproblem to Bin map\t\t%.3g ms\n", milliseconds); #endif cudaFree(d_temp_storage); return 0; } int CUSPREAD3D_SUBPROB(int nf1, int nf2, int nf3, int M, CUFINUFFT_PLAN d_plan, int blksize) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int ns=d_plan->spopts.nspread; // psi's support in terms of number of cells int maxsubprobsize=d_plan->opts.gpu_maxsubprobsize; // assume that bin_size_x > ns/2; int bin_size_x=d_plan->opts.gpu_binsizex; int bin_size_y=d_plan->opts.gpu_binsizey; int bin_size_z=d_plan->opts.gpu_binsizez; int numbins[3]; numbins[0] = ceil((FLT) nf1/bin_size_x); numbins[1] = ceil((FLT) nf2/bin_size_y); numbins[2] = ceil((FLT) nf3/bin_size_z); #ifdef INFO cout<<"[info ] Dividing the uniform grids to bin size[" <<d_plan->opts.gpu_binsizex<<"x"<<d_plan->opts.gpu_binsizey<<"x"<<d_plan->opts.gpu_binsizez<<"]"<<endl; cout<<"[info ] numbins = ["<<numbins[0]<<"x"<<numbins[1]<<"]"<<endl; cout<<ns<<endl; #endif FLT* d_kx = d_plan->kx; FLT* d_ky = d_plan->ky; FLT* d_kz = d_plan->kz; CUCPX* d_c = d_plan->c; CUCPX* d_fw = d_plan->fw; int *d_binsize = d_plan->binsize; int *d_binstartpts = d_plan->binstartpts; int *d_numsubprob = d_plan->numsubprob; int *d_subprobstartpts = d_plan->subprobstartpts; int *d_idxnupts = d_plan->idxnupts; int totalnumsubprob=d_plan->totalnumsubprob; int *d_subprob_to_bin = d_plan->subprob_to_bin; FLT sigma=d_plan->spopts.upsampfac; FLT es_c=d_plan->spopts.ES_c; FLT es_beta=d_plan->spopts.ES_beta; int pirange=d_plan->spopts.pirange; cudaEventRecord(start); size_t sharedplanorysize = (bin_size_x+2*ceil(ns/2.0))*(bin_size_y+2* ceil(ns/2.0))*(bin_size_z+2*ceil(ns/2.0))*sizeof(CUCPX); if(sharedplanorysize > 49152){ cout<<"error: not enough shared memory ("<<sharedplanorysize<<")"<<endl; return 1; } for(int t=0; t<blksize; t++){ if(d_plan->opts.gpu_kerevalmeth){ Spread_3d_Subprob_Horner<<<totalnumsubprob, 256, sharedplanorysize>>>(d_kx, d_ky, d_kz, d_c+t*M, d_fw+t*nf1*nf2*nf3, M, ns, nf1, nf2, nf3, sigma, d_binstartpts, d_binsize, bin_size_x, bin_size_y, bin_size_z, d_subprob_to_bin, d_subprobstartpts, d_numsubprob, maxsubprobsize,numbins[0], numbins[1], numbins[2], d_idxnupts,pirange); }else{ Spread_3d_Subprob<<<totalnumsubprob, 256, sharedplanorysize>>>(d_kx, d_ky, d_kz, d_c+t*M, d_fw+t*nf1*nf2*nf3, M, ns, nf1, nf2, nf3, es_c, es_beta, d_binstartpts, d_binsize, bin_size_x,bin_size_y, bin_size_z, d_subprob_to_bin, d_subprobstartpts,d_numsubprob, maxsubprobsize,numbins[0], numbins[1], numbins[2],d_idxnupts,pirange); } } #ifdef SPREADTIME float milliseconds = 0; cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] \tKernel Spread_3d_Subprob (%d) \t%.3g ms\n", milliseconds, d_plan->opts.gpu_kerevalmeth); #endif return 0; }
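/* The *_PROP routines above all follow the same scan-based bookkeeping:
   per-bin point counts -> scan gives d_binstartpts, then per-bin subproblem
   counts -> inclusive scan written into d_subprobstartpts+1 (first element
   memset to 0) gives subproblem offsets and, in its last entry, the total
   number of subproblems. The CPU sketch below is purely illustrative: the bin
   sizes are made up, it requires C++17 for std::exclusive_scan/inclusive_scan,
   and it assumes each bin is cut into ceil(binsize / maxsubprobsize)
   subproblems, which is what the CalcSubProb_3d kernels (not shown in this
   file) are expected to do. */
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
  std::vector<int> binsize = {5, 0, 12, 3, 7};  // pretend CalcBinSize_* output
  int maxsubprobsize = 4;

  // d_binstartpts: exclusive scan of binsize (thrust::exclusive_scan above).
  std::vector<int> binstartpts(binsize.size(), 0);
  std::exclusive_scan(binsize.begin(), binsize.end(), binstartpts.begin(), 0);

  // d_numsubprob: split each bin into chunks of at most maxsubprobsize points.
  std::vector<int> numsubprob(binsize.size());
  for (size_t i = 0; i < binsize.size(); ++i)
    numsubprob[i] = (binsize[i] + maxsubprobsize - 1) / maxsubprobsize;

  // d_subprobstartpts: inclusive scan written one slot later, leading zero.
  std::vector<int> subprobstartpts(binsize.size() + 1, 0);
  std::inclusive_scan(numsubprob.begin(), numsubprob.end(),
                      subprobstartpts.begin() + 1);

  for (size_t i = 0; i < binsize.size(); ++i)
    printf("bin %zu: size=%2d start=%2d nsub=%d substart=%d\n",
           i, binsize[i], binstartpts[i], numsubprob[i], subprobstartpts[i]);
  printf("totalnumsubprob = %d\n", subprobstartpts.back());
  return 0;
}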
#ifdef __cplusplus
extern "C" { #endif #include <stdio.h> #include <math.h> #include <float.h> #include "gaterecurrent2dnoind_kernel.h" #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) __device__ void get_gate_idx_sf(int h1, int w1, int h2, int w2, int * out, int horizontal, int reverse) { if(horizontal && ! reverse) // left -> right { if(w1>w2) { out[0]=h1; out[1]=w1; } else { out[0]=h2; out[1]=w2; } } if(horizontal && reverse) // right -> left { if(w1<w2) { out[0]=h1; out[1]=w1; } else { out[0]=h2; out[1]=w2; } } if(!horizontal && !reverse) // top -> bottom { if(h1>h2) { out[0]=h1; out[1]=w1; } else { out[0]=h2; out[1]=w2; } } if(!horizontal && reverse) // bottom -> top { if(h1<h2) { out[0]=h1; out[1]=w1; } else { out[0]=h2; out[1]=w2; } } } __device__ float get_data_sf(float * data, int num, int channels,int height, int width,int n,int c,int h,int w) { if(h<0 || h >=height) return 0; if(w<0 || w >= width) return 0; return data[n*channels*height*width + c * height*width + h * width + w]; } __device__ void set_data_sf(float * data, int num, int channels,int height, int width,int n,int c,int h,int w, float v) { if(h<0 || h >=height) return ; if(w<0 || w >= width) return ; data[n*channels*height*width + c * height*width + h * width + w]=v; } __device__ float get_gate_sf(float * data, int num, int channels,int height, int width,int n,int c,int h1,int w1,int h2,int w2,int horizontal,int reverse) { if(h1<0 || h1 >=height) return 0; if(w1<0 || w1 >= width) return 0; if(h2<0 || h2 >=height) return 0; if(w2<0 || w2 >= width) return 0; int idx[2]; get_gate_idx_sf(h1,w1,h2,w2, idx,horizontal, reverse); int h = idx[0]; int w = idx[1]; return data[n*channels*height*width + c * height*width + h * width + w]; } __device__ void set_gate_sf(float * data, int num, int channels,int height, int width,int n,int c,int h1,int w1,int h2,int w2,int horizontal,int reverse, float v) { if(h1<0 || h1 >=height) return ; if(w1<0 || w1 >= width) return ; if(h2<0 || h2 >=height) return ; if(w2<0 || w2 >= width) return ; int idx[2]; get_gate_idx_sf(h1,w1,h2,w2, idx,horizontal, reverse); int h = idx[0]; int w = idx[1]; data[n*channels*height*width + c * height*width + h * width + w]=v; } // we do not use set_gate_add_sf(...) 
in the caffe implimentation // avoid using atomicAdd __global__ void forward_one_col_left_right( int count, int T, int num,int channels, int height, int width, float* X, float* G1, float* G2, float* G3, float* H, int horizontal, int reverse) { CUDA_1D_KERNEL_LOOP(index, count) { int hc_count = height * channels; int n,c,h,w; int temp=index; w = T; n = temp / hc_count; temp = temp % hc_count; c = temp / height; temp = temp % height; h = temp; float x_data = get_data_sf(X,num,channels,height,width,n,c,h,w); float g_data_1 = get_gate_sf(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); float h_minus1_data_1 = get_data_sf(H,num,channels,height,width,n,c,h-1,w-1); float h1_minus1 = g_data_1 * h_minus1_data_1; float g_data_2 = get_gate_sf(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse); float h_minus1_data_2 = get_data_sf(H,num,channels,height,width,n,c,h,w-1); float h2_minus1 = g_data_2 * h_minus1_data_2; float g_data_3 = get_gate_sf(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); float h_minus1_data_3 = get_data_sf(H,num,channels,height,width,n,c,h+1,w-1); float h3_minus1 = g_data_3 * h_minus1_data_3; float h_hype = h1_minus1 + h2_minus1 + h3_minus1; float x_hype = (1 - g_data_1 - g_data_2 - g_data_3) * x_data; float h_data = x_hype + h_hype; set_data_sf(H,num,channels,height,width,n,c,h,w,h_data); } } __global__ void forward_one_col_right_left( int count, int T, int num,int channels, int height, int width, float* X, float* G1, float* G2, float* G3, float* H,int horizontal,int reverse) { CUDA_1D_KERNEL_LOOP(index, count) { int hc_count = height * channels; int n,c,h,w; int temp=index; w = T; n = temp / hc_count; temp = temp % hc_count; c = temp / height; temp = temp % height; h = temp; float x_data = get_data_sf(X,num,channels,height,width,n,c,h,w); float g_data_1 = get_gate_sf(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); float h_minus1_data_1 = get_data_sf(H,num,channels,height,width,n,c,h-1,w+1); float h1_minus1 = g_data_1 * h_minus1_data_1; float g_data_2 = get_gate_sf(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse); float h_minus1_data_2 = get_data_sf(H,num,channels,height,width,n,c,h,w+1); float h2_minus1 = g_data_2 * h_minus1_data_2; float g_data_3 = get_gate_sf(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); float h_minus1_data_3 = get_data_sf(H,num,channels,height,width,n,c,h+1,w+1); float h3_minus1 = g_data_3 * h_minus1_data_3; float h_hype = h1_minus1 + h2_minus1 + h3_minus1; float x_hype = (1 - g_data_1 - g_data_2 - g_data_3) * x_data; float h_data = x_hype + h_hype; set_data_sf(H,num,channels,height,width,n,c,h,w,h_data); } } __global__ void forward_one_row_top_bottom( int count, int T, int num,int channels, int height, int width, float* X, float* G1, float* G2, float* G3, float* H,int horizontal,int reverse) { CUDA_1D_KERNEL_LOOP(index, count) { int wc_count = width * channels; int n,c,h,w; int temp=index; h = T; n = temp / wc_count; temp = temp % wc_count; c = temp / width; temp = temp % width; w = temp; float x_data = get_data_sf(X,num,channels,height,width,n,c,h,w); float g_data_1 = get_gate_sf(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); float h_minus1_data_1 = get_data_sf(H,num,channels,height,width,n,c,h-1,w-1); float h1_minus1 = g_data_1 * h_minus1_data_1; float g_data_2 = get_gate_sf(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse); float h_minus1_data_2 = get_data_sf(H,num,channels,height,width,n,c,h-1,w); float h2_minus1 = g_data_2 * 
h_minus1_data_2; float g_data_3 = get_gate_sf(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); float h_minus1_data_3 = get_data_sf(H,num,channels,height,width,n,c,h-1,w+1); float h3_minus1 = g_data_3 * h_minus1_data_3; float h_hype = h1_minus1 + h2_minus1 + h3_minus1; float x_hype = (1 - g_data_1 - g_data_2 - g_data_3) * x_data; float h_data = x_hype + h_hype; set_data_sf(H,num,channels,height,width,n,c,h,w,h_data); } } __global__ void forward_one_row_bottom_top( int count, int T, int num,int channels, int height, int width, float* X, float* G1, float* G2, float* G3, float* H,int horizontal,int reverse) { CUDA_1D_KERNEL_LOOP(index, count) { int wc_count = width * channels; int n,c,h,w; int temp=index; h = T; n = temp / wc_count; temp = temp % wc_count; c = temp / width; temp = temp % width; w = temp; float x_data = get_data_sf(X,num,channels,height,width,n,c,h,w); float g_data_1 = get_gate_sf(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); float h_minus1_data_1 = get_data_sf(H,num,channels,height,width,n,c,h+1,w-1); float h1_minus1 = g_data_1 * h_minus1_data_1; float g_data_2 = get_gate_sf(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse); float h_minus1_data_2 = get_data_sf(H,num,channels,height,width,n,c,h+1,w); float h2_minus1 = g_data_2 * h_minus1_data_2; float g_data_3 = get_gate_sf(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); float h_minus1_data_3 = get_data_sf(H,num,channels,height,width,n,c,h+1,w+1); float h3_minus1 = g_data_3 * h_minus1_data_3; float h_hype = h1_minus1 + h2_minus1 + h3_minus1; float x_hype = (1 - g_data_1 - g_data_2 - g_data_3) * x_data; float h_data = x_hype + h_hype; set_data_sf(H,num,channels,height,width,n,c,h,w,h_data); } } __global__ void backward_one_col_left_right( int count, int T, int num,int channels, int height, int width, float* X, float* G1, float* G2, float* G3, float* H, float * X_diff, float * G1_diff,float* G2_diff,float * G3_diff, float * Hdiff,int horizontal,int reverse) { CUDA_1D_KERNEL_LOOP(index, count) { int hc_count = height * channels; int n,c,h,w; int temp=index; w = T; n = temp / hc_count; temp = temp % hc_count; c = temp / height; temp = temp % height; h = temp; float x_data = get_data_sf(X,num,channels,height,width,n,c,h,w); //h(t)_diff = top(t)_diff float h_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h,w); //h(t)_diff += h(t+1)_diff * g(t+1) if t<T float add1_h3_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h-1,w+1); float add1_g3_data = get_gate_sf(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); float add1_h2_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h,w+1); float add1_g2_data = get_gate_sf(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse); float add1_h1_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h+1,w+1); float add1_g1_data = get_gate_sf(G1,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); h_diff = h_diff + add1_h3_diff * add1_g3_data + add1_h2_diff * add1_g2_data + add1_h1_diff * add1_g1_data; //Hdiff[n*channels*height*width + c*height*width + h*width + w]=0; set_data_sf(Hdiff,num,channels,height,width,n,c,h,w,h_diff); //x(t)_diff=(1-sum(g_date))*h(t)_diff float g1_data = get_gate_sf(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); float g2_data = get_gate_sf(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse); float g3_data = get_gate_sf(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); float x_diff = (1- g1_data -g2_data -g3_data) 
* h_diff; set_data_sf(X_diff,num,channels,height,width,n,c,h,w,x_diff); // g_diff = h_diff * (h_data(t-1) - x_data) float h1_minus1_data = get_data_sf(H,num,channels,height,width,n,c,h-1,w-1); float g1_diff = h_diff * (h1_minus1_data - x_data); set_gate_sf(G1_diff,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse,g1_diff); float h2_minus1_data = get_data_sf(H,num,channels,height,width,n,c,h,w-1); float g2_diff = h_diff * (h2_minus1_data - x_data); set_gate_sf(G2_diff,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse,g2_diff); float h3_minus1_data = get_data_sf(H,num,channels,height,width,n,c,h+1,w-1); float g3_diff = h_diff * (h3_minus1_data - x_data); set_gate_sf(G3_diff,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse,g3_diff); } } __global__ void backward_one_col_right_left( int count, int T, int num,int channels, int height, int width, float* X, float* G1, float* G2, float* G3, float* H, float * X_diff, float * G1_diff,float* G2_diff,float * G3_diff, float * Hdiff,int horizontal,int reverse) { CUDA_1D_KERNEL_LOOP(index, count) { int hc_count = height * channels; int n,c,h,w; int temp=index; w = T; n = temp / hc_count; temp = temp % hc_count; c = temp / height; temp = temp % height; h = temp; float x_data = get_data_sf(X,num,channels,height,width,n,c,h,w); //h(t)_diff = top(t)_diff float h_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h,w); ///h(t)_diff += h(t+1)_diff * g(t+1) if t<T float add1_h3_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h-1,w-1); float add1_g3_data = get_gate_sf(G3,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); float add1_h2_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h,w-1); float add1_g2_data = get_gate_sf(G2,num,channels,height,width,n,c,h,w,h,w-1,horizontal,reverse); float add1_h1_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h+1,w-1); float add1_g1_data = get_gate_sf(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); h_diff = h_diff + add1_h3_diff * add1_g3_data + add1_h2_diff * add1_g2_data + add1_h1_diff * add1_g1_data; set_data_sf(Hdiff,num,channels,height,width,n,c,h,w,h_diff); float g1_data = get_gate_sf(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); float g2_data = get_gate_sf(G2,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse); float g3_data = get_gate_sf(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); float x_diff = (1- g1_data -g2_data -g3_data) * h_diff; set_data_sf(X_diff,num,channels,height,width,n,c,h,w,x_diff); // g_diff = h_diff * (h_data(t-1) - x_data) float h1_minus1_data = get_data_sf(H,num,channels,height,width,n,c,h-1,w+1); float g1_diff = h_diff * (h1_minus1_data - x_data); set_gate_sf(G1_diff,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse,g1_diff); float h2_minus1_data = get_data_sf(H,num,channels,height,width,n,c,h,w+1); float g2_diff = h_diff * (h2_minus1_data - x_data); set_gate_sf(G2_diff,num,channels,height,width,n,c,h,w,h,w+1,horizontal,reverse,g2_diff); float h3_minus1_data = get_data_sf(H,num,channels,height,width,n,c,h+1,w+1); float g3_diff = h_diff * (h3_minus1_data - x_data); set_gate_sf(G3_diff,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse,g3_diff); } } __global__ void backward_one_row_top_bottom( int count, int T, int num,int channels, int height, int width, float* X, float* G1, float* G2, float* G3, float* H, float * X_diff, float * G1_diff,float* G2_diff,float * G3_diff, float * Hdiff,int horizontal,int reverse) { CUDA_1D_KERNEL_LOOP(index, count) 
{ int wc_count = width * channels; int n,c,h,w; int temp=index; h = T; n = temp / wc_count; temp = temp % wc_count; c = temp / width; temp = temp % width; w = temp; float x_data = get_data_sf(X,num,channels,height,width,n,c,h,w); float h_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h,w); //h(t)_diff += h(t+1)_diff * g(t+1) if t<T float add1_h3_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h+1,w-1); float add1_g3_data = get_gate_sf(G3,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); float add1_h2_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h+1,w); float add1_g2_data = get_gate_sf(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse); float add1_h1_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h+1,w+1); float add1_g1_data = get_gate_sf(G1,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); h_diff = h_diff + add1_h3_diff * add1_g3_data + add1_h2_diff * add1_g2_data + add1_h1_diff * add1_g1_data; set_data_sf(Hdiff,num,channels,height,width,n,c,h,w,h_diff); //x(t)_diff=(1-g(t))*h(t)_diff float g1_data = get_gate_sf(G1,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); float g2_data = get_gate_sf(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse); float g3_data = get_gate_sf(G3,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); float x_diff = (1- g1_data -g2_data -g3_data) * h_diff; set_data_sf(X_diff,num,channels,height,width,n,c,h,w,x_diff); // g_diff = h_diff * (h_data(t-1) - x_data) float h1_minus1_data = get_data_sf(H,num,channels,height,width,n,c,h-1,w-1); float g1_diff = h_diff * (h1_minus1_data - x_data); set_gate_sf(G1_diff,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse,g1_diff); float h2_minus1_data = get_data_sf(H,num,channels,height,width,n,c,h-1,w); float g2_diff = h_diff * (h2_minus1_data - x_data); set_gate_sf(G2_diff,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse,g2_diff); float h3_minus1_data = get_data_sf(H,num,channels,height,width,n,c,h-1,w+1); float g3_diff = h_diff * (h3_minus1_data - x_data); set_gate_sf(G3_diff,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse,g3_diff); } } __global__ void backward_one_row_bottom_top( int count, int T, int num,int channels, int height, int width, float* X, float* G1, float* G2, float* G3, float* H, float * X_diff, float * G1_diff,float* G2_diff,float * G3_diff, float * Hdiff,int horizontal,int reverse) { CUDA_1D_KERNEL_LOOP(index, count) { int wc_count = width * channels; int n,c,h,w; int temp=index; h = T; n = temp / wc_count; temp = temp % wc_count; c = temp / width; temp = temp % width; w = temp; float x_data = get_data_sf(X,num,channels,height,width,n,c,h,w); //h(t)_diff = top(t)_diff float h_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h,w); //h(t)_diff += h(t+1)_diff * g(t+1) if t<T float add1_h3_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h-1,w-1); float add1_g3_data = get_gate_sf(G3,num,channels,height,width,n,c,h,w,h-1,w-1,horizontal,reverse); float add1_h2_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h-1,w); float add1_g2_data = get_gate_sf(G2,num,channels,height,width,n,c,h,w,h-1,w,horizontal,reverse); float add1_h1_diff = get_data_sf(Hdiff,num,channels,height,width,n,c,h-1,w+1); float add1_g1_data = get_gate_sf(G1,num,channels,height,width,n,c,h,w,h-1,w+1,horizontal,reverse); h_diff = h_diff + add1_h3_diff * add1_g3_data + add1_h2_diff * add1_g2_data + add1_h1_diff * add1_g1_data; set_data_sf(Hdiff,num,channels,height,width,n,c,h,w,h_diff); 
//x(t)_diff=(1-g(t))*h(t)_diff float g1_data = get_gate_sf(G1,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse); float g2_data = get_gate_sf(G2,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse); float g3_data = get_gate_sf(G3,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse); float x_diff = (1- g1_data -g2_data -g3_data) * h_diff; set_data_sf(X_diff,num,channels,height,width,n,c,h,w,x_diff); // g_diff = h_diff * (h_data(t-1) - x_data) float h1_minus1_data = get_data_sf(H,num,channels,height,width,n,c,h+1,w-1); float g1_diff = h_diff * (h1_minus1_data - x_data); set_gate_sf(G1_diff,num,channels,height,width,n,c,h,w,h+1,w-1,horizontal,reverse,g1_diff); //float g2_diff = h_diff * g2_idx * x_data * -1; float h2_minus1_data = get_data_sf(H,num,channels,height,width,n,c,h+1,w); float g2_diff = h_diff * (h2_minus1_data - x_data); set_gate_sf(G2_diff,num,channels,height,width,n,c,h,w,h+1,w,horizontal,reverse,g2_diff); //float g3_diff = h_diff * g3_idx * x_data * -1; float h3_minus1_data = get_data_sf(H,num,channels,height,width,n,c,h+1,w+1); float g3_diff = h_diff * (h3_minus1_data - x_data); set_gate_sf(G3_diff,num,channels,height,width,n,c,h,w,h+1,w+1,horizontal,reverse,g3_diff); } } int Forward_left_right(int num_, int channels_, int height_, int width_, float * X, float * G1, float * G2, float * G3, float * H, int horizontal_, int reverse_, cudaStream_t stream) { int count = height_ * channels_ * num_; int kThreadsPerBlock = 1024; cudaError_t err; for(int t=0; t<width_; t++) { forward_one_col_left_right<<<(count + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(count, t, num_, channels_, height_, width_, X, G1, G2, G3, H, horizontal_, reverse_); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } } return 1; } int Forward_right_left(int num_, int channels_, int height_, int width_, float * X, float * G1, float * G2, float * G3, float * H, int horizontal_, int reverse_, cudaStream_t stream) { int count = height_ * channels_ * num_; int kThreadsPerBlock = 1024; cudaError_t err; for(int t = width_ - 1; t >= 0; t--) { forward_one_col_right_left<<<(count + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(count, t, num_, channels_, height_, width_, X, G1, G2, G3, H, horizontal_, reverse_); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } } return 1; } int Forward_top_bottom(int num_, int channels_, int height_, int width_, float * X, float * G1, float * G2, float * G3, float * H, int horizontal_, int reverse_, cudaStream_t stream) { int count = width_ * channels_ * num_; int kThreadsPerBlock = 1024; cudaError_t err; for(int t=0; t< height_; t++) { forward_one_row_top_bottom<<<(count + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(count, t, num_, channels_, height_, width_, X, G1, G2, G3, H, horizontal_, reverse_); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } } return 1; } int Forward_bottom_top(int num_, int channels_, int height_, int width_, float * X, float * G1, float * G2, float * G3, float * H, int horizontal_, int reverse_, cudaStream_t stream) { int count = width_ * channels_ * num_; int kThreadsPerBlock = 1024; cudaError_t err; for(int t = height_-1; t >= 0; t--) { 
forward_one_row_bottom_top<<<(count + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(count, t, num_, channels_, height_, width_, X, G1, G2, G3, H, horizontal_, reverse_); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } } return 1; } int Backward_left_right(int num_, int channels_, int height_, int width_, float * X, float * G1, float * G2, float * G3, float * H, float * X_diff, float * G1_diff, float * G2_diff, float * G3_diff, float * H_diff, int horizontal_, int reverse_, cudaStream_t stream) { int count = height_ * channels_ * num_; int kThreadsPerBlock = 1024; cudaError_t err; for(int t = width_ -1; t>=0; t--) { backward_one_col_left_right<<<(count + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(count, t, num_, channels_, height_, width_, X, G1, G2, G3, H, X_diff, G1_diff, G2_diff, G3_diff, H_diff, horizontal_, reverse_); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } } return 1; } int Backward_right_left(int num_, int channels_, int height_, int width_, float * X, float * G1, float * G2, float * G3, float * H, float * X_diff, float * G1_diff, float * G2_diff, float * G3_diff, float * H_diff, int horizontal_, int reverse_, cudaStream_t stream) { int count = height_ * channels_ * num_; int kThreadsPerBlock = 1024; cudaError_t err; for(int t = 0; t<width_; t++) { backward_one_col_right_left<<<(count + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(count, t, num_, channels_, height_, width_, X, G1, G2, G3, H, X_diff, G1_diff, G2_diff, G3_diff, H_diff, horizontal_, reverse_); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } } return 1; } int Backward_top_bottom(int num_, int channels_, int height_, int width_, float * X, float * G1, float * G2, float * G3, float * H, float * X_diff, float * G1_diff, float * G2_diff, float * G3_diff, float * H_diff, int horizontal_, int reverse_, cudaStream_t stream) { int count = width_ * channels_ * num_; int kThreadsPerBlock = 1024; cudaError_t err; for(int t = height_-1; t>=0; t--) { backward_one_row_top_bottom<<<(count + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(count, t, num_, channels_, height_, width_, X, G1, G2, G3, H, X_diff, G1_diff, G2_diff, G3_diff, H_diff, horizontal_, reverse_); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } } return 1; } int Backward_bottom_top(int num_, int channels_, int height_, int width_, float * X, float * G1, float * G2, float * G3, float * H, float * X_diff, float * G1_diff, float * G2_diff, float * G3_diff, float * H_diff, int horizontal_, int reverse_, cudaStream_t stream) { int count = width_ * channels_ * num_; int kThreadsPerBlock = 1024; cudaError_t err; for(int t = 0; t<height_; t++) { backward_one_row_bottom_top<<<(count + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>(count, t, num_, channels_, height_, width_, X, G1, G2, G3, H, X_diff, G1_diff, G2_diff, G3_diff, H_diff, horizontal_, reverse_); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } } return 1; } #ifdef __cplusplus } #endif
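/*
 * Hypothetical usage sketch (not part of the original file): one way the
 * host-side entry points above could be driven for a single NCHW float
 * tensor.  Buffer names, sizes, the zero-gate initialisation and the
 * horizontal/reverse flag values are illustrative assumptions only; in
 * practice X and the gates G1..G3 come from the surrounding network.
 */
#include <cuda_runtime.h>

static void example_spatial_scan() {
  const int num = 1, channels = 8, height = 32, width = 32;
  const size_t bytes = (size_t)num * channels * height * width * sizeof(float);

  float *X, *G1, *G2, *G3, *H;
  cudaMalloc(&X, bytes);
  cudaMalloc(&G1, bytes);
  cudaMalloc(&G2, bytes);
  cudaMalloc(&G3, bytes);
  cudaMalloc(&H, bytes);

  // With all gates at zero the recurrence sketched above reduces to H = X,
  // which makes a convenient smoke test.
  cudaMemset(X, 0, bytes);
  cudaMemset(G1, 0, bytes);
  cudaMemset(G2, 0, bytes);
  cudaMemset(G3, 0, bytes);

  cudaStream_t stream;
  cudaStreamCreate(&stream);

  // Vertical top-to-bottom scan; horizontal_ = 0, reverse_ = 0 are simply
  // forwarded to get/set_gate_sf, so the exact convention is assumed here.
  Forward_top_bottom(num, channels, height, width,
                     X, G1, G2, G3, H, /*horizontal_=*/0, /*reverse_=*/0, stream);

  cudaStreamSynchronize(stream);
  cudaStreamDestroy(stream);
  cudaFree(X); cudaFree(G1); cudaFree(G2); cudaFree(G3); cudaFree(H);
}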
//#include <cub/device/device_radix_sort.cuh> #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 #else __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif #if __CUDA_ARCH__ > 200 #define MAXXGRID 2147483647 #else #define MAXXGRID 65535 #endif int getDeviceVersionD() { int igpu; cudaGetDevice(&igpu); cudaDeviceProp prop; cudaGetDeviceProperties(&prop, igpu); return 100 * prop.major + 10 * prop.minor; } void setsizesD(long long N, dim3 *gridp, int *nthreadsp) { int nblocks = 1; int nthreads = 32; int threads_per_block = 1024; // int version; // version = getDeviceVersionD(); // if (version == 320) threads_per_block = 512; while (1L * nblocks * nthreads < N) { if (nblocks < 16) { nblocks = 2*nblocks; } else if (nthreads < threads_per_block) { nthreads = 2*nthreads; } else { nblocks = 2*nblocks; } } gridp->y = 1 + (nblocks-1)/65536; gridp->x = 1 + (nblocks-1)/gridp->y; gridp->z = 1; *nthreadsp = nthreads; } void setsizesLeanD(long long N, dim3 *gridp, int *nthreadsp) { int nblocks = 1; int nthreads = 32; int threads_per_block = 1024; // int version; // version = getDeviceVersionD(); // if (version == 320) threads_per_block = 512; while (1L * nblocks * nthreads < N) { if (nblocks < 16) { nblocks = 2*nblocks; } else if (nthreads < threads_per_block) { nthreads = 2*nthreads; } else { nblocks = max(nblocks, 1 + (int)((N-1)/nthreads)); } } gridp->y = 1 + (nblocks-1)/65536; gridp->x = 1 + (nblocks-1)/gridp->y; gridp->z = 1; *nthreadsp = nthreads; } template <class T> __global__ void __toDouble(T *A, double *B, int N) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { B[i] = (double)(A[i]); } } __global__ void __toInt(double *A, int *B, int N) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < N; i += blockDim.x * gridDim.x * gridDim.y) { B[i] = (int)(A[i]); } } int IntToDouble(int *A, double *B, int N) { int nthreads; dim3 griddims; setsizesLeanD(N, &griddims, &nthreads); __toDouble<int><<<griddims,nthreads>>>(A, B, N); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int FloatToDouble(float *A, double *B, int N) { int nthreads; dim3 griddims; setsizesLeanD(N, &griddims, &nthreads); __toDouble<float><<<griddims,nthreads>>>(A, B, N); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int toInt(double *A, int *B, int N) { int nthreads; dim3 griddims; setsizesLeanD(N, &griddims, &nthreads); __toInt<<<griddims,nthreads>>>(A, B, N); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __full(int *ir, int *ic, double *data, double *od, int nrows, int ncols, int nnz) { int i, row, col; double v; int id = threadIdx.x + blockIdx.x * blockDim.x; for (i = id; i < nnz; i += blockDim.x * gridDim.x) { v = data[i]; row = ir[i]; col = ic[i]; od[row + col * nrows] = v; } } int full(int *ir, int *ic, double *data, double *od, int nrows, int ncols, int nnz) { int nblocks = min(32, 1+(nnz-1)/32); int nthreads = max(32, min(1+(nnz-1)/nblocks, 1024)); 
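// Launch heuristic for __full (as computed above): at most 32 blocks, and
// just enough threads per block -- clamped to [32, 1024] -- for the grid to
// cover nnz once; the kernel then grid-strides over the nonzeros and
// scatters each (row, col, value) triple into the dense column-major output
// at od[row + col * nrows].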
__full<<<nblocks,nthreads>>>(ir, ic, data, od, nrows, ncols, nnz); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __set_val(double *A, double val, int length) { int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); for (int i = ip; i < length; i += blockDim.x * gridDim.x * gridDim.y) { A[i] = val; } } int set_val(double *A, double val, int length) { int nthreads; dim3 griddims; setsizesLeanD(length, &griddims, &nthreads); __set_val<<<griddims,nthreads>>>(A, val, length); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int set_ival(double *A, int val, int length) { int nthreads; dim3 griddims; setsizesLeanD(length, &griddims, &nthreads); __set_val<<<griddims,nthreads>>>(A, *((double *)&val), length); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __copyToInds(double *A, double *B, int *I, long long len) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); int step = blockDim.x * gridDim.x * gridDim.y; long long i; for (i = tid; i < len; i += step) { B[I[i]] = A[i]; } } int copyToInds(double *A, double *B, int *I, long long len) { int nthreads; dim3 griddims; setsizesLeanD(len, &griddims, &nthreads); __copyToInds<<<griddims,nthreads>>>(A, B, I, len); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } template<typename T> __global__ void __copyFromInds(T *A, T *B, int *I, long long len) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); int step = blockDim.x * gridDim.x * gridDim.y; long long i; for (i = tid; i < len; i += step) { B[i] = A[I[i]]; } } int copyFromInds(double *A, double *B, int *I, long long len) { int nthreads; dim3 griddims; setsizesLeanD(len, &griddims, &nthreads); __copyFromInds<<<griddims,nthreads>>>(A, B, I, len); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } // Implement B[I,J] = A // indexed copy: version with one block per column #define COPYTOINDS2DA(DFNAME,IEXPR,JEXPR) \ __global__ void __copyToInds2D##DFNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \ int iblock = blockIdx.x + blockIdx.y * gridDim.x; \ if (iblock < ncols) { \ int icol = JEXPR; \ for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \ B[IEXPR + icol * ldb] = A[i + iblock * lda]; \ } \ } \ } COPYTOINDS2DA(nn,I[i],J[iblock]) COPYTOINDS2DA(xn,i,J[iblock]) COPYTOINDS2DA(nx,I[i],iblock) COPYTOINDS2DA(xx,i,iblock) // Implement B[I,J] = A // indexed copy: version with one thread per element #define COPYTOINDS2DB(DFNAME,IEXPR,JEXPR) \ __global__ void __copyToInds2DB##DFNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \ int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \ if (indx < nrows * ncols) { \ int irow = indx % nrows; \ int icol = indx / nrows; \ B[IEXPR + JEXPR * ldb] = A[irow + icol * lda]; \ } \ } COPYTOINDS2DB(nn,I[irow],J[icol]) COPYTOINDS2DB(xn,irow,J[icol]) COPYTOINDS2DB(nx,I[irow],icol) COPYTOINDS2DB(xx,irow,icol) // Implement B[I,J] = A int copyToInds2D(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { int len = nrows * ncols; int nthreads = max(32, min(1024, nrows)); int nblocks = min(ncols, (len-1)/nthreads + 1); dim3 griddims; griddims.x = 1; griddims.y = 1; griddims.z = 1; if (nblocks < 65536) { griddims.x = nblocks; } else { int vs = 
(int)sqrt((double)nblocks); griddims.x = vs; griddims.y = (nblocks-1)/vs + 1; } if (nblocks == ncols) { if (I == NULL) { if (J == NULL) { __copyToInds2Dxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyToInds2Dxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } else { if (J == NULL) { __copyToInds2Dnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyToInds2Dnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } } else { if (I == NULL) { if (J == NULL) { __copyToInds2DBxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyToInds2DBxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } else { if (J == NULL) { __copyToInds2DBnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyToInds2DBnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __copyToInds3D(double *A, int lda, int rda, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) { int ii = threadIdx.x + blockDim.x * blockIdx.x; int jj = threadIdx.y + blockDim.y * blockIdx.y; int kk = threadIdx.z + blockDim.z * blockIdx.z; int i, j, k, mapi, mapj, mapk; for (k = kk; k < nk; k += blockDim.z * gridDim.z) { mapk = k; if (K != NULL) mapk = K[k]; for (j = jj; j < ncols; j += blockDim.y * gridDim.y) { mapj = j; if (J != NULL) mapj = J[j]; if (I != NULL) { for (i = ii; i < nrows; i += blockDim.x * gridDim.x) { mapi = I[i]; B[mapi + ldb * (mapj + rdb * mapk)] = A[i + lda * (j + rda * k)]; } } else { for (i = ii; i < nrows; i += blockDim.x * gridDim.x) { mapi = i; B[mapi + ldb * (mapj + rdb * mapk)] = A[i + lda * (j + rda * k)]; } } } } } int copyToInds3D(double *A, int lda, int rda, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) { int ntx, nty, ntz, nbx, nby, nbz; ntx = min(nrows, 1024); nbx = min((nrows - 1) / ntx + 1, 1024); nty = min(ncols, 1024/ntx); nby = min((ncols - 1) / nty + 1, 1024); ntz = min(nk, 1024/ntx/nty); nbz = min((nk - 1) / ntz + 1, 1024); dim3 blockdims(ntx, nty, ntz); dim3 griddims(nbx, nby, nbz); __copyToInds3D<<<griddims,blockdims>>>(A, lda, rda, B, ldb, rdb, I, nrows, J, ncols, K, nk); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __copyToInds4D(double *A, int lda, int rda, int tda, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl, int ntk, int nbk, int ntl, int nbl) { int ii = threadIdx.x + blockDim.x * blockIdx.x; int jj = threadIdx.y + blockDim.y * blockIdx.y; int tk = threadIdx.z / ntl; int tl = threadIdx.z - tk * ntl; int bk = blockIdx.z / nbl; int bl = blockIdx.z - bk * nbl; int kk = tk + ntk * bk; int ll = tl + ntl * bl; int i, j, k, l, mapi, mapj, mapk, mapl; for (l = ll; l < nl; l += ntl * nbl) { mapl = l; if (L != NULL) mapl = L[l]; for (k = kk; k < nk; k += ntk * nbk) { mapk = k; if (K != NULL) mapk = K[k]; for (j = jj; j < ncols; j += blockDim.y * gridDim.y) { mapj = j; if (J != NULL) mapj = J[j]; if (I != NULL) { for (i = ii; i < nrows; i += blockDim.x * gridDim.x) { mapi = I[i]; B[mapi + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A[i + lda * (j + rda * (k + tda * l))]; } } else { for (i = ii; i < nrows; i += blockDim.x * gridDim.x) { B[i + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A[i + lda * (j + rda * (k + tda * l))]; } } } } } } int copyToInds4D(double *A, int lda, int 
rda, int tda, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) { int ntx, nty, ntk, ntl, nbx, nby, nbk, nbl; ntx = min(nrows, 1024); nbx = min((nrows - 1) / ntx + 1, 1024); nty = min(ncols, 1024/ntx); nby = min((ncols - 1) / nty + 1, 1024); ntk = min(nk, 1024/ntx/nty); nbk = min((nk - 1) / ntk + 1, 255); ntl = min(nl, 1024/ntx/nty/ntk); nbl = min((nl - 1) / ntl + 1, 255); dim3 blockdims(ntx, nty, ntk * ntl); dim3 griddims(nbx, nby, nbk * nbl); __copyToInds4D<<<griddims,blockdims>>>(A, lda, rda, tda, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl, ntk, nbk, ntl, nbl); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __fillToInds(double A, double *B, int *I, long long len) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); int step = blockDim.x * gridDim.x * gridDim.y; long long i; for (i = tid; i < len; i += step) { B[I[i]] = A; } } int fillToInds(double A, double *B, int *I, long long len) { int nthreads; dim3 griddims; setsizesLeanD(len, &griddims, &nthreads); __fillToInds<<<griddims,nthreads>>>(A, B, I, len); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } // Implement B[I,J] = c // indexed copy: version with one block per column #define FILLTOINDS2DA(DFNAME,IEXPR,JEXPR,ETYPE) \ __global__ void __fillToInds2D##DFNAME(ETYPE A, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \ int iblock = blockIdx.x + blockIdx.y * gridDim.x; \ if (iblock < ncols) { \ int icol = JEXPR; \ for (int i = threadIdx.x; i < nrows; i += blockDim.x) { \ B[IEXPR + icol * ldb] = A; \ } \ } \ } FILLTOINDS2DA(nn,I[i],J[iblock],double) FILLTOINDS2DA(xn,i,J[iblock],double) FILLTOINDS2DA(nx,I[i],iblock,double) FILLTOINDS2DA(xx,i,iblock,double) // Implement B[I,J] = A // indexed copy: version with one thread per element #define FILLTOINDS2DB(DFNAME,IEXPR,JEXPR,ETYPE) \ __global__ void __fillToInds2DB##DFNAME(ETYPE A, ETYPE *B, int ldb, int *I, int nrows, int *J, int ncols) { \ int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \ if (indx < nrows * ncols) { \ int irow = indx % nrows; \ int icol = indx / nrows; \ B[IEXPR + JEXPR * ldb] = A; \ } \ } FILLTOINDS2DB(nn,I[irow],J[icol],double) FILLTOINDS2DB(xn,irow,J[icol],double) FILLTOINDS2DB(nx,I[irow],icol,double) FILLTOINDS2DB(xx,irow,icol,double) int fillToInds2D(double A, double *B, int ldb, int *I, int nrows, int *J, int ncols) { int len = nrows * ncols; int nthreads = max(32, min(1024, nrows)); int nblocks = min(ncols, (len-1)/nthreads + 1); dim3 griddims; griddims.x = 1; griddims.y = 1; griddims.z = 1; if (nblocks < 65536) { griddims.x = nblocks; } else { int vs = (int)sqrt((float)nblocks); griddims.x = vs; griddims.y = (nblocks-1)/vs + 1; } if (nblocks == ncols) { if (I == NULL) { if (J == NULL) { __fillToInds2Dxx<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols); } else { __fillToInds2Dxn<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols); } } else { if (J == NULL) { __fillToInds2Dnx<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols); } else { __fillToInds2Dnn<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols); } } } else { if (I == NULL) { if (J == NULL) { __fillToInds2DBxx<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols); } else { __fillToInds2DBxn<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols); } } else { if (J == NULL) { __fillToInds2DBnx<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols); } else { 
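// Both an I (row) and a J (column) index vector were supplied: use the fully
// indexed one-thread-per-element variant.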
__fillToInds2DBnn<<<griddims,nthreads>>>(A, B, ldb, I, nrows, J, ncols); } } } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __fillToInds3D(double A, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) { int ii = threadIdx.x + blockDim.x * blockIdx.x; int jj = threadIdx.y + blockDim.y * blockIdx.y; int kk = threadIdx.z + blockDim.z * blockIdx.z; int i, j, k, mapi, mapj, mapk; for (k = kk; k < nk; k += blockDim.z * gridDim.z) { mapk = k; if (K != NULL) mapk = K[k]; for (j = jj; j < ncols; j += blockDim.y * gridDim.y) { mapj = j; if (J != NULL) mapj = J[j]; if (I != NULL) { for (i = ii; i < nrows; i += blockDim.x * gridDim.x) { mapi = I[i]; B[mapi + ldb * (mapj + rdb * mapk)] = A; } } else { for (i = ii; i < nrows; i += blockDim.x * gridDim.x) { mapi = i; B[mapi + ldb * (mapj + rdb * mapk)] = A; } } } } } int fillToInds3D(double A, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) { int ntx, nty, ntz, nbx, nby, nbz; ntx = min(nrows, 1024); nbx = min((nrows - 1) / ntx + 1, 1024); nty = min(ncols, 1024/ntx); nby = min((ncols - 1) / nty + 1, 1024); ntz = min(nk, 1024/ntx/nty); nbz = min((nk - 1) / ntz + 1, 1024); dim3 blockdims(ntx, nty, ntz); dim3 griddims(nbx, nby, nbz); __fillToInds3D<<<griddims,blockdims>>>(A, B, ldb, rdb, I, nrows, J, ncols, K, nk); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __fillToInds4D(double A, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl, int ntk, int nbk, int ntl, int nbl) { int ii = threadIdx.x + blockDim.x * blockIdx.x; int jj = threadIdx.y + blockDim.y * blockIdx.y; int tk = threadIdx.z / ntl; int tl = threadIdx.z - tk * ntl; int bk = blockIdx.z / nbl; int bl = blockIdx.z - bk * nbl; int kk = tk + ntk * bk; int ll = tl + ntl * bl; int i, j, k, l, mapi, mapj, mapk, mapl; for (l = ll; l < nl; l += ntl * nbl) { mapl = l; if (L != NULL) mapl = L[l]; for (k = kk; k < nk; k += ntk * nbk) { mapk = k; if (K != NULL) mapk = K[k]; for (j = jj; j < ncols; j += blockDim.y * gridDim.y) { mapj = j; if (J != NULL) mapj = J[j]; if (I != NULL) { for (i = ii; i < nrows; i += blockDim.x * gridDim.x) { mapi = I[i]; B[mapi + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A; } } else { for (i = ii; i < nrows; i += blockDim.x * gridDim.x) { B[i + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A; } } } } } } int fillToInds4D(double A, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) { int ntx, nty, ntk, ntl, nbx, nby, nbk, nbl; ntx = min(nrows, 1024); nbx = min((nrows - 1) / ntx + 1, 1024); nty = min(ncols, 1024/ntx); nby = min((ncols - 1) / nty + 1, 1024); ntk = min(nk, 1024/ntx/nty); nbk = min((nk - 1) / ntk + 1, 255); ntl = min(nl, 1024/ntx/nty/ntk); nbl = min((nl - 1) / ntl + 1, 255); dim3 blockdims(ntx, nty, ntk * ntl); dim3 griddims(nbx, nby, nbk * nbl); __fillToInds4D<<<griddims,blockdims>>>(A, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl, ntk, nbk, ntl, nbl); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } // Implement B = A[I,J] // indexed copy: version with one block per column #define COPYFROMINDS2DA(FNAME,IEXPR,JEXPR) \ __global__ void __copyFromInds2D##FNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \ int iblock = blockIdx.x + blockIdx.y * gridDim.x; \ if (iblock < ncols) { \ int icol = JEXPR; \ for 
(int i = threadIdx.x; i < nrows; i += blockDim.x) { \ B[i + iblock * ldb] = A[IEXPR + icol * lda]; \ } \ } \ } COPYFROMINDS2DA(nn,I[i],J[iblock]) COPYFROMINDS2DA(xn,i,J[iblock]) COPYFROMINDS2DA(nx,I[i],iblock) COPYFROMINDS2DA(xx,i,iblock) // Implement B = A[I,J] // indexed copy: version with one thread per element #define COPYFROMINDS2DB(FNAME,IEXPR,JEXPR) \ __global__ void __copyFromInds2DB##FNAME(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { \ int indx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x); \ if (indx < nrows * ncols) { \ int irow = indx % nrows; \ int icol = indx / nrows; \ B[irow + icol * ldb] = A[IEXPR + JEXPR * lda]; \ } \ } COPYFROMINDS2DB(nn,I[irow],J[icol]) COPYFROMINDS2DB(xn,irow,J[icol]) COPYFROMINDS2DB(nx,I[irow],icol) COPYFROMINDS2DB(xx,irow,icol) // Implement B = A[I,J] int copyFromInds2D(double *A, int lda, double *B, int ldb, int *I, int nrows, int *J, int ncols) { int len = nrows * ncols; int nthreads = max(32, min(1024, nrows)); int nblocks = min(ncols, (len-1)/nthreads + 1); dim3 griddims; griddims.x = 1; griddims.y = 1; griddims.z = 1; if (nblocks < 65536) { griddims.x = nblocks; } else { int vs = (int)sqrt((float)nblocks); griddims.x = vs; griddims.y = (nblocks-1)/vs + 1; } if (nblocks == ncols) { if (I == NULL) { if (J == NULL) { __copyFromInds2Dxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyFromInds2Dxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } else { if (J == NULL) { __copyFromInds2Dnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyFromInds2Dnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } } else { if (I == NULL) { if (J == NULL) { __copyFromInds2DBxx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyFromInds2DBxn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } else { if (J == NULL) { __copyFromInds2DBnx<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } else { __copyFromInds2DBnn<<<griddims,nthreads>>>(A, lda, B, ldb, I, nrows, J, ncols); } } } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __copyFromInds3D(double *A, int lda, int rda, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) { int ii = threadIdx.x + blockDim.x * blockIdx.x; int jj = threadIdx.y + blockDim.y * blockIdx.y; int kk = threadIdx.z + blockDim.z * blockIdx.z; int i, j, k, mapi, mapj, mapk; for (k = kk; k < nk; k += blockDim.z * gridDim.z) { mapk = k; if (K != NULL) mapk = K[k]; for (j = jj; j < ncols; j += blockDim.y * gridDim.y) { mapj = j; if (J != NULL) mapj = J[j]; if (I != NULL) { for (i = ii; i < nrows; i += blockDim.x * gridDim.x) { mapi = I[i]; B[i + ldb * (j + rdb * k)] = A[mapi + lda * (mapj + rda * mapk)]; } } else { for (i = ii; i < nrows; i += blockDim.x * gridDim.x) { mapi = i; B[i + ldb * (j + rdb * k)] = A[mapi + lda * (mapj + rda * mapk)]; } } } } } int copyFromInds3D(double *A, int lda, int rda, double *B, int ldb, int rdb, int *I, int nrows, int *J, int ncols, int *K, int nk) { int ntx, nty, ntz, nbx, nby, nbz; ntx = min(nrows, 1024); nbx = (nrows - 1) / ntx + 1; nty = min(ncols, 1024/ntx); nby = (ncols - 1) / nty + 1; ntz = min(nk, 1024/(ntx*nty)); nbz = (nk - 1) / ntz + 1; dim3 blockdims(ntx, nty, ntz); dim3 griddims(nbx, nby, nbz); __copyFromInds3D<<<griddims,blockdims>>>(A, lda, rda, B, ldb, rdb, I, nrows, J, ncols, K, nk); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err 
= cudaGetLastError(); return err; } __global__ void __copyFromInds4D(double *A, int lda, int rda, int tda, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl, int ntk, int nbk, int ntl, int nbl) { int ii = threadIdx.x + blockDim.x * blockIdx.x; int jj = threadIdx.y + blockDim.y * blockIdx.y; int tk = threadIdx.z / ntl; int tl = threadIdx.z - tk * ntl; int bk = blockIdx.z / nbl; int bl = blockIdx.z - bk * nbl; int kk = tk + ntk * bk; int ll = tl + ntl * bl; int i, j, k, l, mapi, mapj, mapk, mapl; for (l = ll; l < nl; l += ntl * nbl) { mapl = l; if (L != NULL) mapl = L[l]; for (k = kk; k < nk; k += ntk * nbk) { mapk = k; if (K != NULL) mapk = K[k]; for (j = jj; j < ncols; j += blockDim.y * gridDim.y) { mapj = j; if (J != NULL) mapj = J[j]; if (I != NULL) { for (i = ii; i < nrows; i += blockDim.x * gridDim.x) { mapi = I[i]; B[i + ldb * (j + rdb * (k + tdb * l))] = A[mapi + lda * (mapj + rda * (mapk + tda * mapl))]; } } else { for (i = ii; i < nrows; i += blockDim.x * gridDim.x) { B[i + ldb * (j + rdb * (k + tdb * l))] = A[i + lda * (mapj + rda * (mapk + tda * mapl))]; } } } } } } int copyFromInds4D(double *A, int lda, int rda, int tda, double *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) { int ntx, nty, ntk, ntl, nbx, nby, nbk, nbl; ntx = min(nrows, 1024); nbx = min((nrows - 1) / ntx + 1, 1024); nty = min(ncols, 1024/ntx); nby = min((ncols - 1) / nty + 1, 1024); ntk = min(nk, 1024/ntx/nty); nbk = min((nk - 1) / ntk + 1, 255); ntl = min(nl, 1024/ntx/nty/ntk); nbl = min((nl - 1) / ntl + 1, 255); dim3 blockdims(ntx, nty, ntk * ntl); dim3 griddims(nbx, nby, nbk * nbl); __copyFromInds4D<<<griddims,blockdims>>>(A, lda, rda, tda, B, ldb, rdb, tdb, I, nrows, J, ncols, K, nk, L, nl, ntk, nbk, ntl, nbl); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __dsmult(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; for (int i = threadIdx.x; i < nrows; i += blockDim.x) { double sum = 0; for (int j = jstart; j < jend ; j++) { sum += A[i + nrows * Bir[j]] * Bdata[j]; if (j == jend-1 || Bic[j] != Bic[j+1]) { atomicAdd(&C[i + nrows * Bic[j]], sum); sum = 0; } } } } __global__ void __dsmultx(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) { int bid = threadIdx.y + blockDim.y * blockIdx.x; int nb = blockDim.y * gridDim.x; int jstart = ((long long)bid) * nnz / nb; int jend = ((long long)(bid + 1)) * nnz / nb; double sum = 0; for (int j = jstart; j < jend ; j++) { sum += A[threadIdx.x + nrows * Bir[j]] * Bdata[j]; if (j == jend-1 || Bic[j] != Bic[j+1]) { atomicAdd(&C[threadIdx.x + nrows * Bic[j]], sum); sum = 0; } } } int dsmult(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) { if (nrows < 128) { int nt = max(1, min(ncols/2, 256/nrows)); dim3 threadDim(nrows, nt, 1); int nblocks = min(MAXXGRID, max(1, ncols/nt)); __dsmultx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C); } else { int nthreads = min(1024, nrows); int nblocks = min(MAXXGRID, ncols); __dsmult<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C); } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int dsmult_tune(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C, int nblocks, int nthreads) { 
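// Tuning entry point: same kernel as dsmult(), but the caller supplies the
// grid/block shape instead of the heuristic above, presumably so that
// different launch configurations can be benchmarked against each other.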
__dsmult<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int dsmultx_tune(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C, int nblocks, int nthreadsx, int nthreadsy) { dim3 threadDim(nthreadsx, nthreadsy, 1); __dsmultx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __dsmultT(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; for (int i = threadIdx.x; i < nrows; i += blockDim.x) { double aval = 0; for (int j = jstart; j < jend ; j++) { if (j == jstart || Bic[j-1] != Bic[j]) { aval = A[i + nrows * Bic[j]]; } atomicAdd(&C[i + nrows * Bir[j]], aval * Bdata[j]); } } } __global__ void __dsmultTx(int nrows, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) { int bid = threadIdx.y + blockDim.y * blockIdx.x; int nb = blockDim.y * gridDim.x; int jstart = ((long long)bid) * nnz / nb; int jend = ((long long)(bid + 1)) * nnz / nb; double aval = 0; for (int j = jstart; j < jend ; j++) { if (j == jstart || Bic[j-1] != Bic[j]) { aval = A[threadIdx.x + nrows * Bic[j]]; } atomicAdd(&C[threadIdx.x + nrows * Bir[j]], aval * Bdata[j]); } } int dsmultT(int nrows, int ncols, int nnz, double *A, double *Bdata, int *Bir, int *Bic, double *C) { if (nrows < 128) { int nt = max(1, min(ncols/2, 256/nrows)); dim3 threadDim(nrows, nt, 1); int nblocks = min(MAXXGRID, max(1, ncols/nt)); __dsmultTx<<<nblocks,threadDim>>>(nrows, nnz, A, Bdata, Bir, Bic, C); } else { int nthreads = min(1024, nrows); int nblocks = min(MAXXGRID, ncols); __dsmultT<<<nblocks,nthreads>>>(nrows, nnz, A, Bdata, Bir, Bic, C); } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __spsum1(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) { atomicAdd(&B[Aic[i]], P[i]); } } __global__ void __spsum2(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; for (int i = jstart + threadIdx.x; i < jend; i += blockDim.x) { atomicAdd(&B[Air[i]], P[i]); } } int spsum(int nrows, int ncols, int nnz, int *Air, int *Aic, double *P, double *B, int n) { int nthreads = max(32, min(128, nnz)); int nblks = min(65536, max(1, (nnz-1) / 128)); if (n == 1) { __spsum1<<<nblks,nthreads>>>(nrows, ncols, nnz, Air, Aic, P, B); } else { __spsum2<<<nblks,nthreads>>>(nrows, ncols, nnz, Air, Aic, P, B); } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P); __global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cic, double *P); #define DDS_BLKY 32 #if __CUDA_ARCH__ > 200 __global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) { int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; int tid = threadIdx.x + blockDim.x * threadIdx.y; for (int j 
= jstart; j < jend ; j++) { double sum = 0; int aoff = nrows * Cir[j]; int boff = nrows * Cic[j]; for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) { sum += A[i + aoff] * B[i + boff]; } for (int i = 1; i < blockDim.x; i *= 2) { double tmp = __shfl_down(sum, i); if (threadIdx.x + i < blockDim.x) sum = sum + tmp; } if (threadIdx.x == 0) { atomicAdd(&P[j], sum); } } } __global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cjc, double *P) { __shared__ double merge[32]; int jstart = ((long long)blockIdx.x) * ncols / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * ncols / gridDim.x; int tid = threadIdx.x + blockDim.x * threadIdx.y; int aoff, boff; double user, prod, sum, bsum; for (int j0 = jstart; j0 < jend ; j0++) { boff = nrows * j0; user = B[tid + boff]; for (int j = Cjc[j0]; j < Cjc[j0+1]; j++) { aoff = nrows * Cir[j]; prod = A[tid + aoff] * user; sum = prod + __shfl_down(prod, 1); sum = sum + __shfl_down(sum, 2); sum = sum + __shfl_down(sum, 4); sum = sum + __shfl_down(sum, 8); sum = sum + __shfl_down(sum, 16); bsum = __shfl(sum, 0); __syncthreads(); if (threadIdx.x == threadIdx.y) { merge[threadIdx.x] = bsum; } __syncthreads(); if (threadIdx.y == 0) { sum = merge[threadIdx.x]; sum = sum + __shfl_down(sum, 1); sum = sum + __shfl_down(sum, 2); sum = sum + __shfl_down(sum, 4); sum = sum + __shfl_down(sum, 8); sum = sum + __shfl_down(sum, 16); if (threadIdx.x == 0) { P[j] = sum; } } } } } #else __global__ void __dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) { __shared__ double parts[32*DDS_BLKY]; int jstart = ((long long)blockIdx.x) * nnz / gridDim.x; int jend = ((long long)(blockIdx.x + 1)) * nnz / gridDim.x; int tid = threadIdx.x + blockDim.x * threadIdx.y; for (int j = jstart; j < jend ; j++) { double sum = 0; int aoff = nrows * Cir[j]; int boff = nrows * Cic[j]; for (int i = tid; i < nrows; i += blockDim.x * blockDim.y) { sum += A[i + aoff] * B[i + boff]; } parts[tid] = sum; for (int i = 1; i < blockDim.x * blockDim.y; i *= 2) { __syncthreads(); if (i + tid < blockDim.x * blockDim.y) { parts[tid] = parts[tid] + parts[i + tid]; } } __syncthreads(); if (tid == 0) { P[j] = parts[0]; } __syncthreads(); } } __global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cjc, double *P) {} #endif int dds(int nrows, int nnz, double *A, double *B, int *Cir, int *Cic, double *P) { dim3 blockDims(min(32,nrows), min(DDS_BLKY, 1+(nrows-1)/64), 1); // int nblocks = min(65536, max(1,nnz/8)); int nblocks = min(16384, max(1,nnz/128)); __dds<<<nblocks,blockDims>>>(nrows, nnz, A, B, Cir, Cic, P); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cic, double *P) { dim3 blockDims(32, 32, 1); // int nblocks = min(65536, max(1,nnz/8)); int nblocks = min(16384, max(1,ncols/64)); __dds0<<<nblocks,blockDims>>>(nrows, ncols, A, B, Cir, Cic, P); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } #define BLOCKDIM 32 __global__ void __transpose(double *in, int instride, double *out, int outstride, int nrows, int ncols) { int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; __shared__ double tile[BLOCKDIM][BLOCKDIM+1]; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += 
blockDim.y) { tile[threadIdx.x][y-yb] = in[threadIdx.x+xb + y*instride]; } } __syncthreads(); if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x]; } } __syncthreads(); } } } int transpose(double *in, int instride, double *out, int outstride, int nrows, int ncols) { int gridx = min(32, 1+(nrows-1)/256); int gridy = min(32, 1+(ncols-1)/256); const dim3 griddims(gridx, gridy, 1); const dim3 blockdims(BLOCKDIM,16,1); cudaError_t err; int dev = -1; cudaGetDevice(&dev); __transpose<<<griddims,blockdims>>>(in, instride, out, outstride, nrows, ncols); cudaStreamSynchronize(SYNC_STREAM); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "cuda error device %d in transpose of %dx%d matrix", dev, nrows, ncols); return err; } return 0; } __global__ void __embedmat2d(double *a, long long *b, int nrows, int ncols, int sortdown) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); const int signbit = 0x80000000; const int mag = 0x7fffffff; int icol; for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) { double v = a[i]; int vi = *((int *)&v); if (vi & signbit) { vi = -(vi & mag); } icol = (i/nrows+1); if (sortdown) icol = ncols - icol + 1; b[i] = (long long)vi + (((long long)icol)<<32); } } __global__ void __embedmat(double *a, int *b, long long *c, int n) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); const int signbit = 0x80000000; const int mag = 0x7fffffff; for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) { double v = a[i]; int vi = *((int *)&v); if (vi & signbit) { vi = -(vi & mag); } c[i] = (long long)vi + (((long long)b[i])<<32); } } int embedmat2d(double *a, long long *b, int nrows, int ncols, int sortdown) { int nthreads; dim3 griddims; setsizesLeanD(nrows*ncols, &griddims, &nthreads); __embedmat2d<<<griddims,nthreads>>>(a, b, nrows, ncols, sortdown); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int embedmat(double *a, int *b, long long *c, int n) { int nthreads; dim3 griddims; setsizesLeanD(n, &griddims, &nthreads); __embedmat<<<griddims,nthreads>>>(a, b, c, n); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } __global__ void __extractmat2d(double *a, long long *b, int nrows, int ncols) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); const int signbit = 0x80000000; const int mag = 0x7fffffff; for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) { int vi = *((int *)&b[i]); if (vi & signbit) { vi = -(vi & mag); } a[i] = *((double *)&vi); } } __global__ void __extractmat(double *a, int *b, long long *c, int n) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); const int signbit = 0x80000000; const int mag = 0x7fffffff; for (int i = tid; i < n; i += blockDim.x*gridDim.x*gridDim.y) { int vi = *((int *)&c[i]); if (vi & signbit) { vi = -(vi & mag); } a[i] = *((double *)&vi); b[i] = *(((int *)&c[i])+1); } } int extractmat2d(double *a, long long *b, int nrows, int ncols) { int nthreads; dim3 griddims; setsizesLeanD(nrows*ncols, &griddims, &nthreads); __extractmat2d<<<griddims,nthreads>>>(a, b, nrows, ncols); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int extractmat(double *a, int *b, long long *c, int n) { int nthreads; dim3 griddims; setsizesLeanD(n, &griddims, 
&nthreads); __extractmat<<<griddims,nthreads>>>(a, b, c, n); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } #include <thrust/sort.h> #include <thrust/device_ptr.h> #include <thrust/reverse.h> int fsort2d(double *pkeys, unsigned int *pvals, int nrows, int ncols, int asc) { for (int i = 0; i < ncols; i++) { thrust::device_ptr<double> keys(pkeys+i*nrows); thrust::device_ptr<unsigned int> vals(pvals+i*nrows); if (asc > 0) { thrust::sort_by_key(keys, keys + nrows, vals); } else { thrust::sort_by_key(keys, keys + nrows, vals, thrust::greater<double>()); } } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int fsort(double *pkeys, int N, int asc) { thrust::device_ptr<double> keys(pkeys); if (asc > 0) { thrust::sort(keys, keys + N); } else { thrust::sort(keys, keys + N, thrust::greater<int>()); } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int fsorts(double *pkeys, unsigned int *pvals, int *jc, int m, int asc) { for (int i = 0; i < m; i++) { thrust::device_ptr<double> keys(pkeys + jc[i]); thrust::device_ptr<unsigned int> vals(pvals + jc[i]); int b = jc[i+1] - jc[i]; if (asc > 0) { thrust::sort_by_key(keys, keys + b, vals); } else { thrust::sort_by_key(keys, keys + b, vals, thrust::greater<double>()); } } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } #if CUDA_VERSION >= 7000 #if CUDA_VERSION >= 9000 #include <thrust/system/cuda/detail/cub/cub.cuh> long long disortcubsize(double *inkeys, double *outkeys, unsigned int *invals, unsigned int *outvals, int nelems, int asc) { size_t size = 0; void *temp = NULL; thrust::cuda_cub::cub::DoubleBuffer<double> d_keys(inkeys, outkeys); thrust::cuda_cub::cub::DoubleBuffer<unsigned int> d_vals(invals, outvals); if (asc > 0) { thrust::cuda_cub::cub::DeviceRadixSort::SortPairs(temp, size, d_keys, d_vals, nelems); } else { thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending(temp, size, d_keys, d_vals, nelems); } cudaStreamSynchronize(SYNC_STREAM); return size; } int disortcub(double *inkeys, double *outkeys, unsigned int *invals, unsigned int *outvals, int *temp, long long size, int nelems, int asc) { thrust::cuda_cub::cub::DoubleBuffer<double> d_keys(inkeys, outkeys); thrust::cuda_cub::cub::DoubleBuffer<unsigned int> d_vals(invals, outvals); if (asc > 0) { thrust::cuda_cub::cub::DeviceRadixSort::SortPairs((void *)temp, (size_t &)size, d_keys, d_vals, nelems); } else { thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending((void *)temp, (size_t &)size, d_keys, d_vals, nelems); } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int fsort2dx(double *pkeys, unsigned int *pvals, double *tkeys, unsigned int *tvals, int nrows, int ncols, int asc) { int i; cudaError_t err; long long ntemp; int * temp; ntemp = disortcubsize(pkeys, tkeys, pvals, tvals, nrows, asc); cudaMalloc(&temp, ntemp * sizeof(int)); cudaStreamSynchronize(SYNC_STREAM); for (i = 0; i < ncols; i++) { thrust::cuda_cub::cub::DoubleBuffer<double> d_keys(pkeys + (nrows * i), tkeys + (nrows * i)); thrust::cuda_cub::cub::DoubleBuffer<unsigned int> d_vals(pvals + (nrows * i), tvals + (nrows * i)); if (asc > 0) { thrust::cuda_cub::cub::DeviceRadixSort::SortPairs((void *)temp, (size_t &)ntemp, d_keys, d_vals, nrows); } else { thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending((void *)temp, (size_t &)ntemp, d_keys, d_vals, nrows); } } cudaStreamSynchronize(SYNC_STREAM); cudaFree(temp); err = 
cudaGetLastError(); return err; } #else long long disortcubsize(double *inkeys, double *outkeys, unsigned int *invals, unsigned int *outvals, int nelems, int asc) { size_t size = 0; void *temp = NULL; thrust::system::cuda::detail::cub_::DoubleBuffer<double> d_keys(inkeys, outkeys); thrust::system::cuda::detail::cub_::DoubleBuffer<unsigned int> d_vals(invals, outvals); if (asc > 0) { thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairs(temp, size, d_keys, d_vals, nelems); } else { thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairsDescending(temp, size, d_keys, d_vals, nelems); } cudaStreamSynchronize(SYNC_STREAM); return size; } int disortcub(double *inkeys, double *outkeys, unsigned int *invals, unsigned int *outvals, int *temp, long long size, int nelems, int asc) { thrust::system::cuda::detail::cub_::DoubleBuffer<double> d_keys(inkeys, outkeys); thrust::system::cuda::detail::cub_::DoubleBuffer<unsigned int> d_vals(invals, outvals); if (asc > 0) { thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairs((void *)temp, (size_t &)size, d_keys, d_vals, nelems); } else { thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairsDescending((void *)temp, (size_t &)size, d_keys, d_vals, nelems); } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int fsort2dx(double *pkeys, unsigned int *pvals, double *tkeys, unsigned int *tvals, int nrows, int ncols, int asc) { int i; cudaError_t err; long long ntemp; int * temp; ntemp = disortcubsize(pkeys, tkeys, pvals, tvals, nrows, asc); cudaMalloc(&temp, ntemp * sizeof(int)); cudaStreamSynchronize(SYNC_STREAM); for (i = 0; i < ncols; i++) { thrust::system::cuda::detail::cub_::DoubleBuffer<double> d_keys(pkeys + (nrows * i), tkeys + (nrows * i)); thrust::system::cuda::detail::cub_::DoubleBuffer<unsigned int> d_vals(pvals + (nrows * i), tvals + (nrows * i)); if (asc > 0) { thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairs((void *)temp, (size_t &)ntemp, d_keys, d_vals, nrows); } else { thrust::system::cuda::detail::cub_::DeviceRadixSort::SortPairsDescending((void *)temp, (size_t &)ntemp, d_keys, d_vals, nrows); } } cudaStreamSynchronize(SYNC_STREAM); cudaFree(temp); err = cudaGetLastError(); return err; } #endif #endif __global__ void __stratify(double *strata, int n, double *a, double *b, unsigned int *bi, int stride) { __shared__ double ss[32]; __shared__ unsigned int ibin[32]; __shared__ unsigned int ebin[32]; __shared__ unsigned int todo[32]; __shared__ double bins[64][33]; __shared__ unsigned int topush; int tid = threadIdx.x; ss[tid] = strata[tid]; ibin[tid] = 0; for (int i = 0; i < n; i += blockDim.x * gridDim.x) { int ii = i + tid + blockDim.x * blockIdx.x; if (tid == 0) topush = 0; if (ii < n) { double v = a[ii]; int j = 1; j = (v > ss[j-1]) ? 2*j+1 : 2*j; j = (v > ss[j-1]) ? 2*j+1 : 2*j; j = (v > ss[j-1]) ? 2*j+1 : 2*j; j = (v > ss[j-1]) ? 2*j+1 : 2*j; j = (v > ss[j-1]) ? 
2*j+1 : 2*j; j = j - 32; int k = atomicInc(&ibin[j], 256); bins[k][j] = v; if (k == 31) { k = atomicInc(&topush, 1024); todo[k] = j; } } if (ibin[tid] >= 32) { ebin[tid] = atomicAdd(&bi[tid], 32); ibin[tid] = ibin[tid] - 32; } for (int k = 0; k < topush; k++) { int j = todo[k]; b[j*stride + ebin[j] + tid] = bins[ibin[j] + tid][j]; } } ebin[tid] = atomicAdd(&bi[tid], ibin[tid]); for (int j = 0; j < 32; j++) { if (tid < ibin[j]) { b[j*stride + ebin[j] + tid] = bins[tid][j]; } } } int stratify(double *strata, int n, double *a, double *b, unsigned int *bi, int stride) { __stratify<<<40,32>>>(strata, n, a, b, bi, stride); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } #define SNDVALS 256 #define SNDGRPS 4 #define SNTHREADS 1024 #define SBIGBLK (4*1024) __global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) { __shared__ unsigned int ic[SNDVALS][SNDGRPS]; __shared__ double ss[SNDVALS]; int istart = (int)(((long long)blockIdx.x) * n / gridDim.x); int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x); int bibase = SNDVALS * (blockIdx.x + istart / SBIGBLK); int tid = threadIdx.x + threadIdx.y * blockDim.x; if (threadIdx.y == 0) { ss[threadIdx.x] = strata[threadIdx.x]; } for (int i = istart; i < iend; i += SBIGBLK) { __syncthreads(); if (threadIdx.y < SNDGRPS) { ic[threadIdx.x][threadIdx.y] = 0; } __syncthreads(); for (int k = i + tid; k < min(iend, i + tid + SBIGBLK); k += SNTHREADS) { double v = a[k]; int j = 0; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = (v > ss[j]) ? 2*j+2 : 2*j+1; j = j - SNDVALS + 1; atomicInc(&ic[j][threadIdx.y], 65536*32767); } __syncthreads(); if (threadIdx.y == 0) { bi[bibase + threadIdx.x] = ic[threadIdx.x][0] + ic[threadIdx.x][1] + ic[threadIdx.x][2] + ic[threadIdx.x][3]; } bibase += SNDVALS; } } int stratifycounts(double *strata, int n, double *a, unsigned int *bi) { const dim3 blockdims(SNDVALS, SNTHREADS/SNDVALS, 1); const dim3 griddims(8,1,1); __stratifycounts<<<griddims,blockdims>>>(strata, n, a, bi); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } #define RNDVALS 256 #define RNTHREADS 256 #define RNDBITS 8 #define RBIGBLK (4*1024) __global__ void __radixcounts(double *a, int n, int digit, unsigned int *bi) { __shared__ unsigned int ic[RNDVALS]; int istart = (int)(((long long)blockIdx.x) * n / gridDim.x); int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x); int tid = threadIdx.x; int bibase = RNDVALS * (blockIdx.x + istart / RBIGBLK); for (int i = istart; i < iend; i += RBIGBLK) { __syncthreads(); ic[threadIdx.x] = 0; __syncthreads(); for (int j = i + tid; j < min(iend, i+tid+RBIGBLK); j += RNTHREADS) { double v = a[j]; unsigned char *cv = (unsigned char *)&v; atomicInc(&ic[cv[digit]], 65536*32767); } __syncthreads(); bi[bibase + threadIdx.x] = ic[threadIdx.x]; bibase += RNDVALS; } } int radixcounts(double *a, int n, int digit, unsigned int *bi) { const dim3 blockdims(RNTHREADS,1,1); const dim3 griddims(32,1,1); __radixcounts<<<griddims,blockdims>>>(a, n, digit, bi); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } #if __CUDA_ARCH__ > 200 #define GENDISTS(DFNAME,DFUNC) \ __global__ void DFNAME(double *A, int lda, double *B, int ldb, double *C, \ int ldc, int d, int nrows, int ncols, double p) { \ int xblk = 
blockDim.x * (threadIdx.y + blockIdx.y * blockDim.y); \ int yblk = blockDim.x * (threadIdx.z + blockIdx.z * blockDim.z); \ double va, vb, vc; \ double R00, R01, R02, R03, R04, R05, R06, R07, R08, R09, R10, R11, R12, R13, R14, R15, \ R16, R17, R18, R19, R20, R21, R22, R23, R24, R25, R26, R27, R28, R29, R30, R31; \ int xi = threadIdx.x + xblk; \ int yi = threadIdx.x; \ if (xi < nrows) { \ if (yi+yblk < ncols) {R00 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R01 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R02 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R03 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R04 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R05 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R06 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R07 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R08 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R09 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R10 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R11 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R12 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R13 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R14 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R15 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R16 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R17 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R18 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R19 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R20 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R21 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R22 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R23 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R24 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R25 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R26 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R27 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R28 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R29 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R30 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {R31 = C[xi+(yi+yblk)*ldc];} yi = (yi+1) % blockDim.x; \ } \ yi = threadIdx.x + yblk; \ int nbr = (threadIdx.x + 1) % blockDim.x; \ for (int i = 0; i < d; i++) { \ va = (xi < nrows) ? A[xi + i * lda] : 0; \ vb = (yi < ncols) ? 
B[yi + i * ldb] : 0; \ vc=R00; DFUNC; R00=vc; vb=__shfl(vb, nbr); vc=R01; DFUNC; R01=vc; vb=__shfl(vb, nbr); \ vc=R02; DFUNC; R02=vc; vb=__shfl(vb, nbr); vc=R03; DFUNC; R03=vc; vb=__shfl(vb, nbr); \ vc=R04; DFUNC; R04=vc; vb=__shfl(vb, nbr); vc=R05; DFUNC; R05=vc; vb=__shfl(vb, nbr); \ vc=R06; DFUNC; R06=vc; vb=__shfl(vb, nbr); vc=R07; DFUNC; R07=vc; vb=__shfl(vb, nbr); \ vc=R08; DFUNC; R08=vc; vb=__shfl(vb, nbr); vc=R09; DFUNC; R09=vc; vb=__shfl(vb, nbr); \ vc=R10; DFUNC; R10=vc; vb=__shfl(vb, nbr); vc=R11; DFUNC; R11=vc; vb=__shfl(vb, nbr); \ vc=R12; DFUNC; R12=vc; vb=__shfl(vb, nbr); vc=R13; DFUNC; R13=vc; vb=__shfl(vb, nbr); \ vc=R14; DFUNC; R14=vc; vb=__shfl(vb, nbr); vc=R15; DFUNC; R15=vc; vb=__shfl(vb, nbr); \ vc=R16; DFUNC; R16=vc; vb=__shfl(vb, nbr); vc=R17; DFUNC; R17=vc; vb=__shfl(vb, nbr); \ vc=R18; DFUNC; R18=vc; vb=__shfl(vb, nbr); vc=R19; DFUNC; R19=vc; vb=__shfl(vb, nbr); \ vc=R20; DFUNC; R20=vc; vb=__shfl(vb, nbr); vc=R21; DFUNC; R21=vc; vb=__shfl(vb, nbr); \ vc=R22; DFUNC; R22=vc; vb=__shfl(vb, nbr); vc=R23; DFUNC; R23=vc; vb=__shfl(vb, nbr); \ vc=R24; DFUNC; R24=vc; vb=__shfl(vb, nbr); vc=R25; DFUNC; R25=vc; vb=__shfl(vb, nbr); \ vc=R26; DFUNC; R26=vc; vb=__shfl(vb, nbr); vc=R27; DFUNC; R27=vc; vb=__shfl(vb, nbr); \ vc=R28; DFUNC; R28=vc; vb=__shfl(vb, nbr); vc=R29; DFUNC; R29=vc; vb=__shfl(vb, nbr); \ vc=R30; DFUNC; R30=vc; vb=__shfl(vb, nbr); vc=R31; DFUNC; R31=vc; vb=__shfl(vb, nbr); \ } \ yi = threadIdx.x; \ if (xi < nrows) { \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R00;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R01;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R02;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R03;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R04;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R05;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R06;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R07;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R08;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R09;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R10;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R11;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R12;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R13;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R14;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R15;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R16;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R17;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R18;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R19;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R20;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R21;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R22;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R23;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R24;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R25;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R26;} 
yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R27;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R28;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R29;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R30;} yi = (yi+1) % blockDim.x; \ if (yi+yblk < ncols) {C[xi+(yi+yblk)*ldc] = R31;} yi = (yi+1) % blockDim.x; \ } \ } GENDISTS(__l1dist,vc+=abs(va-vb)) GENDISTS(__l2dist,vc+=(va-vb)*(va-vb)) GENDISTS(__minkowskidist,vc+=pow(abs(va-vb),p)) GENDISTS(__linfdist,vc=max(vc,abs(va-vb))) GENDISTS(__msum,vc=max(vc,va+vb)) #else __global__ void __l1dist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) { printf("Warning, Lidist not supported on arch <= 200\n"); } __global__ void __l2dist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) { printf("Warning, L2dist not supported on arch <= 200\n"); } __global__ void __minkowskidist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) { printf("Warning, Minkowski distance not supported on arch <= 200\n"); } __global__ void __linfdist(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) { printf("Warning, Max-abs distance not supported on arch <= 200\n"); } __global__ void __msum(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) { printf("Warning, Max-sum multiply not supported on arch <= 200\n"); } #endif int dists(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols, double p) { dim3 blockdim(32,4,4); dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128); // cudaSetDevice(ithread); if (p == 0.0f) { __linfdist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p); } else if (p == 1.0f) { __l1dist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p); } else if (p == 2.0f) { __l2dist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p); } else { __minkowskidist<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, p); } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int maxsumx(double *A, int lda, double *B, int ldb, double *C, int ldc, int d, int nrows, int ncols) { dim3 blockdim(32,4,4); dim3 griddim(1,1+(nrows-1)/128,1+(ncols-1)/128); __msum<<<griddim,blockdim>>>(A, lda, B, ldb, C, ldc, d, nrows, ncols, 0); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } #if __CUDA_ARCH__ > 200 template<class T> __global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) { __shared__ T tots[32]; int start, end, ij; int bid = blockIdx.y + blockIdx.z * blockDim.y; // column index T sum, tsum, tmp, ttot, ttot0; if (bid < ncols) { for (ij = blockIdx.x; ij < m; ij += gridDim.x) { start = jc[ij] + bid * nrows; end = jc[ij+1] + bid * nrows; sum = 0; for (int i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) { tsum = in[i]; tmp = __shfl_up(tsum, 1); if (threadIdx.x >= 1) tsum += tmp; tmp = __shfl_up(tsum, 2); if (threadIdx.x >= 2) tsum += tmp; tmp = __shfl_up(tsum, 4); if (threadIdx.x >= 4) tsum += tmp; tmp = __shfl_up(tsum, 8); if (threadIdx.x >= 8) tsum += tmp; tmp = __shfl_up(tsum, 16); if (threadIdx.x >= 16) tsum += tmp; ttot = __shfl(tsum, min(end-start-1, 31)); ttot0 = ttot; __syncthreads(); if (threadIdx.x == threadIdx.y) 
{ tots[threadIdx.y] = ttot; } __syncthreads(); for (int k = 1; k < blockDim.y; k *= 2) { if (threadIdx.y >= k) { if (threadIdx.x == threadIdx.y - k) { ttot += tots[threadIdx.x]; } } __syncthreads(); if (threadIdx.y >= k) { ttot = __shfl(ttot, threadIdx.y - k); if (threadIdx.x == threadIdx.y) { tots[threadIdx.y] = ttot; } } __syncthreads(); } out[i] = sum + tsum + ttot - ttot0; if (threadIdx.x == blockDim.y - 1) { ttot = tots[threadIdx.x]; } __syncthreads(); ttot = __shfl(ttot, blockDim.y - 1); sum += ttot; } } } } template<class T> __global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T maxminv, int dir) { __shared__ T maxv[32]; __shared__ int maxi[32]; T vmax, vtmp; int imax, itmp, i, k, start, end, ij; int bid = blockIdx.y + blockIdx.z * gridDim.y; if (bid < ncols) { for (ij = blockIdx.x; ij < m; ij += gridDim.x) { vmax = maxminv; imax = -1; start = jc[ij]; end = jc[ij+1]; for (i = start + threadIdx.x + threadIdx.y * blockDim.x; i < end; i += blockDim.x * blockDim.y) { vtmp = in[i + nrows * bid]; itmp = i; if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } for (k = 1; k < blockDim.x; k *= 2) { vtmp = __shfl_up(vmax, k); itmp = __shfl_up(imax, k); if (threadIdx.x >= k) { if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } } vmax = __shfl(vmax, blockDim.x - 1); imax = __shfl(imax, blockDim.x - 1); __syncthreads(); if (threadIdx.x == threadIdx.y) { maxv[threadIdx.y] = vmax; maxi[threadIdx.y] = imax; } __syncthreads(); if (threadIdx.y == 0) { vmax = maxv[threadIdx.x]; imax = maxi[threadIdx.x]; } __syncthreads(); if (threadIdx.y == 0) { for (k = 1; k < blockDim.y; k *= 2) { vtmp = __shfl_up(vmax, k); itmp = __shfl_up(imax, k); if (threadIdx.x >= k) { if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } } if (threadIdx.x == blockDim.y - 1) { out[ij + m * bid] = vmax; outi[ij + m * bid] = imax; } } } } } template<class T> __global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T maxminv, int dir) { __shared__ T maxv[32]; __shared__ int maxi[32]; T vmax, vtmp; int imax, itmp, i, k; int bid = blockIdx.x + blockIdx.y * gridDim.x; if (bid < ncols) { vmax = maxminv; imax = -1; for (i = threadIdx.x + threadIdx.y * blockDim.x; i < nrows; i += blockDim.x * blockDim.y) { vtmp = in[i + nrows * bid]; itmp = i; if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } for (k = 1; k < blockDim.x; k *= 2) { vtmp = __shfl_up(vmax, k); itmp = __shfl_up(imax, k); if (threadIdx.x >= k) { if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } } vmax = __shfl(vmax, blockDim.x - 1); imax = __shfl(imax, blockDim.x - 1); __syncthreads(); if (threadIdx.x == threadIdx.y) { maxv[threadIdx.y] = vmax; maxi[threadIdx.y] = imax; } __syncthreads(); if (threadIdx.y == 0) { vmax = maxv[threadIdx.x]; imax = maxi[threadIdx.x]; } __syncthreads(); if (threadIdx.y == 0) { for (k = 1; k < blockDim.y; k *= 2) { vtmp = __shfl_up(vmax, k); itmp = __shfl_up(imax, k); if (threadIdx.x >= k) { if (dir ? 
(vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } } if (threadIdx.x == blockDim.y - 1) { out[bid] = vmax; outi[bid] = imax; } } __syncthreads(); } } // Not very fast for wide matrices template<class T> __global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) { T vmax, vtmp; int imax, itmp, i, j; for (i = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * blockIdx.x); i < nrows; i += blockDim.x * blockDim.y * gridDim.x) { if (ncols > 0) { vmax = in[i]; imax = 0; for (j = 1; j < ncols; j++) { vtmp = in[i + nrows * j]; itmp = j; if (dir ? (vtmp > vmax) : (vtmp < vmax)) { vmax = vtmp; imax = itmp; } } out[i] = vmax; outi[i] = imax; } } } #else template<class T> __global__ void __cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) {} template<class T> __global__ void __maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) {} template<class T> __global__ void __maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) {} template<class T> __global__ void __maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) {} #endif void setindsD(int ncols, int &nc1, int &nc2) { if (ncols < 65536) { nc1 = ncols; nc2 = 1; } else { nc1 = (int)sqrt((double)ncols); nc2 = 1 + (ncols-1)/nc1; } } template<class T> int cumsumg(T *in, T *out, int *jc, int nrows, int ncols, int m) { int nc1, nc2; setindsD(ncols, nc1, nc2); dim3 grid(min(64, m), nc1, nc2); int ny = min(32, 1+nrows/m/32); dim3 tblock(32, ny, 1); __cumsumg<T><<<grid,tblock>>>(in, out, jc, nrows, ncols, m); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int cumsumgf(double *in, double *out, int *jc, int nrows, int ncols, int m) { return cumsumg<double>(in, out, jc, nrows, ncols, m); } template<class T> int maxming(T *in, T *out, int *outi, int *jc, int nrows, int ncols, int m, T minv, int dir) { int nc1, nc2; setindsD(ncols, nc1, nc2); dim3 grid(min(64, m), nc1, nc2); int ny = min(32, 1+nrows/m/32); dim3 tblock(32, ny, 1); __maxming<T><<<grid,tblock>>>(in, out, outi, jc, nrows, ncols, m, minv, dir); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } // JFC: problem here ncols a non-multiple of 16, and nrows < 32. 
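// maxmini_cols (below) computes a column-wise argmax (dir = 1) or argmin (dir = 0): one
// 32 x ny thread block scans the nrows entries of its column and writes the winning value
// to out[col] and the winning row index to outi[col].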
template<class T> int maxmini_cols(T *in, T *out, int *outi, int nrows, int ncols, T minv, int dir) { int nc1, nc2; setindsD(ncols, nc1, nc2); dim3 grid(nc1, nc2, 1); int ny = min(32, 1+nrows/32); dim3 tblock(32, ny, 1); __maxmini_cols<T><<<grid,tblock>>>(in, out, outi, nrows, ncols, minv, dir); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } template<class T> int maxmini_rows(T *in, T *out, int *outi, int nrows, int ncols, int dir) { int nb = min(32,1+nrows/32); dim3 grid(nb,1,1); int ny = min(32, 1+nrows/nb/32); dim3 tblock(32, ny, 1); __maxmini_rows<T><<<grid,tblock>>>(in, out, outi, nrows, ncols, dir); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int maxgf(double *in, double *out, int *outi, int *jc, int nrows, int ncols, int m) { return maxming<double>(in, out, outi, jc, nrows, ncols, m, -3e38f, 1); } int mingf(double *in, double *out, int *outi, int *jc, int nrows, int ncols, int m) { return maxming<double>(in, out, outi, jc, nrows, ncols, m, 3e38f, 0); } int maxif(double *in, double *out, int *outi, int nrows, int ncols, int dir) { if (dir == 1) { return maxmini_cols<double>(in, out, outi, nrows, ncols, -3e38f, 1); } else if (dir == 2) { return maxmini_rows<double>(in, out, outi, nrows, ncols, 1); } else { return -1; } } int minif(double *in, double *out, int *outi, int nrows, int ncols, int dir) { if (dir == 1) { return maxmini_cols<double>(in, out, outi, nrows, ncols, 3e38f, 0); } else if (dir == 2) { return maxmini_rows<double>(in, out, outi, nrows, ncols, 0); } else { return -1; } } __global__ void __dmv(double *a, int nrows, int ncols, double *b, double *c) { for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) { double accum = 0.0; for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) { accum += a[tx+nrows*ty] * b[ty]; } atomicAdd(&c[tx], accum); } } #if __CUDA_ARCH__ > 200 __global__ void __dmvt(double *a, int nrows, int ncols, double *b, double *c) { for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) { double accum = 0.0f; for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) { accum += a[tx+nrows*ty] * b[tx]; } for (int i = 1; i < blockDim.x; i *= 2) { double tmp = __shfl_down(accum, i); if (threadIdx.x + i < blockDim.x) accum += tmp; } if (threadIdx.x == 0) { atomicAdd(&c[ty], accum); } } } #else __global__ void __dmvt(double *a, int nrows, int ncols, double *b, double *c) { for (int ty = threadIdx.y + blockDim.y * blockIdx.y; ty < ncols; ty += blockDim.y * gridDim.y) { double accum = 0.0; for (int tx = threadIdx.x + blockDim.x * blockIdx.x; tx < nrows; tx += blockDim.x * gridDim.x) { accum += a[tx+nrows*ty] * b[tx]; } atomicAdd(&c[ty], accum); } } #endif __global__ void __dmv0(double *a, int nrows, int ncols, int tstep, double *b, double *c) { double accum = 0.0f; int tx = threadIdx.x + blockDim.x * blockIdx.x; if (tx < tstep) { for (; tx < nrows*ncols; tx += tstep) { int icol = tx / nrows; accum += a[tx] * b[icol]; } int irow = tx % nrows; atomicAdd(&c[irow], accum); } } int dmv(double *a, int nrows, int ncols, double *b, double *c, int trans) { if (trans == 1) { int ntx = min(32, nrows); int nty = min(32, ncols); int nbx = min(256, 1 + nrows/ntx/8); int nby = min(256, 1 + ncols/nty/2); dim3 blockdims(ntx,nty,1); dim3 griddims(nbx,nby,1); __dmvt<<<griddims,blockdims>>>(a, nrows, ncols, b, c); } else { int ntx = min(1024, 
nrows*ncols); int nbx = max(1+(nrows-1)/ntx, nrows*ncols/ntx/32); int tstep = (ntx*nbx/nrows)*nrows; __dmv0<<<nbx,ntx>>>(a, nrows, ncols, tstep, b, c); } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } #define ACCUM_KERNEL(TI,TJ,TV,TS,II,IJ,IV) \ __global__ void __accum(TI, TJ, TV, TS, int m, int nrows) { \ int istart = ((int)(((long long)blockIdx.x) * m / gridDim.x)); \ int iend = ((int)(((long long)blockIdx.x + 1) * m / gridDim.x)); \ istart = (istart / 32) * 32; \ if (blockIdx.x != gridDim.x - 1) { \ iend = (iend / 32) * 32; \ } \ for (int i = istart + threadIdx.x; i < iend; i+= blockDim.x) { \ atomicAdd(&S[II + nrows * IJ], IV); \ } \ } \ int accum(TI, TJ, TV, TS, int m, int nrows) { \ int nthreads = max(32, min(512, m)); \ int nblocks = max(1, min(65535, m/nthreads/8)); \ __accum<<<nblocks,nthreads>>>(I,J,V,S,m,nrows); \ cudaStreamSynchronize(SYNC_STREAM); \ cudaError_t err = cudaGetLastError(); \ return err; \ } ACCUM_KERNEL(int*I, int*J, double*V, double*S, I[i], J[i], V[i]) ACCUM_KERNEL(int*I, int J, double*V, double*S, I[i], J, V[i]) ACCUM_KERNEL(int I, int*J, double*V, double*S, I, J[i], V[i]) ACCUM_KERNEL(int*I, int*J, double V, double*S, I[i], J[i], V) ACCUM_KERNEL(int*I, int J, double V, double*S, I[i], J, V) ACCUM_KERNEL(int I, int*J, double V, double*S, I, J[i], V) const int INBLOCK = 4; // copy and transpose columns of the input matrix into the output matrix. nrows refers to the input matrix // (and so is ncols for the output). ncols is the length of the iptrs array, which will be the number of // rows of the output matrix. iptrs specifies the columns of the input array to copy. // outstride is stride of the output matrix __global__ void __icopy_transpose(int *iptrs, double *in, double *out, int outstride, int nrows, int ncols) { __shared__ double tile[BLOCKDIM][BLOCKDIM+1]; int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) { tile[threadIdx.x][y-yb] = in[threadIdx.x + xb + iptrs[y]*nrows]; } } __syncthreads(); if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { out[threadIdx.x + yb + x*outstride] = tile[x-xb][threadIdx.x]; } } __syncthreads(); } } } int icopy_transpose(int *iptrs, double *in, double *out, int stride, int nrows, int ncols) { const dim3 griddims(20,256,1); const dim3 blockdims(BLOCKDIM,INBLOCK,1); cudaError_t err; __icopy_transpose<<<griddims,blockdims>>>(iptrs, in, out, stride, nrows, ncols); cudaStreamSynchronize(SYNC_STREAM); err = cudaGetLastError(); if (err != cudaSuccess) {fprintf(stderr, "cuda error in icopy_transpose"); return err;} return 0; } // copy and transpose the input matrix into columns of the output matrix. 
nrows, ncols refer to output matrix __global__ void __ocopy_transpose(int *optrs, double *in, double *out, int instride, int nrows, int ncols) { int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; __shared__ double tile[BLOCKDIM][BLOCKDIM+1]; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride]; } } __syncthreads(); if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) { out[optrs[y]*nrows + threadIdx.x + xb] = tile[threadIdx.x][y-yb]; } } __syncthreads(); } } } __global__ void __ocopy_transpose_add(int *optrs, double *in, double *out, int instride, int nrows, int ncols) { int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; __shared__ double tile[BLOCKDIM][BLOCKDIM+1]; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride]; } } __syncthreads(); if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) { atomicAdd(&out[optrs[y]*nrows + threadIdx.x + xb], tile[threadIdx.x][y-yb]); } } __syncthreads(); } } } __global__ void __ocopy_transpose_min(int *optrs, double *in, double *out, int instride, int nrows, int ncols) { int nx = BLOCKDIM * gridDim.x; int ny = BLOCKDIM * gridDim.y; int ix = BLOCKDIM * blockIdx.x; int iy = BLOCKDIM * blockIdx.y; __shared__ double tile[BLOCKDIM][BLOCKDIM+1]; for (int yb = iy; yb < ncols; yb += ny) { for (int xb = ix; xb < nrows; xb += nx) { if (yb + threadIdx.x < ncols) { int xlim = min(nrows, xb + BLOCKDIM); for (int x = threadIdx.y + xb; x < xlim; x += blockDim.y) { tile[x-xb][threadIdx.x] = in[threadIdx.x + yb + x*instride]; } } __syncthreads(); if (xb + threadIdx.x < nrows) { int ylim = min(ncols, yb + BLOCKDIM); for (int y = threadIdx.y + yb; y < ylim; y += blockDim.y) { atomicMin((int *)&out[optrs[y]*nrows + threadIdx.x + xb], *(int *)(&tile[threadIdx.x][y-yb])); } } __syncthreads(); } } } int ocopy_transpose_add(int *optrs, double *in, double *out, int stride, int nrows, int ncols) { const dim3 griddims(20,256,1); const dim3 blockdims(BLOCKDIM,INBLOCK,1); cudaError_t err; __ocopy_transpose_add<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols); cudaStreamSynchronize(SYNC_STREAM); err = cudaGetLastError(); if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose"); return err;} return 0; } int ocopy_transpose(int *optrs, double *in, double *out, int stride, int nrows, int ncols) { const dim3 griddims(20,256,1); const dim3 blockdims(BLOCKDIM,INBLOCK,1); cudaError_t err; __ocopy_transpose<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols); cudaStreamSynchronize(SYNC_STREAM); err = cudaGetLastError(); if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose"); return err;} return 0; } int ocopy_transpose_min(int *optrs, double *in, double *out, int stride, int nrows, int ncols) { const dim3 griddims(20,256,1); const dim3 blockdims(BLOCKDIM,INBLOCK,1); cudaError_t err; 
__ocopy_transpose_min<<<griddims,blockdims>>>(optrs, in, out, stride, nrows, ncols); cudaStreamSynchronize(SYNC_STREAM); err = cudaGetLastError(); if (err != cudaSuccess) {fprintf(stderr, "cuda error in ocopy_transpose"); return err;} return 0; } #ifdef TEST int main(int argc, char **argv) { int m=8, n=8, opn = 0; double *dA, *dB, *dC, *A, *B, *C; if (argc > 1) { sscanf(argv[1], "%d", &opn); if (argc > 2) { sscanf(argv[2], "%d", &m); if (argc > 3) { sscanf(argv[3], "%d", &n); } } } A = (double *)malloc(m*n*sizeof(double)); B = (double *)malloc(m*n*sizeof(double)); C = (double *)malloc(m*n*sizeof(double)); cudaMalloc((void**)&dA, m*n*sizeof(double)); cudaMalloc((void**)&dB, m*n*sizeof(double)); cudaMalloc((void**)&dC, m*n*sizeof(double)); for (int i = 0; i < m*n; i++) { A[i] = 1.0f; B[i] = 2.0f; } cudaMemcpy(dA, A, m*n*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dB, B, m*n*sizeof(double), cudaMemcpyHostToDevice); printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]); printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]); MatKernel(dA, m, n, dB, m, n, dC, opn); cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { fprintf(stderr, "CUDA error %d", err); exit(1); } cudaMemcpy(C, dC, m*n*sizeof(double), cudaMemcpyDeviceToHost); printf("C %f %f %f %f\n", C[0], C[1], C[2], C[3]); printf("A %f %f %f %f\n", A[0], A[1], A[2], A[3]); printf("B %f %f %f %f\n", B[0], B[1], B[2], B[3]); if (dA != NULL) cudaFree(dA); if (dB != NULL) cudaFree(dB); if (dC != NULL) cudaFree(dC); if (C != NULL) free(C); } #endif // Cumulative sum of columns #if __CUDA_ARCH__ >= 300 __global__ void __cumsumc(int nrows, int ncols, double *A, double *B) { int i, j, k, lim; double v, w, sum; int icol = threadIdx.y + blockDim.y * blockIdx.x; __syncthreads(); for (i = icol; i < ncols; i += blockDim.y * gridDim.x) { sum = 0.0f; for (j = 0; j < nrows; j += blockDim.x) { v = 0; if (j + threadIdx.x < nrows) { v = A[j + threadIdx.x + i * nrows]; } lim = min(blockDim.x, nrows - j); #pragma unroll for (k = 1; k < lim; k = k + k) { w = __shfl_up(v, k); if (threadIdx.x >= k) { v += w; } } v += sum; if (j + threadIdx.x < nrows) { B[j + threadIdx.x + i * nrows] = v; } sum = __shfl(v, blockDim.x - 1); } } } #else __global__ void __cumsumc(int nrows, int ncols, double *A, double *B) { __shared__ double buff[32]; int i, j, k, lim; double v, sum; int icol = threadIdx.y + blockDim.y * blockIdx.x; __syncthreads(); for (i = icol; i < ncols; i += blockDim.y * gridDim.x) { sum = 0.0f; for (j = 0; j < nrows; j += blockDim.x) { v = 0; if (j + threadIdx.x < nrows) { v = A[j + threadIdx.x + i * nrows]; } __syncthreads(); buff[threadIdx.x] = v; lim = min(blockDim.x, nrows - j); #pragma unroll for (k = 1; k < lim; k = k + k) { __syncthreads(); if (threadIdx.x >= k) { v += buff[threadIdx.x - k]; } __syncthreads(); buff[threadIdx.x] = v; } v += sum; if (j + threadIdx.x < nrows) { B[j + threadIdx.x + i * nrows] = v; } __syncthreads(); sum = buff[31]; __syncthreads(); } } } #endif int cumsumc(int nrows, int ncols, double *A, double *B) { if (ncols == 1) { thrust::device_ptr<double> pa(A); thrust::device_ptr<double> pb(B); thrust::inclusive_scan(pa, pa + nrows, pb); } else { dim3 threads; threads.x = 32; threads.y = min(32, ncols); int nblocks = min(64, 1 + (ncols-1)/threads.y); __cumsumc<<<nblocks,threads>>>(nrows, ncols, A, B); } cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int inclusive_scan_by_key_dd(double *fvals, double *fkeys, double *fout, long long len) { thrust::device_ptr<double> vals(fvals); 
thrust::device_ptr<double> keys(fkeys); thrust::device_ptr<double> out(fout); thrust::inclusive_scan_by_key(keys, keys+len, vals, out); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int inclusive_scan_by_key_ll(long long *fvals, long long *fkeys, long long *fout, long long len) { thrust::device_ptr<long long> vals(fvals); thrust::device_ptr<long long> keys(fkeys); thrust::device_ptr<long long> out(fout); thrust::inclusive_scan_by_key(keys, keys+len, vals, out); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; } int reverse(double *fvals, double *fout, long long len) { thrust::device_ptr<double> vals(fvals); thrust::device_ptr<double> out(fout); thrust::reverse_copy(vals, vals+len, out); cudaStreamSynchronize(SYNC_STREAM); cudaError_t err = cudaGetLastError(); return err; }
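// A minimal host-side sketch (not part of the original file) of how the dists() entry point
// above might be driven. The data layout is an assumption for illustration only: A is taken
// to be d x nrows and B d x ncols, column-major with lda = ldb = d, and C receives the
// nrows x ncols result with ldc = nrows. Note that p = 2.0 selects __l2dist, which
// accumulates squared differences and does not take a square root; p = 1.0 selects __l1dist
// and p = 0.0 the max-abs kernel.
int example_pairwise_l2(int d, int nrows, int ncols) {
  double *A, *B, *C;
  cudaMalloc((void**)&A, d * nrows * sizeof(double));
  cudaMalloc((void**)&B, d * ncols * sizeof(double));
  cudaMalloc((void**)&C, nrows * ncols * sizeof(double));
  // ... fill A and B on the device ...
  int err = dists(A, d, B, d, C, nrows, d, nrows, ncols, 2.0);
  cudaFree(A); cudaFree(B); cudaFree(C);
  return err;
}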
#include "ew_op_gpu.h" #include "gpu_hmma.h" #include <stdio.h> #if __CUDA_ARCH__ >= 700 template <uint OP_A, bool GATED> __global__ void __launch_bounds__(128) hgemm_blocksparse_32x128x32_xn_sdd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ A, const ehalf* __restrict__ B, ehalf* C, uint* Lock, uint locks, uint N, uint blk_a, uint blk_b, uint blk_N) { const uint stdA = 32 + 16; const uint stdB = 128 + 16; const uint stdC = 128 + 4; __shared__ ehalf hShare[(stdA + stdB)*32]; uint2* LutOffsets = (uint2*)&hShare[(stdA + stdB)*32]; uint tid = threadIdx.x; uint idx_ab = blockIdx.x; uint idx_B = blockIdx.y; uint idx_A = blockIdx.z; uint idx_L = idx_A * blk_a + idx_ab / blk_b; uint idx_N = idx_B * blk_b + idx_ab % blk_b; uint4 lut_head = ((const uint4*)Lut)[idx_L]; uint lut_offset = lut_head.x; uint lut_size = lut_head.y; uint idx_K = lut_head.z; uint idx_Lock = lut_head.w; uint txa = tid % 8; uint tya = tid / 8; uint txb = tid % 16; uint tyb = tid / 16; if (lut_size > 0) { uint* Gates = (uint*)&LutOffsets[lut_size]; // prefetch the lut and gate data into shared Lut += lut_offset; #pragma unroll 1 for (uint i = tid; i < lut_size; i += 128) { uint2 entry = Lut[i]; if (GATED) { float gate = Gate[entry.y]; uint gate2; asm("{ \n\t" ".reg .f16 gate; \n\t" "cvt.rn.f16.f32 gate, %1; \n\t" "mov.b32 %0, {gate, gate}; \n\t" "}" : "=r"(gate2) : "f"(gate)); Gates[i] = gate2; } else Gates[i] = 1; entry.y *= 32*32; entry.x *= N*32; LutOffsets[i] = entry; } __syncthreads(); uint storA = tya*stdA + txa*4; uint storB = tyb*stdB + txb*8 + stdA*32; uint loadA = fragmentA<OP_A,M16N16K16>::get_idx(tid, stdA); uint loadB = fragmentB<OP_N,M16N16K16>::get_idx(tid, stdB, (tid & 96) + stdA*32); uint n = idx_N*128 + txb*8; uint offsetB = tyb*N + n; asm(".reg .pred pn;\n\tsetp.lt.u32 pn, %0, %1;" :: "r"(n), "r"(N)); // n < N asm("mov.b32 %0, %0;" : "+r"(idx_N) : ); asm("mov.b32 %0, %0;" : "+r"(loadA) : ); asm("mov.b32 %0, %0;" : "+r"(loadB) : ); asm("mov.b32 %0, %0;" : "+r"(offsetB) : ); fragmentC<OP_A,OP_N,M16N16K16> fragC[2][2]; int idx_lut = 0; #pragma unroll 1 do { uint gate = Gates[idx_lut]; if (gate != 0) { uint2 entry = LutOffsets[idx_lut]; const ehalf* pA = A + (entry.y + tid*4); uint2 a00 = load_half4(pA + 0*32); uint2 a16 = load_half4(pA + 16*32); uint4 b00, b08, b16, b24; entry.x += offsetB; asm("mov.b64 { %0, %1}, 0; \n\t" "mov.b64 { %2, %3}, 0; \n\t" "mov.b64 { %4, %5}, 0; \n\t" "mov.b64 { %6, %7}, 0; \n\t" "mov.b64 { %8, %9}, 0; \n\t" "mov.b64 {%10, %11}, 0; \n\t" "mov.b64 {%12, %13}, 0; \n\t" "mov.b64 {%14, %15}, 0; \n\t" "@pn ld.global.nc.v4.u32 { %0, %1, %2, %3}, [%16];\n\t" "@pn ld.global.nc.v4.u32 { %4, %5, %6, %7}, [%17];\n\t" "@pn ld.global.nc.v4.u32 { %8, %9, %10, %11}, [%18];\n\t" "@pn ld.global.nc.v4.u32 {%12, %13, %14, %15}, [%19];\n\t" : "=r"(b00.x), "=r"(b00.y), "=r"(b00.z), "=r"(b00.w), "=r"(b08.x), "=r"(b08.y), "=r"(b08.z), "=r"(b08.w), "=r"(b16.x), "=r"(b16.y), "=r"(b16.z), "=r"(b16.w), "=r"(b24.x), "=r"(b24.y), "=r"(b24.z), "=r"(b24.w) : "l"(B + (entry.x + N* 0)), "l"(B + (entry.x + N* 8)), "l"(B + (entry.x + N*16)), "l"(B + (entry.x + N*24))); if (GATED) { asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(a00.x) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(a00.y) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(a16.x) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(a16.y) : "r"(gate)); } __syncthreads(); *(uint2*)&hShare[storA + 0*stdA] = a00; *(uint2*)&hShare[storA + 16*stdA] = a16; *(uint4*)&hShare[storB + 0*stdB] = b00; *(uint4*)&hShare[storB + 
8*stdB] = b08; *(uint4*)&hShare[storB + 16*stdB] = b16; *(uint4*)&hShare[storB + 24*stdB] = b24; __syncthreads(); fragmentA<OP_A,M16N16K16> fragA[2]; fragmentB<OP_N,M16N16K16> fragB[2]; for (int k = 0; k < 2; k++) { for (int i = 0; i < 2; i++) { fragA[i].load(hShare, loadA + (OP_A == OP_N ? stdA : 1)*i*16 + (OP_A == OP_N ? 1 : stdA)*k*16, stdA); fragB[i].load(hShare, loadB + i*16 + k*16*stdB, stdB); } for (int i = 0; i < 2; i++) for (int j = 0; j < 2; j++) fragC[i][j].mma_sync(fragA[i], fragB[j]); } } } while (++idx_lut < lut_size); uint txc = tid % 32; uint tyc = tid / 32; n = idx_N*128 + txc*4; uint loadC = tyc*stdC + txc*4; uint storC = fragmentC<OP_A,OP_N,M16N16K16>::get_idx(tid, stdC, tid & 96); uint offsetC = (idx_K*32 + tyc)*N + n; __syncthreads(); for (int i = 0; i < 2; i++) for (int j = 0; j < 2; j++) fragC[i][j].store(hShare, storC + i*16*stdC + j*16, stdC); __syncthreads(); if (idx_Lock == 0) { if (n < N) for (int j = 0; j < 8; j++) store_half4(C + (offsetC + j*4*N), *(uint2*)&hShare[loadC + stdC*j*4]); } else { Lock += idx_N*locks + idx_Lock - 1; // Critial Section if (tid == 0) while (atomicCAS(Lock, 0, 1) != 0); __syncthreads(); uint* Count = Lock + locks * blk_N; uint count = *Count; __syncthreads(); if (count == 0) { if (tid == 0) *Count = 1; // first block to get here just writes out to init the memory if (n < N) for (int j = 0; j < 8; j++) store_half4(C + (offsetC + j*4*N), *(uint2*)&hShare[loadC + stdC*j*4]); __threadfence(); __syncthreads(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } else { txc = tid % 64; tyc = tid / 64; n = idx_N*128 + txc*2; loadC = tyc*stdC + txc*2; offsetC = (idx_K*32 + tyc)*N + n; // subsequent blocks must accumulate if (n < N) for (int j = 0; j < 16; j++) reduce_half2(C + (offsetC + j*2*N), *(uint*)&hShare[loadC + stdC*j*2]); __threadfence(); __syncthreads(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } } } else { uint n = idx_N*128 + txb*8; uint offsetC = (idx_K*32 + tyb)*N + n; if (n < N) for (int i = 0; i < 4; i++) zero_half8(C + (offsetC + i*8*N)); } } template <uint OP_A, bool GATED> __global__ void __launch_bounds__(128) hgemm_blocksparse_16x128x16_xn_sdd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ A, const ehalf* __restrict__ B, ehalf* C, uint* Lock, uint locks, uint N, uint blk_a, uint blk_b, uint blk_N) { const uint stdA = 16; const uint stdB = 128 + 16; const uint stdC = 128 + 4; __shared__ ehalf hShare[(stdA + stdB)*16]; uint2* LutOffsets = (uint2*)&hShare[(stdA + stdB)*16]; uint tid = threadIdx.x; uint idx_ab = blockIdx.x; uint idx_B = blockIdx.y; uint idx_A = blockIdx.z; uint idx_L = idx_A * blk_a + idx_ab / blk_b; uint idx_N = idx_B * blk_b + idx_ab % blk_b; uint4 lut_head = ((const uint4*)Lut)[idx_L]; uint lut_offset = lut_head.x; uint lut_size = lut_head.y; uint idx_K = lut_head.z; uint idx_Lock = lut_head.w; uint txb = tid % 16; uint tyb = tid / 16; if (lut_size > 0) { uint* Gates = (uint*)&LutOffsets[lut_size]; // prefetch the lut and gate data into shared Lut += lut_offset; #pragma unroll 1 for (uint i = tid; i < lut_size; i += 128) { uint2 entry = Lut[i]; if (GATED) { float gate = Gate[entry.y]; uint gate2; asm("{ \n\t" ".reg .f16 gate; \n\t" "cvt.rn.f16.f32 gate, %1; \n\t" "mov.b32 %0, {gate, gate}; \n\t" "}" : "=r"(gate2) : "f"(gate)); Gates[i] = gate2; } else Gates[i] = 1; entry.y *= 16*16; entry.x *= N*16; LutOffsets[i] = entry; } __syncthreads(); uint storB = tyb*stdB + txb*8 + 16*stdA; uint loadA = fragmentA<OP_A,M16N16K16>::get_idx(tid, stdA); 
uint loadB = fragmentB<OP_N,M16N16K16>::get_idx(tid, stdB, 16*stdA + (tid & 96)); uint n = idx_N*128 + txb*8; uint offsetB = tyb*N + n; asm(".reg .pred pn;\n\tsetp.lt.u32 pn, %0, %1;" :: "r"(n), "r"(N)); // n < N asm("mov.b32 %0, %0;" : "+r"(idx_N) : ); asm("mov.b32 %0, %0;" : "+r"(loadA) : ); asm("mov.b32 %0, %0;" : "+r"(loadB) : ); asm("mov.b32 %0, %0;" : "+r"(offsetB) : ); fragmentC<OP_A,OP_N,M16N16K16> fragC[2]; int idx_lut = 0; #pragma unroll 1 do { uint gate = Gates[idx_lut]; if (gate != 0) { uint2 entry = LutOffsets[idx_lut]; uint a0 = load_half2(A + (entry.y + tid*2)); uint4 b0, b8; asm("mov.b64 {%0, %1}, 0; \n\t" "mov.b64 {%2, %3}, 0; \n\t" "mov.b64 {%4, %5}, 0; \n\t" "mov.b64 {%6, %7}, 0; \n\t" "@pn ld.global.nc.v4.u32 {%0, %1, %2, %3}, [%8];\n\t" "@pn ld.global.nc.v4.u32 {%4, %5, %6, %7}, [%9];\n\t" : "=r"(b0.x), "=r"(b0.y), "=r"(b0.z), "=r"(b0.w), "=r"(b8.x), "=r"(b8.y), "=r"(b8.z), "=r"(b8.w) : "l"(B + (entry.x + offsetB + N*0)), "l"(B + (entry.x + offsetB + N*8))); // printf("%d %d %3d %08x %08x %08x %08x %08x %08x %08x %08x\n", // idx_K, idx_Lock, tid, // b0.x, b0.y, b0.z, b0.w, // b8.x, b8.y, b8.z, b8.w); if (GATED) asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(a0) : "r"(gate)); __syncthreads(); *(uint *)&hShare[tid*2] = a0; *(uint4*)&hShare[storB + 0*stdB] = b0; *(uint4*)&hShare[storB + 8*stdB] = b8; __syncthreads(); fragmentA<OP_A,M16N16K16> fragA; fragmentB<OP_N,M16N16K16> fragB; fragA.load(hShare, loadA, stdA); // printf("%d %d %3d %08x %08x %08x %08x %08x %08x %08x %08x\n", // idx_K, idx_Lock, tid, // fragA.x[0], fragA.x[1], fragA.x[2], fragA.x[3], fragA.x[4], fragA.x[5], fragA.x[6], fragA.x[7]); #pragma unroll for (int j = 0; j < 2; j++) { fragB.load(hShare, loadB + j*16, stdB); fragC[j].mma_sync(fragA, fragB); } } } while (++idx_lut < lut_size); // printf("%d %d %3d %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f\n", // idx_K, idx_Lock, tid, // fragC[0].x[0], fragC[0].x[1], fragC[0].x[2], fragC[0].x[3], fragC[0].x[4], fragC[0].x[5], fragC[0].x[6], fragC[0].x[7], // fragC[1].x[0], fragC[1].x[1], fragC[1].x[2], fragC[1].x[3], fragC[1].x[4], fragC[1].x[5], fragC[1].x[6], fragC[1].x[7]); // use thread stride of 4 to allow use of shared stride of 132 // which minimizes shared bank conflicts on write. 
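// Each of the 128 threads owns a 4-half slice of the 128-wide tile row: txc selects the
// slice within the row, tyc one of four rows per store pass, so results staged in shared
// memory can be written back to C in contiguous 4-half chunks.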
uint txc = tid % 32; uint tyc = tid / 32; n = idx_N*128 + txc*4; uint loadC = tyc*stdC + txc*4; uint storC = fragmentC<OP_A,OP_N,M16N16K16>::get_idx(tid, stdC, tid & 96); uint offsetC = (idx_K*16 + tyc)*N + n; __syncthreads(); for (int j = 0; j < 2; j++) fragC[j].store(hShare, storC + j*16, stdC); __syncthreads(); if (idx_Lock == 0) { // no lock needed just write out the results for (uint i = 0; i < 4; i++) if (n < N) store_half4(C + (offsetC + N*i*4), *(uint2*)&hShare[loadC + stdC*i*4]); } else { Lock += idx_N*locks + idx_Lock - 1; // Critial Section if (tid == 0) while (atomicCAS(Lock, 0, 1) != 0); __syncthreads(); uint* Count = Lock + locks * blk_N; uint count = *Count; __syncthreads(); if (count == 0) { if (tid == 0) *Count = 1; // first block to get here just writes out to init the memory for (uint i = 0; i < 4; i++) if (n < N) store_half4(C + (offsetC + N*i*4), *(uint2*)&hShare[loadC + stdC*i*4]); __threadfence(); __syncthreads(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } else { txc = tid % 64; tyc = tid / 64; n = idx_N*128 + txc*2; loadC = tyc*stdC + txc*2; offsetC = (idx_K*16 + tyc)*N + n; // subsequent blocks must accumulate for (uint i = 0; i < 8; i++) if (n < N) reduce_half2(C + (offsetC + N*i*2), *(uint*)&hShare[loadC + stdC*i*2]); __threadfence(); __syncthreads(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } } } else { uint n = idx_N*128 + txb*8; C += (idx_K*16 + tyb)*N + n; if (n < N) { zero_half8(C + N*0); zero_half8(C + N*8); } } } template <uint OP_A, bool GATED> __global__ void __launch_bounds__(128) hgemm_blocksparse_8x128x8_xn_sdd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ A, const ehalf* __restrict__ B, ehalf* C, uint* Lock, uint locks, uint N, uint blk_a, uint blk_b, uint blk_N) { const uint stdA = 8; const uint stdB = 128 + 16; const uint stdC = 128 + 4; __shared__ ehalf hShare[(stdA + stdB)*16]; uint2* LutOffsets = (uint2*)&hShare[(stdA + stdB)*16]; uint tid = threadIdx.x; uint idx_ab = blockIdx.x; uint idx_B = blockIdx.y; uint idx_A = blockIdx.z; uint idx_L = idx_A * blk_a + idx_ab / blk_b; uint idx_N = idx_B * blk_b + idx_ab % blk_b; uint4 lut_head = ((const uint4*)Lut)[idx_L]; uint lut_offset = lut_head.x; uint lut_size = lut_head.y; uint idx_K = lut_head.z; uint idx_Lock = lut_head.w; if (lut_size > 0) { ushort* Gates = (ushort*)&LutOffsets[lut_size]; // prefetch the lut and gate data into shared Lut += lut_offset; #pragma unroll 1 for (uint i = tid; i < lut_size; i += 128) { uint2 entry = Lut[i]; if (GATED) { float gate = Gate[entry.y]; ushort gate16; asm("cvt.rn.f16.f32 %0, %1;" : "=h"(gate16) : "f"(gate)); Gates[i] = gate16; } else Gates[i] = 1; entry.y *= 8*8; // 64 entries of A per block entry.x *= N*8; // 8 lines of B per block LutOffsets[i] = entry; } if (tid == 0) Gates[lut_size] = 0; // needed if lut_size is odd __syncthreads(); uint t64 = tid & 64; uint t63 = tid & 63; uint txb = tid % 16; uint tyb = t63 / 16; uint storB = tyb*stdB + txb*8 + t64*stdB*8/64 + 16*stdA; uint loadA = fragmentA<OP_A,M8N32K16>::get_idx(tid, stdA); uint loadB = fragmentB<OP_N,M8N32K16>::get_idx(tid, stdB, (tid & 96) + 16*stdA); uint n = idx_N*128 + txb*8; uint offsetA = t63; uint offsetB = tyb*N + n; fragmentC<OP_A,OP_N,M8N32K16> fragC; uint idx_lut = t64 / 64; uint idx_lut2 = 0; uint lut_size2 = (lut_size + 1)/2; asm(".reg .pred pn;\n\tsetp.lt.u32 pn, %0, %1;" :: "r"(n), "r"(N)); // n < N asm("mov.b32 %0, %0;" : "+r"(idx_N) : ); asm("mov.b32 %0, %0;" : "+r"(loadA) : ); asm("mov.b32 %0, %0;" : 
"+r"(loadB) : ); asm("mov.b32 %0, %0;" : "+r"(offsetA) : ); asm("mov.b32 %0, %0;" : "+r"(offsetB) : ); #pragma unroll 1 do { ushort a0 = 0; uint4 b0 = {0}; uint4 b4 = {0}; ushort gate = Gates[idx_lut]; // if the gate is zero just skip over memory loads // we compute 2 blocks per loop so it's easier to just always do the mma math if (gate != 0) { uint2 entry = LutOffsets[idx_lut]; a0 = load_half(A + (entry.y + offsetA)); asm("@pn ld.global.nc.v4.u32 {%0, %1, %2, %3}, [%8];\n\t" "@pn ld.global.nc.v4.u32 {%4, %5, %6, %7}, [%9];\n\t" : "=r"(b0.x), "=r"(b0.y), "=r"(b0.z), "=r"(b0.w), "=r"(b4.x), "=r"(b4.y), "=r"(b4.z), "=r"(b4.w) : "l"(B + (entry.x + offsetB + N*0)), "l"(B + (entry.x + offsetB + N*4))); } if (GATED) asm("mul.rn.f16 %0, %0, %1;" : "+h"(a0) : "h"(gate)); // if (OP_A == OP_T) // printf("%d %2d A:%08x B: %08x %08x %08x %08x %08x %08x %08x %08x\n", idx_K, tid, a0, b0.x,b0.y,b0.z,b0.w, b4.x,b4.y,b4.z,b4.w); __syncthreads(); *(ushort*)&hShare[tid ] = a0; *(uint4*)&hShare[storB + 0*stdB] = b0; *(uint4*)&hShare[storB + 4*stdB] = b4; __syncthreads(); fragmentA<OP_A,M8N32K16> fragA; fragmentB<OP_N,M8N32K16> fragB; fragA.load(hShare, loadA, stdA); fragB.load(hShare, loadB, stdB); // if (OP_A == OP_T) // printf("%d %2d A:%08x %08x %08x %08x %08x %08x %08x %08x B:%08x %08x %08x %08x %08x %08x %08x %08x\n", idx_K, tid, // fragA.x[0], fragA.x[1], fragA.x[2], fragA.x[3], fragA.x[4], fragA.x[5], fragA.x[6], fragA.x[7], // fragB.x[0], fragB.x[1], fragB.x[2], fragB.x[3], fragB.x[4], fragB.x[5], fragB.x[6], fragB.x[7]); fragC.mma_sync(fragA, fragB); idx_lut += 2; } while (++idx_lut2 < lut_size2); // use thread stride of 4 to allow use of shared stride of 68 // which minimizes shared bank conflicts on write. uint txc = tid % 32; uint tyc = tid / 32; n = idx_N*128 + txc*4; uint loadC = tyc*stdC + txc*4; uint storC = fragmentC<OP_A,OP_N,M8N32K16>::get_idx(tid, stdC, tid & 96); uint offsetC = (idx_K*8 + tyc)*N + n; // if (OP_A == OP_T) // printf("%d %d %2d %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f\n", idx_K, idx_Lock, tid, fragC.x[0], fragC.x[1], fragC.x[2], fragC.x[3], fragC.x[4], fragC.x[5], fragC.x[6], fragC.x[7]); __syncthreads(); fragC.store(hShare, storC, stdC); __syncthreads(); if (idx_Lock == 0) { // no lock needed just write out the results for (uint i = 0; i < 2; i++) if (n < N) store_half4(C + (offsetC + N*i*4), *(uint2*)&hShare[loadC + stdC*i*4]); } else { Lock += idx_N*locks + idx_Lock - 1; // Critial Section if (tid == 0) while (atomicCAS(Lock, 0, 1) != 0); __syncthreads(); uint* Count = Lock + locks * blk_N; uint count = *Count; __syncthreads(); if (count == 0) { if (tid == 0) *Count = 1; // first block to get here just writes out to init the memory for (uint i = 0; i < 2; i++) if (n < N) store_half4(C + (offsetC + N*i*4), *(uint2*)&hShare[loadC + stdC*i*4]); __threadfence(); __syncthreads(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } else { txc = tid % 64; tyc = tid / 64; n = idx_N*128 + txc*2; loadC = tyc*stdC + txc*2; offsetC = (idx_K*8 + tyc)*N + n; // subsequent blocks must accumulate for (uint i = 0; i < 4; i++) if (n < N) reduce_half2(C +(offsetC + N*i*2), *(uint*)&hShare[loadC + stdC*i*2]); __threadfence(); __syncthreads(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } } } else // lut_size == 0 { uint txc = tid % 16; uint tyc = tid / 16; uint n = idx_N*128 + txc*8; uint offsetC = (idx_K*8 + tyc)*N + n; if (n < N) zero_half8(C + offsetC); } } template <bool N128, bool GATED> __global__ void __launch_bounds__(128,6) hgemm_blocksparse_32x32x128_nt_dds( 
struct Plist<ehalf,8> A, struct Plist<ehalf,8> B, ehalf* C, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint loops, uint accumulate) { const uint stdAB = 128 + 8; const uint stdC = 128 + 4; __shared__ ehalf hShare[stdAB*2*32]; float* fShare = (float*)hShare; uint tid = threadIdx.x; uint bid = blockIdx.x; float gate = GATED ? Gate[bid] : 1.0f; if (gate != 0.0f) { uint2 lut_head = Lut[bid]; uint tx = tid % 16; uint ty = tid / 16; uint n0 = tx * 8; uint idx_A = lut_head.x; uint idx_B = lut_head.y; uint offsetA0 = (idx_A*32 + ty)*N + n0; uint offsetB0 = (idx_B*32 + ty)*N + n0; uint storAB = ty*stdAB + n0; uint loadA = fragmentA<OP_N,M16N16K16>::get_idx(tid, stdAB, (tid & 96)); uint loadB = fragmentB<OP_T,M16N16K16>::get_idx(tid, stdAB, (tid & 96) + stdAB*32); fragmentC<OP_N,OP_T,M16N16K16> fragC[2][2]; int p8 = 0; #pragma unroll 1 do { const ehalf* A0; const ehalf* B0; asm("ld.param.u64 %0, [%2 + 0x160];\n\t" "ld.param.u64 %1, [%2 + 0x1a0];" : "=l"(A0), "=l"(B0) : "r"(p8)); p8 += 8; uint offsetA = offsetA0; uint offsetB = offsetB0; uint n = n0; uint loop = 0; #pragma unroll 1 do { uint4 a00 = {0}, a08 = {0}, a16 = {0}, a24 = {0}; uint4 b00 = {0}, b08 = {0}, b16 = {0}, b24 = {0}; if (N128 || n < N) { a00 = load_half8(A0 + (offsetA + 0*8*N)); a08 = load_half8(A0 + (offsetA + 1*8*N)); a16 = load_half8(A0 + (offsetA + 2*8*N)); a24 = load_half8(A0 + (offsetA + 3*8*N)); b00 = load_half8(B0 + (offsetB + 0*8*N)); b08 = load_half8(B0 + (offsetB + 1*8*N)); b16 = load_half8(B0 + (offsetB + 2*8*N)); b24 = load_half8(B0 + (offsetB + 3*8*N)); } offsetA += 128; offsetB += 128; if (!N128) n += 128; __syncthreads(); *(uint4*)&hShare[storAB + 0*8*stdAB + 0*stdAB] = a00; *(uint4*)&hShare[storAB + 1*8*stdAB + 0*stdAB] = a08; *(uint4*)&hShare[storAB + 2*8*stdAB + 0*stdAB] = a16; *(uint4*)&hShare[storAB + 3*8*stdAB + 0*stdAB] = a24; *(uint4*)&hShare[storAB + 0*8*stdAB + 32*stdAB] = b00; *(uint4*)&hShare[storAB + 1*8*stdAB + 32*stdAB] = b08; *(uint4*)&hShare[storAB + 2*8*stdAB + 32*stdAB] = b16; *(uint4*)&hShare[storAB + 3*8*stdAB + 32*stdAB] = b24; __syncthreads(); fragmentA<OP_N,M16N16K16> fragA[2]; fragmentB<OP_T,M16N16K16> fragB[2]; for (int k = 0; k < 2; k++) { for (int i = 0; i < 2; i++) { fragA[i].load(hShare, loadA + k*16 + stdAB*i*16, stdAB); fragB[i].load(hShare, loadB + k*16 + stdAB*i*16, stdAB); } for (int i = 0; i < 2; i++) for (int j = 0; j < 2; j++) fragC[i][j].mma_sync(fragA[i], fragB[j]); } } while (++loop < loops); } while (p8 < params8); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :); asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :); tx = tid % 16; ty = tid / 16; uint loadC = ty*stdC + tx*2; uint storC = fragmentC<OP_N,OP_T,M16N16K16>::get_idx(tid, stdC, (tid & 96)); ehalf* pC = C + (bid*32*32 + tid*2); for (int i = 0; i < 2; i++) { __syncthreads(); for (int j = 0; j < 2; j++) fragC[i][j].store(fShare, storC + j*16, stdC); __syncthreads(); for (uint j = 0; j < 2; j++) { uint sum2 = to_half2( ew_add( ew_add( *(float2*)&fShare[loadC + j*8*stdC + 0*32], *(float2*)&fShare[loadC + j*8*stdC + 1*32]), ew_add( *(float2*)&fShare[loadC + j*8*stdC + 2*32], *(float2*)&fShare[loadC + j*8*stdC + 3*32]) ) ); if (accumulate) reduce_half2(pC + i*512 + j*256, sum2); else store_half2(pC + i*512 + j*256, sum2); } } } else if (!accumulate) // gate == 0 zero_half8(C + (bid*32*32 + tid*8)); } template <bool N128, bool GATED> __global__ void __launch_bounds__(128) hgemm_blocksparse_16x16x128_nt_dds( struct Plist<ehalf,8> A, struct Plist<ehalf,8> B, ehalf* C, const uint2* 
__restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint loops, uint accumulate) { const uint stdAB = 128 + 8; const uint stdC = 64 + 16; __shared__ ehalf hShare[stdAB*2*16]; float* fShare = (float*)hShare; uint tid = threadIdx.x; uint bid = blockIdx.x; float gate = GATED ? Gate[bid] : 1.0f; if (gate != 0.0f) { uint2 lut_head = Lut[bid]; uint tx = tid % 16; uint ty = tid / 16; uint n0 = tx * 8; uint idx_A = lut_head.x; uint idx_B = lut_head.y; uint offsetA0 = (idx_A*16 + ty)*N + n0; uint offsetB0 = (idx_B*16 + ty)*N + n0; uint storAB = ty*stdAB + n0; uint loadA = fragmentA<OP_N,M16N16K16>::get_idx(tid, stdAB, (tid & 96)); uint loadB = fragmentB<OP_T,M16N16K16>::get_idx(tid, stdAB, (tid & 96) + 16*stdAB); fragmentC<OP_N,OP_T,M16N16K16> fragC; int p8 = 0; #pragma unroll 1 do { const ehalf* A0; const ehalf* B0; asm("ld.param.u64 %0, [%2 + 0x160];\n\t" "ld.param.u64 %1, [%2 + 0x1a0];" : "=l"(A0), "=l"(B0) : "r"(p8)); p8 += 8; uint offsetA = offsetA0; uint offsetB = offsetB0; uint n = n0; uint loop = 0; #pragma unroll 1 do { uint4 a0 = {0}, a8 = {0}; uint4 b0 = {0}, b8 = {0}; if (N128 || n < N) { a0 = load_half8(A0 + (offsetA + N*0)); a8 = load_half8(A0 + (offsetA + N*8)); b0 = load_half8(B0 + (offsetB + N*0)); b8 = load_half8(B0 + (offsetB + N*8)); } offsetA += 128; offsetB += 128; if (!N128) n += 128; __syncthreads(); *(uint4*)&hShare[storAB + 0*stdAB + 0*stdAB] = a0; *(uint4*)&hShare[storAB + 8*stdAB + 0*stdAB] = a8; *(uint4*)&hShare[storAB + 0*stdAB + 16*stdAB] = b0; *(uint4*)&hShare[storAB + 8*stdAB + 16*stdAB] = b8; __syncthreads(); fragmentA<OP_N,M16N16K16> fragA; fragmentB<OP_T,M16N16K16> fragB; #pragma unroll for (uint j = 0; j < 2; j++) { fragA.load(hShare, loadA + j*16, stdAB); fragB.load(hShare, loadB + j*16, stdAB); fragC.mma_sync(fragA, fragB); } } while (++loop < loops); } while (p8 < params8); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :); asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :); tx = tid % 8; ty = tid / 8; uint loadC = ty*stdC + tx*2; uint storC = fragmentC<OP_N,OP_T,M16N16K16>::get_idx(tid, stdC, (tid & 96)/2); __syncthreads(); fragC.store(fShare, storC, stdC); __syncthreads(); uint sum2 = to_half2( ew_add( ew_add( *(float2*)&fShare[loadC + 0*16], *(float2*)&fShare[loadC + 1*16] ), ew_add( *(float2*)&fShare[loadC + 2*16], *(float2*)&fShare[loadC + 3*16] ) ) ); ehalf* pC = C + (bid*16*16 + tid*2); if (accumulate) reduce_half2(pC, sum2); else store_half2( pC, sum2); } else if (!accumulate) // gate == 0 zero_half2(C + (bid*16*16 + tid*2)); } template <bool N128, bool GATED> __global__ void __launch_bounds__(64) hgemm_blocksparse_8x8x128_nt_dds( struct Plist<ehalf,8> A, struct Plist<ehalf,8> B, ehalf* C, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint loops, uint accumulate) { const uint stdAB = 128 + 8; const uint stdC = 16; __shared__ ehalf hShare[stdAB*8*2]; float* fShare = (float*)hShare; uint tid = threadIdx.x; uint bid = blockIdx.x; float gate = GATED ? 
Gate[bid] : 1.0f; if (gate != 0.0f) { uint2 lut_head = Lut[bid]; uint tx = tid % 16; uint ty = tid / 16; uint n0 = tx * 8; uint idx_A = lut_head.x; uint idx_B = lut_head.y; uint offsetA0 = (idx_A*8 + ty)*N + n0; uint offsetB0 = (idx_B*8 + ty)*N + n0; uint storAB = ty*stdAB + n0; uint loadA = fragmentA<OP_N,M8N8K16>::get_idx(tid, stdAB, 0*stdAB + (tid & 32)*2); uint loadB = fragmentB<OP_T,M8N8K16>::get_idx(tid, stdAB, 8*stdAB + (tid & 32)*2); fragmentC<OP_N,OP_T,M8N8K16> fragC; int p8 = 0; #pragma unroll 1 do { const ehalf* A0; const ehalf* B0; asm("ld.param.u64 %0, [%2 + 0x160];\n\t" "ld.param.u64 %1, [%2 + 0x1a0];" : "=l"(A0), "=l"(B0) : "r"(p8)); p8 += 8; uint offsetA = offsetA0; uint offsetB = offsetB0; uint n = n0; uint loop = 0; #pragma unroll 1 do { asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever uint4 a0 = {0}, a4 = {0}; uint4 b0 = {0}, b4 = {0}; if (N128 || n < N) { a0 = load_half8(A0 + (offsetA + N*0)); a4 = load_half8(A0 + (offsetA + N*4)); b0 = load_half8(B0 + (offsetB + N*0)); b4 = load_half8(B0 + (offsetB + N*4)); } offsetA += 128; offsetB += 128; if (!N128) n += 128; __syncthreads(); *(uint4*)&hShare[storAB + 0*stdAB + 0*stdAB] = a0; *(uint4*)&hShare[storAB + 4*stdAB + 0*stdAB] = a4; *(uint4*)&hShare[storAB + 0*stdAB + 8*stdAB] = b0; *(uint4*)&hShare[storAB + 4*stdAB + 8*stdAB] = b4; __syncthreads(); fragmentA<OP_N,M8N8K16> fragA; fragmentB<OP_T,M8N8K16> fragB; #pragma unroll for (uint j = 0; j < 4; j++) { fragA.load(hShare, loadA + j*16, stdAB); fragB.load(hShare, loadB + j*16, stdAB); fragC.mma_sync(fragA, fragB); } } while (++loop < loops); } while (p8 < params8); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :); asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :); uint storC = fragmentC<OP_N,OP_T,M8N8K16>::get_idx(tid, stdC, (tid & 32)/4); __syncthreads(); fragC.store(fShare, storC, stdC); __syncthreads(); if (tid < 32) { tx = tid % 4; ty = tid / 4; uint loadC = ty*stdC + tx*2; uint sum2 = to_half2( ew_add( *(float2*)&fShare[loadC + 0*8], *(float2*)&fShare[loadC + 1*8] ) ); C += bid*8*8 + tid*2; if (accumulate) reduce_half2(C, sum2); else store_half2(C, sum2); } } else if (!accumulate && tid < 32) // gate == 0 zero_half2(C + (bid*8*8 + tid*2)); } #else // __CUDA_ARCH__ >= 700 template <uint OP_A, bool GATED> __global__ void __launch_bounds__(256,3) hgemm_blocksparse_32x128x32_xn_sdd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ A, const ehalf* __restrict__ B, ehalf* C, uint* Lock, uint locks, uint N, uint blk_a, uint blk_b, uint blk_N) { *C = 0; } template <bool N128, bool GATED> __global__ void __launch_bounds__(128,6) hgemm_blocksparse_32x32x128_nt_dds( struct Plist<ehalf,8> A, struct Plist<ehalf,8> B, ehalf* C, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint loops, uint accumulate) { *C = 0; } template <uint OP_A, bool GATED> __global__ void __launch_bounds__(128) hgemm_blocksparse_16x128x16_xn_sdd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ A, const ehalf* __restrict__ B, ehalf* C, uint* Lock, uint locks, uint N, uint blk_a, uint blk_b, uint blk_N) { *C = 0; } template <bool N128, bool GATED> __global__ void __launch_bounds__(128) hgemm_blocksparse_16x16x128_nt_dds( struct Plist<ehalf,8> A, struct Plist<ehalf,8> B, ehalf* C, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint loops, uint accumulate) { *C = 0; } template <uint OP_A, bool GATED> __global__ void 
__launch_bounds__(128) hgemm_blocksparse_8x128x8_xn_sdd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ A, const ehalf* __restrict__ B, ehalf* C, uint* Lock, uint locks, uint N, uint blk_a, uint blk_b, uint blk_N) { *C = 0; } template <bool N128, bool GATED> __global__ void __launch_bounds__(64) hgemm_blocksparse_8x8x128_nt_dds( struct Plist<ehalf,8> A, struct Plist<ehalf,8> B, ehalf* C, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint loops, uint accumulate) { *C = 0; } #endif // __CUDA_ARCH__ >= 700 cudaError_t hgemm_blocksparse_xn_128_sdd(const ehalf* X, const ehalf* W, ehalf* Y, bsmm_params* params, uint op) { dim3 grid(params->blk_a*params->blk_b, params->blk_B, params->blk_A); uint blk_N = params->blk_b * params->blk_B; //cuMemsetD16Async((CUdeviceptr)Y, 0, params->K * params->N, params->stream); if (params->locks > 0) cuMemsetD32Async((CUdeviceptr)params->Lock, 0, blk_N * params->locks * 2, params->stream); const uint2* Lut = (const uint2*)params->Lut; uint* Lock = (uint*)params->Lock; uint shared = params->shared + params->shared/2; if (params->bsize == 8) { shared += 4; if (params->Gate == 0) { if (op == OP_N) hgemm_blocksparse_8x128x8_xn_sdd<OP_N,false><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_8x128x8_xn_sdd<OP_T,false><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); } else { if (op == OP_N) hgemm_blocksparse_8x128x8_xn_sdd<OP_N, true><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_8x128x8_xn_sdd<OP_T, true><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); } } else if (params->bsize == 16) { if (params->Gate == 0) { if (op == OP_N) hgemm_blocksparse_16x128x16_xn_sdd<OP_N,false><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_16x128x16_xn_sdd<OP_T,false><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); } else { if (op == OP_N) hgemm_blocksparse_16x128x16_xn_sdd<OP_N, true><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_16x128x16_xn_sdd<OP_T, true><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); } } else if (params->bsize == 32) { if (params->Gate == 0) { if (op == OP_N) hgemm_blocksparse_32x128x32_xn_sdd<OP_N,false><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_32x128x32_xn_sdd<OP_T,false><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); } else { if (op == OP_N) hgemm_blocksparse_32x128x32_xn_sdd<OP_N, true><<<grid,128,shared,params->stream>>>(Lut, params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_32x128x32_xn_sdd<OP_T, true><<<grid,128,shared,params->stream>>>(Lut, 
params->Gate, W, X, Y, Lock, params->locks, params->N, params->blk_a, params->blk_b, blk_N); } } return cudaPeekAtLastError(); } cudaError_t hgemm_blocksparse_xn_128_sdd(const bhalf* X, const bhalf* W, bhalf* Y, bsmm_params* params, uint op) { return cudaSuccess; } cudaError_t hgemm_blocksparse_xn_128_sdd(const float* X, const float* W, float* Y, bsmm_params* params, uint op) { return cudaSuccess; } cudaError_t hgemm_blocksparse_nt_128_dds(const ehalf* X, const ehalf* E, ehalf* U, bsmm_params* params) { struct Plist<ehalf,8>* X8 = (struct Plist<ehalf,8>*)X; struct Plist<ehalf,8>* E8 = (struct Plist<ehalf,8>*)E; const uint2* Lut = (const uint2*)params->Lut; uint accumulate = params->beta == 1.0f; uint pcount8 = params->pcount * 8; uint N = params->N; uint loops = CEIL_DIV(N, 128); bool k128 = (N & 127) == 0; dim3 grid(params->blocks, 1, 1); if (params->bsize == 8) { if (params->Gate == 0) { if (k128) hgemm_blocksparse_8x8x128_nt_dds< true,false><<<grid,64,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); else hgemm_blocksparse_8x8x128_nt_dds<false,false><<<grid,64,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); } else { if (k128) hgemm_blocksparse_8x8x128_nt_dds< true, true><<<grid,64,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); else hgemm_blocksparse_8x8x128_nt_dds<false, true><<<grid,64,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); } } else if (params->bsize == 16) { if (params->Gate == 0) { if (k128) hgemm_blocksparse_16x16x128_nt_dds< true,false><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); else hgemm_blocksparse_16x16x128_nt_dds<false,false><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); } else { if (k128) hgemm_blocksparse_16x16x128_nt_dds< true, true><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); else hgemm_blocksparse_16x16x128_nt_dds<false, true><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); } } else if (params->bsize == 32) { if (params->Gate == 0) { if (k128) hgemm_blocksparse_32x32x128_nt_dds< true,false><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); else hgemm_blocksparse_32x32x128_nt_dds<false,false><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); } else { if (k128) hgemm_blocksparse_32x32x128_nt_dds< true, true><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); else hgemm_blocksparse_32x32x128_nt_dds<false, true><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, loops, accumulate); } } return cudaPeekAtLastError(); } cudaError_t hgemm_blocksparse_nt_128_dds(const bhalf* X, const bhalf* E, bhalf* U, bsmm_params* params) { return cudaSuccess; } cudaError_t hgemm_blocksparse_nt_128_dds(const float* X, const float* E, float* U, bsmm_params* params) { return cudaSuccess; } #endif // GOOGLE_CUDA
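// A stripped-down sketch (not part of the original file) of the inter-CTA accumulation
// protocol the *_xn_sdd kernels above follow when idx_Lock != 0: one thread spins on an
// atomicCAS lock, the first CTA through initializes the output tile and sets a counter,
// and later CTAs accumulate into it before releasing the lock. The element type, names and
// flat tile layout here are illustrative assumptions, not the library's API.
__global__ void tile_accumulate_sketch(float* out, unsigned int* lock, unsigned int* count,
                                       const float* partial, int tile_elems)
{
    if (threadIdx.x == 0)
        while (atomicCAS(lock, 0u, 1u) != 0u) { } // acquire the per-tile spin lock
    __syncthreads();

    bool first = (*count == 0);                   // first writer initializes the tile
    __syncthreads();

    for (int i = threadIdx.x; i < tile_elems; i += blockDim.x)
        out[i] = first ? partial[i] : out[i] + partial[i];

    if (threadIdx.x == 0 && first) *count = 1;

    __threadfence();                              // make the tile visible to the next CTA
    __syncthreads();
    if (threadIdx.x == 0) atomicExch(lock, 0u);   // release
}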
#include <basic_types.h> #include <util.h> #include <error.h> #include <types.h> #include <matrix_coloring/min_max_2ring.h> #include <cusp/format.h> #include <cusp/copy.h> #include <cusp/detail/random.h> #include <thrust/count.h> #include <thrust/extrema.h> #include <sm_utils.inl> #define COLORING_DEBUG 1 // Pseudo-random number generator namespace amgx { static __host__ __device__ unsigned int hash_function(unsigned int a, unsigned int seed, unsigned int rows = 0) { a ^= seed; a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) + (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a ^ 0xd3a2646c) + (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) + (a >> 16); return a; } struct is_zero { __host__ __device__ bool operator()(int x) { return x == 0; } }; // --------------------------- // Kernels // --------------------------- template< int CTA_SIZE, int WARP_SIZE > __global__ void count_gtlt_kernel( const int A_num_rows, const int *__restrict A_rows, const int *__restrict A_cols, const int *__restrict A_colors, int *A_gtlt_count, const int seed) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA; // Thread coordinates. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Row identifier. int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id; // Iterate over the rows of the matrix. for ( ; row_id < A_num_rows ; row_id += NUM_WARPS_PER_GRID ) { int row_color = A_colors[row_id]; if ( row_color != 0 ) { if ( lane_id == 0 ) { A_gtlt_count[row_id] = -1; } continue; } // Hash my row id. int row_hash = hash_function( row_id, seed ); // The number of vertices that are greater/smaller than me. int gt_count = 0, lt_count = 0; // Iterators over my row. int row_begin = A_rows[row_id ]; int row_end = A_rows[row_id + 1]; for ( ; row_begin < row_end ; row_begin += WARP_SIZE ) { // Iterator. int row_it = row_begin + lane_id; // Get the column index (if the iterator is valid). int col_id = -1; if ( row_it < row_end ) { col_id = A_cols[row_it]; } // Each thread hashes its column id. int col_hash = hash_function( col_id, seed ); // Get the color of the column. int col_color = -1; if ( row_it < row_end && col_id < A_num_rows) { col_color = A_colors[col_id]; } // Threads determine if they are greater than the row hash. int gt_pred = col_color == 0 && col_hash > row_hash; int lt_pred = col_color == 0 && col_hash < row_hash; // Count greater/smaller neighbors. gt_count += __popc( utils::ballot( gt_pred ) ); lt_count += __popc( utils::ballot( lt_pred ) ); } // The warp leader stores the result. int my_gtlt_count = (gt_count << 16) | lt_count; if ( lane_id == 0 ) { A_gtlt_count[row_id] = my_gtlt_count; } } } template< int CTA_SIZE, int WARP_SIZE, bool LATE_REJECTION > __global__ void color_kernel( const int A_num_rows, const int *A_rows, const int *A_cols, const int *A_gtlt_count, const int current_color, const int weakness_bound, int *A_colors ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA; // Thread coordinates. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Row identifier. int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id; // Iterate over the rows of the matrix. for ( ; row_id < A_num_rows ; row_id += NUM_WARPS_PER_GRID ) { int row_color = A_colors[row_id]; if ( row_color != 0 ) // Already colored!!! { continue; } // The number of vertices that are greater/smaller than me. 
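// count_gtlt_kernel packs both counts into a single int: neighbors with a greater hash in
// the high 16 bits, neighbors with a smaller hash in the low 16 bits; -1 marks a row that
// is already colored.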
int row_gtlt_count = A_gtlt_count[row_id]; // Split gtlt_count into 2. int row_gt_count = row_gtlt_count >> 16; int row_lt_count = row_gtlt_count & 0xffff; // Min-max algorithm. if ( row_gt_count == 0 && row_lt_count == 0 ) { if ( lane_id == 0 ) { A_colors[row_id] = current_color + (row_id & 1); } continue; } if ( row_gt_count == 0 ) { if ( lane_id == 0 ) { A_colors[row_id] = current_color; } continue; } if ( row_lt_count == 0 ) { if ( lane_id == 0 ) { A_colors[row_id] = current_color + 1; } continue; } // Do we skip it. int candidate = 1; // Predicates. Is a vertex min/max. int is_max_vertex = row_gt_count <= weakness_bound; int is_min_vertex = row_lt_count <= weakness_bound; // Iterators over my row. int row_begin = A_rows[row_id ]; int row_end = A_rows[row_id + 1]; for ( ; row_begin < row_end ; row_begin += WARP_SIZE ) { // Iterator. int row_it = row_begin + lane_id; // Get the column index (if the iterator is valid). int col_id = -1; if ( row_it < row_end ) { col_id = A_cols[row_it]; } // Get the color of the column (it could help late rejection). if ( LATE_REJECTION ) { int col_color = -1; if ( row_it < row_end && col_id < A_num_rows) { col_color = A_colors[col_id]; } // Late rejection test. if ( col_color == current_color ) { is_max_vertex = 0; } if ( col_color == current_color + 1 ) { is_min_vertex = 0; } } // Get the gt/lt count. int col_gtlt_count = -1; if ( row_it < row_end && col_id < A_num_rows ) { col_gtlt_count = A_gtlt_count[col_id]; } // Split gtlt_count into 2. col_gt/lt_count == -1 if already colored. int col_gt_count = col_gtlt_count >> 16; int col_lt_count = col_gtlt_count & 0xffff; // Threads determine if they are greater than the row hash. if ( col_gtlt_count != -1 ) { is_max_vertex &= col_gt_count > row_gt_count || (col_gt_count == row_gt_count && col_id <= row_id); is_min_vertex &= col_lt_count > row_lt_count || (col_lt_count == row_lt_count && col_id >= row_id); } } // The warp leader stores the result. if ( candidate && utils::all( is_max_vertex ) ) { if ( lane_id == 0 ) { A_colors[row_id] = current_color; } continue; } if ( candidate && utils::all( is_min_vertex ) ) { if ( lane_id == 0 ) { A_colors[row_id] = current_color + 1; } continue; } } } template< int CTA_SIZE, int WARP_SIZE > __global__ void dbg_check_coloring_kernel( const int A_num_rows, const int *A_rows, const int *A_cols, const int *A_colors, const int *A_gtlt_count, int *error_found ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA; // Thread coordinates. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Row identifier. int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id; // Iterate over the rows of the matrix. for ( ; row_id < A_num_rows ; row_id += NUM_WARPS_PER_GRID ) { int row_color = A_colors[row_id]; // Iterators over my row. int row_begin = A_rows[row_id ]; int row_end = A_rows[row_id + 1]; for ( ; row_begin < row_end ; row_begin += WARP_SIZE ) { // Iterator. int row_it = row_begin + lane_id; // Get the column index (if the iterator is valid). int col_id = -1; if ( row_it < row_end ) { col_id = A_cols[row_it]; } // Get the color of the column. int col_color = -1; if ( row_it < row_end && col_id < A_num_rows) { col_color = A_colors[col_id]; } // Is there something wrong ?? 
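// Two distinct neighbors carrying the same nonzero color mean the coloring is invalid.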
if ( row_id != col_id && row_color == col_color && row_color != 0) { if ( A_gtlt_count != NULL && !error_found[0] ) { //printf( "row_id=%d, row_color=%d, col_id=%d, col_color=%d\n", row_id, row_color, col_id, col_color ); /* // The number of vertices that are greater/smaller than me. int row_gtlt_count = A_gtlt_count[row_id]; int row_gt_count = row_gtlt_count >> 16; int row_lt_count = row_gtlt_count & 0xffff; printf( "row_gt_count=%d, row_gt_count=%d\n", row_gt_count, row_lt_count ); int col_gtlt_count = A_gtlt_count[col_id]; int col_gt_count = col_gtlt_count >> 16; int col_lt_count = col_gtlt_count & 0xffff; printf( "col_gt_count=%d, col_gt_count=%d\n", col_gt_count, col_lt_count ); */ } error_found[0] = 1; atomicAdd(error_found + 1, 1); } } } } // --------------------------- // Methods // --------------------------- template< class T_Config > Min_Max_2Ring_Matrix_Coloring_Base<T_Config>::Min_Max_2Ring_Matrix_Coloring_Base( AMG_Config &cfg, const std::string &cfg_scope) : MatrixColoring<T_Config>(cfg, cfg_scope) { if ( this->m_coloring_level != 1 ) { FatalError( "Not implemented for coloring_level != 1", AMGX_ERR_NOT_SUPPORTED_TARGET ); } if ( cfg.AMG_Config::getParameter<IndexType>("determinism_flag", "default") ) { m_uncolored_fraction = 0.0; } else { m_uncolored_fraction = cfg.AMG_Config::getParameter<double>("max_uncolored_percentage", cfg_scope); } m_weakness_bound = cfg.AMG_Config::getParameter<int>( "weakness_bound", cfg_scope ); m_late_rejection = cfg.AMG_Config::getParameter<int>( "late_rejection", cfg_scope ) != 0; } #if !NEW_COLORER_TESTS // Block version template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void Min_Max_2Ring_Matrix_Coloring<TemplateConfig<AMGX_device, V, M, I> >::colorMatrix( Matrix_d &A ) { ViewType oldView = A.currentView(); this->m_row_colors.resize( A.row_offsets.size() - 1 ); if (this->m_halo_coloring == SYNC_COLORS) { A.setView(ALL); } else { A.setViewExterior(); } const int num_rows = A.get_num_rows(); const int max_uncolored_rows = static_cast<int>( this->m_uncolored_fraction * num_rows ); const int CTA_SIZE = 128; const int NUM_WARPS_PER_CTA = CTA_SIZE / 32; const int GRID_SIZE = std::min( 2048, (num_rows + NUM_WARPS_PER_CTA - 1) / NUM_WARPS_PER_CTA ); this->m_num_colors = 1; thrust::fill( this->m_row_colors.begin(), this->m_row_colors.end(), 0 ); cudaCheckError(); device_vector_alloc<int> gtlt_count( num_rows ); for ( int num_uncolored = num_rows ; num_uncolored > max_uncolored_rows ; ) { count_gtlt_kernel<CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE>>>( num_rows, A.row_offsets.raw(), A.col_indices.raw(), this->m_row_colors.raw(), thrust::raw_pointer_cast( &gtlt_count.front() ), 0); cudaCheckError(); if ( this->m_late_rejection ) color_kernel<CTA_SIZE, 32, true> <<< GRID_SIZE, CTA_SIZE>>>( num_rows, A.row_offsets.raw(), A.col_indices.raw(), thrust::raw_pointer_cast( &gtlt_count.front() ), this->m_num_colors, this->m_weakness_bound, this->m_row_colors.raw() ); else color_kernel<CTA_SIZE, 32, false> <<< GRID_SIZE, CTA_SIZE>>>( num_rows, A.row_offsets.raw(), A.col_indices.raw(), thrust::raw_pointer_cast( &gtlt_count.front() ), this->m_num_colors, this->m_weakness_bound, this->m_row_colors.raw() ); cudaCheckError(); #if 0 device_vector_alloc<int> error_found( 1, 0 ); dbg_check_coloring_kernel<CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE>>>( num_rows, A.row_offsets.raw(), A.col_indices.raw(), this->m_row_colors.raw(), thrust::raw_pointer_cast( &gtlt_count.front() ), thrust::raw_pointer_cast( &error_found.front() ) ); cudaCheckError(); #endif 
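// Each min-max pass can assign at most two new colors (one for the local
// maxima, one for the local minima), so advance the color counter by two and
// recount the rows that are still uncolored before the next pass.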
this->m_num_colors += 2; num_uncolored = (int) thrust::count_if( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, is_zero() ); cudaCheckError(); } this->m_num_colors = thrust_wrapper::reduce( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, 0, thrust::maximum<int>() ) + 1; cudaCheckError(); #if 0 device_vector_alloc<int> error_found( 1, 0 ); dbg_check_coloring_kernel<CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE>>>( num_rows, A.row_offsets.raw(), A.col_indices.raw(), this->m_row_colors.raw(), NULL, thrust::raw_pointer_cast( &error_found.front() ) ); cudaCheckError(); if ( error_found[0] != 0 ) { std::cout << "INVALID COLORING !!! Two neighbors have the same color!!!" << std::endl; } #endif A.setView(oldView); } #else template< int CTA_SIZE, int WARP_SIZE > __global__ void dbg_coloring_histogram_kernel( int *colors_count, const int A_num_rows, const int *A_colors, const int n_colors ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ unsigned int color_counts[256]; if (threadIdx.x < n_colors) { color_counts[threadIdx.x] = 0; } __syncthreads(); for (; tid < A_num_rows; tid += blockDim.x * gridDim.x) { int col = A_colors[tid]; atomicAdd(color_counts + col, 1); } __syncthreads(); if (threadIdx.x < n_colors) { atomicAdd(colors_count + threadIdx.x, color_counts[threadIdx.x]); } } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void Min_Max_2Ring_Matrix_Coloring<TemplateConfig<AMGX_device, V, M, I> >::debug_coloring(Matrix_d &A, int step) { #if COLORING_DEBUG const int num_rows = A.get_num_rows(); const int CTA_SIZE = 128; const int NUM_WARPS_PER_CTA = CTA_SIZE / 32; const int GRID_SIZE = std::min( 2048, (num_rows + NUM_WARPS_PER_CTA - 1) / NUM_WARPS_PER_CTA ); int num_uncolored = (int) thrust::count_if( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, is_zero() ); cudaCheckError(); int maxr = A.row_offsets[1] - A.row_offsets[0]; /*for(int i=2;i<=num_rows;i++) { int d=A.row_offsets[i]-A.row_offsets[i-1]; if(d>maxr) { maxr=d; } }*/ device_vector_alloc<int> error_found( 2, 0 ); error_found[0] = 0; error_found[1] = 0; dbg_check_coloring_kernel<CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE>>>( num_rows, A.row_offsets.raw(), A.col_indices.raw(), this->m_row_colors.raw(), 0,//thrust::raw_pointer_cast( &gtlt_count.front() ), thrust::raw_pointer_cast( &error_found.front() ) ); cudaCheckError(); { device_vector_alloc<int> color_histogram(this->m_num_colors + 1); for (int i = 0; i < color_histogram.size(); i++) { color_histogram[i] = 0; } dbg_coloring_histogram_kernel<CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE>>>( thrust::raw_pointer_cast(&color_histogram.front()), num_rows, this->m_row_colors.raw(), this->m_num_colors + 1); cudaCheckError(); for (int i = 0; i < color_histogram.size(); i++) { std::cout << step << "\t" << "H[" << i << "] = " << color_histogram[i] << std::endl; } std::cout << step << "\t" << "Errors=" << error_found[1] << std::endl; std::cout << step << "\t" << "Uncolored=" << num_uncolored << std::endl; std::cout << step << "\t" << "Num colors=" << this->m_num_colors << "/" << maxr << std::endl; } #endif } template< int CTA_SIZE, int WARP_SIZE, bool LATE_REJECTION > __global__ void color_kernel_greedy( const int A_num_rows, const int *A_rows, const int *A_cols, const int *A_gtlt_count, const int current_color, const int weakness_bound, int *A_colors ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA; // Thread coordinates. 
const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Row identifier. int row_id = blockIdx.x * NUM_WARPS_PER_CTA + warp_id; // Iterate over the rows of the matrix. for ( ; row_id < A_num_rows ; row_id += NUM_WARPS_PER_GRID ) { int dice = hash_function(row_id, 19881988, 0); int row_color = A_colors[row_id]; if ( row_color != 0 ) // Already colored!!! { continue; } // The number of vertices that are greater/smaller than me. int row_gtlt_count = A_gtlt_count[row_id]; // Split gtlt_count into 2. int row_gt_count = row_gtlt_count >> 16; int row_lt_count = row_gtlt_count & 0xffff; // Do we skip it. int candidate = 1; // Predicates. Is a vertex min/max. int is_max_vertex = true;//row_gt_count <= weakness_bound; int is_min_vertex = true;//row_lt_count <= weakness_bound; // Iterators over my row. int row_begin = A_rows[row_id ]; int row_end = A_rows[row_id + 1]; unsigned long long used_colors = 0ull; for ( ; row_begin < row_end ; row_begin += WARP_SIZE ) { // Iterator. int row_it = row_begin + lane_id; // Get the column index (if the iterator is valid). int col_id = -1; if ( row_it < row_end ) { col_id = A_cols[row_it]; } if ( row_it < row_end && col_id < A_num_rows ) { int col_color = A_colors[col_id]; if ( col_color > 0 ) { used_colors |= (1ull << (64 - col_color)); } } // Get the gt/lt count. int col_gtlt_count = -1; if ( row_it < row_end && col_id < A_num_rows ) { col_gtlt_count = A_gtlt_count[col_id]; } // Split gtlt_count into 2. col_gt/lt_count == -1 if already colored. int col_gt_count = col_gtlt_count >> 16; int col_lt_count = col_gtlt_count & 0xffff; // Threads determine if they are greater than the row hash. //if( col_gt_count != -1 ) // is_max_vertex &= col_gt_count < row_gt_count || (col_gt_count == row_gt_count && col_id < row_id); //if( col_lt_count != -1 ) // is_min_vertex &= col_lt_count /*>*/ < row_lt_count || (col_lt_count == row_lt_count && col_id > row_id); if ( col_gtlt_count != -1 ) { is_max_vertex &= col_gt_count > row_gt_count || (col_gt_count == row_gt_count && col_id >= row_id); is_min_vertex &= col_lt_count > row_lt_count || (col_lt_count == row_lt_count && col_id >= row_id); } } is_min_vertex = false; //reduce used colors bit by bit. #pragma unroll for (int i = WARP_SIZE / 2; i >= 1; i /= 2) { int tmp_hi = __double2hiint( __longlong_as_double( used_colors ) ); int tmp_lo = __double2hiint( __longlong_as_double( used_colors ) ); tmp_hi = utils::shfl_xor(tmp_hi, i, WARP_SIZE); tmp_lo = utils::shfl_xor(tmp_lo, i, WARP_SIZE); long long tmp = __double_as_longlong(__hiloint2double(tmp_hi, tmp_lo)); used_colors |= tmp; } int my_color = 64 - utils::bfind( ~used_colors ); if (my_color <= 0) { my_color = 1; } /*int sets=0; for(int c=1; c<=64; c++) { unsigned long long int b = (1ull << (64-c)); if((~used_colors & b) && sets < (dice % 3)-1) { sets++; my_color = c; } }*/ // The warp leader stores the result. if ( candidate && utils::all( is_max_vertex ) ) { if ( lane_id == 0 ) { A_colors[row_id] = my_color; } continue; } if ( candidate && utils::all( is_min_vertex ) ) { if ( lane_id == 0 ) { A_colors[row_id] = my_color + 1; } continue; } } } template< int CTA_SIZE, int WARP_SIZE> __global__ void color_kernel_reassign_tail_thread( const int A_num_rows, const int *A_rows, const int *A_cols, int num_colors, int first_tail_color, int *A_colors ) { const int NUM_THREADS_PER_GRID = gridDim.x * CTA_SIZE; // Row identifier. int row_id = blockIdx.x * CTA_SIZE + threadIdx.x; // Iterate over rows. 
for ( ; row_id < A_num_rows ; row_id += NUM_THREADS_PER_GRID ) { unsigned long long used_colors = 0ull; int row_color = A_colors[row_id]; if ( row_color < first_tail_color ) // Already colored!!! { continue; } int row_hash = hash_function(row_id, 198719871987L, 0); int row_begin = A_rows[row_id ]; int row_end = A_rows[row_id + 1]; for ( int row_it = row_begin; row_it < row_end ; row_it++) { int col_id = A_cols[row_it]; if (col_id >= A_num_rows) { continue; } int col_hash = hash_function( col_id, 1987, 0 ); int col_color = A_colors[col_id]; } } } __global__ void unassign_color(int *A_colors, int num_rows) { for (int tid = threadIdx.x + blockIdx.x * blockDim.x; tid < num_rows; tid += gridDim.x * blockDim.x) { if (tid % 9 == 0) { A_colors[tid] = 0; } } } // Block version template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void Min_Max_2Ring_Matrix_Coloring<TemplateConfig<AMGX_device, V, M, I> >::color_step( Matrix_d &A, int seed ) { ViewType oldView = A.currentView(); this->m_row_colors.resize( A.row_offsets.size() - 1 ); if (this->m_halo_coloring == SYNC_COLORS) { A.setView(ALL); } else { A.setViewExterior(); } const int num_rows = A.get_num_rows(); const int max_uncolored_rows = static_cast<int>( this->m_uncolored_fraction * num_rows ); const int CTA_SIZE = 128; const int NUM_WARPS_PER_CTA = CTA_SIZE / 32; const int GRID_SIZE = std::min( 2048, (num_rows + NUM_WARPS_PER_CTA - 1) / NUM_WARPS_PER_CTA ); device_vector_alloc<int> gtlt_count( num_rows ); for ( int num_uncolored = num_rows ; num_uncolored > max_uncolored_rows ; ) { count_gtlt_kernel<CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE>>>( num_rows, A.row_offsets.raw(), A.col_indices.raw(), this->m_row_colors.raw(), thrust::raw_pointer_cast( &gtlt_count.front() ), seed); cudaCheckError(); if ( this->m_late_rejection ) color_kernel_greedy<CTA_SIZE, 32, true> <<< GRID_SIZE, CTA_SIZE>>>( num_rows, A.row_offsets.raw(), A.col_indices.raw(), thrust::raw_pointer_cast( &gtlt_count.front() ), this->m_num_colors, this->m_weakness_bound, this->m_row_colors.raw() ); else color_kernel_greedy<CTA_SIZE, 32, false> <<< GRID_SIZE, CTA_SIZE>>>( num_rows, A.row_offsets.raw(), A.col_indices.raw(), thrust::raw_pointer_cast( &gtlt_count.front() ), this->m_num_colors, this->m_weakness_bound, this->m_row_colors.raw() ); cudaCheckError(); this->m_num_colors += 2; num_uncolored = (int) thrust::count_if( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, is_zero() ); cudaCheckError(); } this->m_num_colors = thrust_wrapper::reduce( this->m_row_colors.begin(), this->m_row_colors.begin() + num_rows, 0, thrust::maximum<int>() ) + 1; } // Block version template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void Min_Max_2Ring_Matrix_Coloring<TemplateConfig<AMGX_device, V, M, I> >::color_matrix( Matrix_d &A ) { ViewType oldView = A.currentView(); this->m_row_colors.resize( A.row_offsets.size() - 1 ); if (this->m_halo_coloring == SYNC_COLORS) { A.setView(ALL); } else { A.setViewExterior(); } this->m_num_colors = 1; thrust::fill( this->m_row_colors.begin(), this->m_row_colors.end(), 0 ); cudaCheckError(); color_step(A, 0); A.setView(oldView); } #endif #define AMGX_CASE_LINE(CASE) template class Min_Max_2Ring_Matrix_Coloring_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class Min_Max_2Ring_Matrix_Coloring<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // end namespace amgx
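/*
 * Illustrative sketch (not part of AMGX): color_kernel_greedy above tracks the
 * colors already used by a row's neighbors in a 64-bit mask, mapping color c to
 * bit (64 - c), and then picks 64 - bfind(~used_colors), i.e. the smallest
 * color whose bit is still clear. The hypothetical host helper below mirrors
 * that selection for at most 64 colors, which can be handy when spot-checking
 * the kernel's output on the CPU.
 */
static inline int first_free_color_host(unsigned long long used_colors)
{
    // Scan colors from 1 upwards; color c occupies bit (64 - c) of the mask.
    for (int c = 1; c <= 64; ++c)
    {
        if ((used_colors & (1ull << (64 - c))) == 0)
        {
            return c;
        }
    }
    return -1; // all 64 representable colors are taken
}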
#include <cooperative_groups.h> #include <torch/extension.h> using namespace cooperative_groups; typedef torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> float_accessor_4d; typedef torch::PackedTensorAccessor32<int64_t, 4, torch::RestrictPtrTraits> int64_accessor_4d; typedef torch::PackedTensorAccessor32<int64_t, 3, torch::RestrictPtrTraits> int64_accessor_3d; typedef torch::PackedTensorAccessor32<int, 4, torch::RestrictPtrTraits> int32_accessor_4d; typedef torch::PackedTensorAccessor32<int, 3, torch::RestrictPtrTraits> int32_accessor_3d; typedef torch::PackedTensorAccessor32<int, 2, torch::RestrictPtrTraits> int32_accessor_2d; typedef torch::PackedTensorAccessor32<int, 1, torch::RestrictPtrTraits> int32_accessor_1d; #define EPB 16 #define QPB 16 #define KPB 16 /* The idea is to follow GEMM like CUDA implementation. We assume the Query matrix is re-arranged such that first block of queries corresponds to the first cluster, next to the next set of clusters and so on. Each block only operates on single cluster. We implement the rest of the kernel similar to this https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory */ __global__ void clustered_sparse_dot_product_kernel( const float_accessor_4d queries, const float_accessor_4d keys, const int32_accessor_4d topk, const int32_accessor_2d indx_maps, const int32_accessor_3d q_start_indx, const int32_accessor_3d q_end_indx, float_accessor_4d product ) { int E = queries.size(3); int K = topk.size(3); extern __shared__ float shared_mem[]; float* shared_queries = shared_mem; float* shared_keys = shared_queries + (EPB*QPB); float* shared_topk = shared_keys + (EPB*KPB); int n = indx_maps[blockIdx.x][0]; int h = indx_maps[blockIdx.x][1]; int c = indx_maps[blockIdx.x][2]; int l_end = q_end_indx[n][h][c]; if ((threadIdx.x == 0)) { if ((threadIdx.y + (blockIdx.y * KPB)) < K) { shared_topk[threadIdx.y] = topk[n][h][c][threadIdx.y + (blockIdx.y * KPB)]; } else { shared_topk[threadIdx.y] = -1; } } __syncthreads(); float res = 0.0; int rq_indx = q_start_indx[n][h][c] + (indx_maps[blockIdx.x][3] * QPB) + threadIdx.x; int rk_indx = shared_topk[threadIdx.y]; int cq_indx = threadIdx.y; int ck_indx = threadIdx.x; for (int m=0; m<((E + EPB - 1)/EPB); m++) { cq_indx = m*EPB + threadIdx.y; ck_indx = m*EPB + threadIdx.x; if ((rq_indx < l_end) && (cq_indx < E)) { shared_queries[threadIdx.x + (QPB * threadIdx.y)] = queries[n][h][rq_indx][cq_indx]; } else { shared_queries[threadIdx.x + (QPB * threadIdx.y)] = 0; } if ((rk_indx > -1) && (ck_indx) < E) { shared_keys[threadIdx.y + (KPB * threadIdx.x)] = keys[n][h][rk_indx][ck_indx]; } else{ shared_keys[threadIdx.y + (KPB * threadIdx.x)] = 0; } __syncthreads(); for (int e=0; e<EPB; e++) { res += shared_queries[threadIdx.x + (EPB * e)] * shared_keys[threadIdx.y + (EPB * e)]; } __syncthreads(); } if ((rq_indx < l_end) && ((threadIdx.y + (blockIdx.y * KPB)) < K)) { product[n][h][rq_indx][threadIdx.y + (blockIdx.y * KPB)] = res; } } /* Since all of our kernels are implemented to work on a single cluster at a time. 
This simply creates a bunch of mappings for us to know which blockId operates on which cluster, sequenceid, head and the starting query id */ __global__ void create_maps_kernel( const int32_accessor_3d block_counts, const int32_accessor_3d block_counts_cumsum, const int n_block_per_query, int32_accessor_2d indx_maps ) { int N = block_counts.size(0); int H = block_counts.size(1); int C = block_counts.size(2); int full_idx = blockIdx.x * blockDim.x + threadIdx.x; int n = full_idx / (H*C); int h = (full_idx - n*H*C) / C; int c = full_idx % C; if (n >= N) { return; } int indx = block_counts_cumsum[n][h][c]; int blocks = block_counts[n][h][c]; indx -= blocks; for (int b=0; b<blocks; b++) { indx_maps[indx][0] = n; indx_maps[indx][1] = h; indx_maps[indx][2] = c; indx_maps[indx][3] = int(b / n_block_per_query); indx += 1; } } /* Sparse dot-product between Queries and Keys. The keys to multiplied are defined by the top-k matrix */ void clustered_sparse_dot_product( const torch::Tensor Q, const torch::Tensor K, const torch::Tensor topk, const torch::Tensor q_start_indx, const torch::Tensor q_end_indx, const torch::Tensor block_counts, const torch::Tensor block_counts_cumsum, const int total_blocks, torch::Tensor indx_maps, torch::Tensor product ) { // Make sure that we are using the correct GPU device torch::DeviceGuard _guard(Q.device()); int N = Q.size(0); int H = Q.size(1); int L = Q.size(2); int E = Q.size(3); int k = topk.size(3); int C = topk.size(2); int S = K.size(2); int threads = 1024; int blocks = ((N*H*C) + threads - 1) / threads; create_maps_kernel<<<blocks, threads>>>( block_counts.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), block_counts_cumsum.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), 1, indx_maps.packed_accessor32<int, 2, torch::RestrictPtrTraits>() ); dim3 dimBlock(QPB, KPB); dim3 dimGrid(total_blocks, (k + KPB - 1)/KPB); const int shared_mem = (((KPB + QPB) * EPB) + KPB) * sizeof(float); clustered_sparse_dot_product_kernel<<<dimGrid, dimBlock, shared_mem>>>( Q.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), K.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), topk.packed_accessor32<int, 4, torch::RestrictPtrTraits>(), indx_maps.packed_accessor32<int, 2, torch::RestrictPtrTraits>(), q_start_indx.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), q_end_indx.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), product.packed_accessor32<float, 4, torch::RestrictPtrTraits>() ); } /* Once again each block works for a single cluster Each thread sums over all the responsible keys (in chunks) */ __global__ void clustered_sparse_dot_queries_backward_kernel( const float_accessor_4d grad_out, const float_accessor_4d queries, const float_accessor_4d keys, const int32_accessor_4d topk, const int32_accessor_2d indx_maps, const int32_accessor_3d q_start_indx, const int32_accessor_3d q_end_indx, float_accessor_4d grad_q, float_accessor_4d grad_k ) { int E = grad_q.size(3); int K = topk.size(3); extern __shared__ float shared_mem[]; float* shared_grad = shared_mem; float* shared_keys = shared_grad + (KPB*QPB); float* shared_queries = shared_keys + (EPB*KPB); float* shared_topk = shared_queries + (EPB*QPB); int n = indx_maps[blockIdx.x][0]; int h = indx_maps[blockIdx.x][1]; int c = indx_maps[blockIdx.x][2]; int l_end = q_end_indx[n][h][c]; // Load all the top indices for all keys int thread_id = threadIdx.x + (threadIdx.y * blockDim.x); for (int t=thread_id; t<K; t+=(blockDim.x*blockDim.y)) { shared_topk[t] = topk[n][h][c][t]; } __syncthreads(); float res 
= 0.0; int rq_indx = q_start_indx[n][h][c] + (indx_maps[blockIdx.x][3] * QPB) + threadIdx.x; int e_indx = threadIdx.y + (blockIdx.y * EPB); float res_k = 0.0; for (int kb=0; kb<((K + KPB - 1)/KPB); kb++) { if ((rq_indx < l_end) && (e_indx < E)) { shared_queries[threadIdx.x + (QPB * threadIdx.y)] = queries[n][h][rq_indx][e_indx]; } else { shared_queries[threadIdx.x + (QPB * threadIdx.y)] = 0; } int rk_indx = (kb*KPB) + threadIdx.y; if ((rq_indx < l_end) && (rk_indx < K)) { shared_grad[threadIdx.x + (QPB * threadIdx.y)] = grad_out[n][h][rq_indx][rk_indx]; } else { shared_grad[threadIdx.x + (QPB * threadIdx.y)] = 0; } rk_indx = kb*KPB + threadIdx.x; if ((rk_indx < K) && (e_indx < E)){ shared_keys[threadIdx.x + (KPB * threadIdx.y)] = keys[n][h][shared_topk[rk_indx]][e_indx]; } else{ shared_keys[threadIdx.x + (KPB * threadIdx.y)] = 0; } __syncthreads(); for (int k=0; k<KPB; k++) { res += shared_grad[threadIdx.x + (QPB * k)] * shared_keys[k + (threadIdx.y * KPB)]; } res_k = 0.0; if ((rk_indx < K) && (e_indx < E)) { for (int q=0; q<QPB; q++) { res_k += (shared_queries[q + (threadIdx.y * QPB)] * shared_grad[q + (threadIdx.x * QPB)]); } atomicAdd(&grad_k[n][h][shared_topk[rk_indx]][e_indx], res_k); } __syncthreads(); } if ((rq_indx < l_end) && (e_indx < E)) { grad_q[n][h][rq_indx][e_indx] = res; } } /* Sparse dot product backward pass. */ void clustered_sparse_dot_backward( const torch::Tensor Q, const torch::Tensor K, const torch::Tensor groups, const torch::Tensor topk, const torch::Tensor grad_out, torch::Tensor grad_Q, torch::Tensor grad_K, const torch::Tensor q_start_indx, const torch::Tensor q_end_indx, const torch::Tensor block_counts, const torch::Tensor block_counts_cumsum, const int total_blocks, torch::Tensor indx_maps ) { // Make sure that we are using the correct GPU device torch::DeviceGuard _guard(Q.device()); int N = Q.size(0); int H = Q.size(1); int L = Q.size(2); int E = Q.size(3); int C = topk.size(2); int k = topk.size(3); int S = K.size(2); int threads = 1024; int blocks = ((N*H*C) + threads - 1) / threads; create_maps_kernel<<<blocks, threads>>>( block_counts.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), block_counts_cumsum.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), 1, indx_maps.packed_accessor32<int, 2, torch::RestrictPtrTraits>() ); dim3 dimBlock(QPB, EPB); dim3 dimGrid(total_blocks, (E + EPB - 1)/EPB); const int mem_grad = QPB * KPB; const int mem_keys = KPB * EPB; const int mem_queries = QPB * EPB; const int shared_mem = (mem_grad + mem_queries + mem_keys + k) * sizeof(float); clustered_sparse_dot_queries_backward_kernel<<<dimGrid, dimBlock, shared_mem>>>( grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), Q.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), K.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), topk.packed_accessor32<int, 4, torch::RestrictPtrTraits>(), indx_maps.packed_accessor32<int, 2, torch::RestrictPtrTraits>(), q_start_indx.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), q_end_indx.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), grad_Q.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_K.packed_accessor32<float, 4, torch::RestrictPtrTraits>() ); } __global__ void clustered_sparse_weighted_average_kernel( const float_accessor_4d weights, const float_accessor_4d values, const int32_accessor_4d topk, const int32_accessor_2d indx_maps, const int32_accessor_3d q_start_indx, const int32_accessor_3d q_end_indx, float_accessor_4d output ) { int E = output.size(3); int K = topk.size(3); 
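// Shared memory layout for this kernel: a QPB x KPB tile of attention
// weights, a KPB x EPB tile of the gathered values, and the K top-k key
// indices of the current cluster.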
extern __shared__ float shared_mem[]; float* shared_weights = shared_mem; float* shared_values = shared_weights + (KPB*QPB); float* shared_topk = shared_values + (EPB*KPB); int n = indx_maps[blockIdx.x][0]; int h = indx_maps[blockIdx.x][1]; int c = indx_maps[blockIdx.x][2]; int l_end = q_end_indx[n][h][c]; // Load all the top indices for all keys int thread_id = threadIdx.x + threadIdx.y * blockDim.x; for (int t=thread_id; t<K; t+=(blockDim.x*blockDim.y)) { shared_topk[t] = topk[n][h][c][t]; } __syncthreads(); float res = 0.0; int rq_indx = q_start_indx[n][h][c] + (indx_maps[blockIdx.x][3] * QPB) + threadIdx.x; int e_indx = threadIdx.y + (blockIdx.y * EPB); for (int kb=0; kb<((K + KPB - 1)/KPB); kb++) { int rk_indx = kb*KPB + threadIdx.y; if ((rq_indx < l_end) && (rk_indx < K)) { shared_weights[threadIdx.x + (QPB * threadIdx.y)] = weights[n][h][rq_indx][rk_indx]; } else { shared_weights[threadIdx.x + (QPB * threadIdx.y)] = 0; } rk_indx = kb*KPB + threadIdx.x; if ((rk_indx < K) && (e_indx < E)){ shared_values[threadIdx.x + (KPB * threadIdx.y)] = values[n][h][shared_topk[rk_indx]][e_indx]; } else{ shared_values[threadIdx.x + (KPB * threadIdx.y)] = 0; } __syncthreads(); for (int k=0; k<KPB; k++) { res += shared_weights[threadIdx.x + (QPB * k)] * shared_values[k + (threadIdx.y * KPB)]; } __syncthreads(); } if ((rq_indx < l_end) && (e_indx < E)) { output[n][h][rq_indx][e_indx] = res; } } /* Weighted average of the "values" with attention weight stored in the "weights". The values to be selected are defined by the "topk" */ void clustered_sparse_weighted_average( const torch::Tensor weights, const torch::Tensor values, const torch::Tensor topk, torch::Tensor output, const torch::Tensor q_start_indx, const torch::Tensor q_end_indx, const torch::Tensor block_counts, const torch::Tensor block_counts_cumsum, const int total_blocks, torch::Tensor indx_maps ) { // Make sure that we are using the correct GPU device torch::DeviceGuard _guard(weights.device()); int N = weights.size(0); int H = weights.size(1); int L = weights.size(2); int C = topk.size(2); int k = topk.size(3); int S = values.size(2); int E = values.size(3); int threads = 1024; int blocks = ((N*H*C) + threads - 1) / threads; create_maps_kernel<<<blocks, threads>>>( block_counts.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), block_counts_cumsum.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), 1, indx_maps.packed_accessor32<int, 2, torch::RestrictPtrTraits>() ); dim3 dimBlock(QPB, EPB); dim3 dimGrid(total_blocks, (E + EPB - 1)/EPB); const int mem_weights = QPB * KPB; const int mem_values = KPB * EPB; const int shared_mem = (mem_weights + mem_values + k) * sizeof(float); clustered_sparse_weighted_average_kernel<<<dimGrid, dimBlock, shared_mem>>>( weights.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), topk.packed_accessor32<int, 4, torch::RestrictPtrTraits>(), indx_maps.packed_accessor32<int, 2, torch::RestrictPtrTraits>(), q_start_indx.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), q_end_indx.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), output.packed_accessor32<float, 4, torch::RestrictPtrTraits>() ); } __global__ void clustered_sparse_weighted_average_backward_kernel( const float_accessor_4d weights, const float_accessor_4d grad_out, const int32_accessor_4d topk, const int32_accessor_2d indx_maps, const int32_accessor_3d q_start_indx, const int32_accessor_3d q_end_indx, const int q_per_block, float_accessor_4d grad_v ) { int E = 
grad_out.size(3); int K = topk.size(3); extern __shared__ float shared_mem[]; float* shared_grad = shared_mem; float* shared_weights = shared_grad + (EPB*q_per_block); float* shared_topk = shared_weights + (KPB*q_per_block); int n = indx_maps[blockIdx.x][0]; int h = indx_maps[blockIdx.x][1]; int c = indx_maps[blockIdx.x][2]; int l_end = q_end_indx[n][h][c]; // Load all the top indices int thread_id = threadIdx.x + (threadIdx.y * blockDim.x); for (int t=thread_id; t<K; t+=(blockDim.x*blockDim.y)) { shared_topk[t] = topk[n][h][c][t]; } int q_indx; int k_indx; int e_indx; int q_indx_local; int e_indx_local; int q_start = q_start_indx[n][h][c] + (indx_maps[blockIdx.x][3] * q_per_block); for (int t=thread_id; t<(EPB*q_per_block); t+=(blockDim.x*blockDim.y)) { q_indx_local = t / EPB; e_indx_local = t % EPB; q_indx = q_start + q_indx_local; e_indx = e_indx_local + (blockIdx.y * EPB); if ((q_indx < l_end) && (e_indx < E)) { shared_grad[(q_indx_local*EPB) + e_indx_local] = grad_out[n][h][q_indx][e_indx]; } else { shared_grad[(q_indx_local*EPB) + e_indx_local] = 0; } } int k_indx_local; for (int t=thread_id; t<(KPB*q_per_block); t+=(blockDim.x*blockDim.y)) { q_indx_local = t / KPB; k_indx_local = t % KPB; q_indx = q_start + q_indx_local; k_indx = k_indx_local + (blockIdx.z * KPB); if ((q_indx < l_end) && (k_indx < K)) { shared_weights[(q_indx_local*KPB) + k_indx_local] = weights[n][h][q_indx][k_indx]; } else { shared_weights[(q_indx_local*KPB) + k_indx_local] = 0; } } __syncthreads(); float res = 0; int k_id = threadIdx.x + (blockIdx.z*KPB); e_indx = (blockIdx.y * EPB) + threadIdx.y; if ((k_id < K) && (e_indx < E)) { k_indx = shared_topk[k_id]; for (int t=0; t<q_per_block; t++) { res += shared_grad[(t*EPB) + threadIdx.y] * shared_weights[(t*KPB) + threadIdx.x]; } atomicAdd(&grad_v[n][h][k_indx][e_indx], res); } } /* Sparse weighted average backward pass */ void clustered_sparse_weighted_average_backward( const torch::Tensor weights, const torch::Tensor values, const torch::Tensor topk, const torch::Tensor grad_out, torch::Tensor grad_weights, torch::Tensor grad_values, const torch::Tensor q_start_indx, const torch::Tensor q_end_indx, const torch::Tensor block_counts, const torch::Tensor block_counts_cumsum, const int total_blocks, torch::Tensor indx_maps ) { // Make sure that we are using the correct GPU device torch::DeviceGuard _guard(weights.device()); int N = weights.size(0); int H = weights.size(1); int L = weights.size(2); int k = weights.size(3); int E = values.size(3); int C = topk.size(2); int S = values.size(2); int threads = 1024; int blocks = ((N*H*C) + threads - 1) / threads; create_maps_kernel<<<blocks, threads>>>( block_counts.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), block_counts_cumsum.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), 1, indx_maps.packed_accessor32<int, 2, torch::RestrictPtrTraits>() ); dim3 dimBlock(QPB, KPB); dim3 dimGrid(total_blocks, (k + KPB - 1)/KPB); const int shared_mem = (((KPB + QPB) * EPB) + KPB) * sizeof(float); clustered_sparse_dot_product_kernel<<<dimGrid, dimBlock, shared_mem>>>( grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), values.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), topk.packed_accessor32<int, 4, torch::RestrictPtrTraits>(), indx_maps.packed_accessor32<int, 2, torch::RestrictPtrTraits>(), q_start_indx.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), q_end_indx.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), grad_weights.packed_accessor32<float, 4, torch::RestrictPtrTraits>() ); 
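// The gradient w.r.t. the attention weights is itself a sparse dot product
// between grad_out and the selected values, so the forward dot-product kernel
// is reused above. The second launch below accumulates the gradient w.r.t. the
// values; its grid gains a third dimension over blocks of the top-k keys so
// each block handles one (embedding-tile, key-tile) pair.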
dim3 dimBlockV(KPB, EPB); dim3 dimGridV(total_blocks, (E + EPB - 1)/EPB, (k + KPB - 1)/KPB); const int shared_mem_v = (((KPB + EPB) * QPB) + k) * sizeof(float); clustered_sparse_weighted_average_backward_kernel <<<dimGridV, dimBlockV, shared_mem_v>>>( weights.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), grad_out.packed_accessor32<float, 4, torch::RestrictPtrTraits>(), topk.packed_accessor32<int, 4, torch::RestrictPtrTraits>(), indx_maps.packed_accessor32<int, 2, torch::RestrictPtrTraits>(), q_start_indx.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), q_end_indx.packed_accessor32<int, 3, torch::RestrictPtrTraits>(), QPB, grad_values.packed_accessor32<float, 4, torch::RestrictPtrTraits>() ); } PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def( "clustered_sparse_dot_product", &clustered_sparse_dot_product, "Compute the dot product only in the positions specified by topk." ); m.def( "clustered_sparse_dot_backward", &clustered_sparse_dot_backward, "Compute the gradients for the sparse dot product." ); m.def( "clustered_sparse_weighted_average", &clustered_sparse_weighted_average, "Average the values using the sparse attention." ); m.def( "clustered_sparse_weighted_average_backward", &clustered_sparse_weighted_average_backward, "Compute the gradients for the sparse weighted average." ); }
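/*
 * Illustrative sketch (not part of the extension): a naive CPU reference for
 * the product computed by clustered_sparse_dot_product_kernel, written for a
 * single (sequence, head) slice. It assumes the queries have already been
 * re-arranged so that rows [q_start[c], q_end[c]) belong to cluster c, which is
 * the same layout the kernels above rely on. The container types and the
 * function name are hypothetical and only intended for host-side testing.
 */
#include <vector>

static void clustered_sparse_dot_reference(
    const std::vector<std::vector<float>> &Q,       // [L][E] queries (cluster-sorted)
    const std::vector<std::vector<float>> &K,       // [S][E] keys
    const std::vector<std::vector<int>>   &topk,    // [C][k] selected key ids per cluster
    const std::vector<int>                &q_start, // [C] first query row of each cluster
    const std::vector<int>                &q_end,   // [C] one past the last query row
    std::vector<std::vector<float>>       &product) // [L][k] output dot products
{
    for (size_t c = 0; c < topk.size(); ++c)
    {
        for (int q = q_start[c]; q < q_end[c]; ++q)
        {
            for (size_t j = 0; j < topk[c].size(); ++j)
            {
                const std::vector<float> &key = K[topk[c][j]];
                float acc = 0.0f;
                for (size_t e = 0; e < Q[q].size(); ++e)
                {
                    acc += Q[q][e] * key[e];
                }
                product[q][j] = acc;
            }
        }
    }
}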
namespace dgl { using runtime::NDArray; namespace aten { namespace impl { ///////////////////////////// CSRIsNonZero ///////////////////////////// template <DLDeviceType XPU, typename IdType> bool CSRIsNonZero(CSRMatrix csr, int64_t row, int64_t col) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const auto& ctx = csr.indptr->ctx; IdArray rows = aten::VecToIdArray<int64_t>({row}, sizeof(IdType) * 8, ctx); IdArray cols = aten::VecToIdArray<int64_t>({col}, sizeof(IdType) * 8, ctx); rows = rows.CopyTo(ctx); cols = cols.CopyTo(ctx); IdArray out = aten::NewIdArray(1, ctx, sizeof(IdType) * 8); const IdType* data = nullptr; // TODO(minjie): use binary search for sorted csr CUDA_KERNEL_CALL(cuda::_LinearSearchKernel, 1, 1, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), data, rows.Ptr<IdType>(), cols.Ptr<IdType>(), 1, 1, 1, static_cast<IdType*>(nullptr), static_cast<IdType>(-1), out.Ptr<IdType>()); out = out.CopyTo(DLContext{kDLCPU, 0}); return *out.Ptr<IdType>() != -1; } template bool CSRIsNonZero<kDLGPU, int32_t>(CSRMatrix, int64_t, int64_t); template bool CSRIsNonZero<kDLGPU, int64_t>(CSRMatrix, int64_t, int64_t); template <DLDeviceType XPU, typename IdType> NDArray CSRIsNonZero(CSRMatrix csr, NDArray row, NDArray col) { const auto rowlen = row->shape[0]; const auto collen = col->shape[0]; const auto rstlen = std::max(rowlen, collen); NDArray rst = NDArray::Empty({rstlen}, row->dtype, row->ctx); if (rstlen == 0) return rst; const int64_t row_stride = (rowlen == 1 && collen != 1) ? 0 : 1; const int64_t col_stride = (collen == 1 && rowlen != 1) ? 0 : 1; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const int nt = cuda::FindNumThreads(rstlen); const int nb = (rstlen + nt - 1) / nt; const IdType* data = nullptr; // TODO(minjie): use binary search for sorted csr CUDA_KERNEL_CALL(cuda::_LinearSearchKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), data, row.Ptr<IdType>(), col.Ptr<IdType>(), row_stride, col_stride, rstlen, static_cast<IdType*>(nullptr), static_cast<IdType>(-1), rst.Ptr<IdType>()); return rst != -1; } template NDArray CSRIsNonZero<kDLGPU, int32_t>(CSRMatrix, NDArray, NDArray); template NDArray CSRIsNonZero<kDLGPU, int64_t>(CSRMatrix, NDArray, NDArray); ///////////////////////////// CSRHasDuplicate ///////////////////////////// /*! * \brief Check whether each row does not have any duplicate entries. * Assume the CSR is sorted. */ template <typename IdType> __global__ void _SegmentHasNoDuplicate( const IdType* indptr, const IdType* indices, int64_t num_rows, int8_t* flags) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < num_rows) { bool f = true; for (IdType i = indptr[tx] + 1; f && i < indptr[tx + 1]; ++i) { f = (indices[i - 1] != indices[i]); } flags[tx] = static_cast<int8_t>(f); tx += stride_x; } } template <DLDeviceType XPU, typename IdType> bool CSRHasDuplicate(CSRMatrix csr) { if (!csr.sorted) csr = CSRSort(csr); const auto& ctx = csr.indptr->ctx; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); auto device = runtime::DeviceAPI::Get(ctx); // We allocate a workspace of num_rows bytes. It wastes a little bit memory but should // be fine. 
int8_t* flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, csr.num_rows)); const int nt = cuda::FindNumThreads(csr.num_rows); const int nb = (csr.num_rows + nt - 1) / nt; CUDA_KERNEL_CALL(_SegmentHasNoDuplicate, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), csr.num_rows, flags); bool ret = cuda::AllTrue(flags, csr.num_rows, ctx); device->FreeWorkspace(ctx, flags); return !ret; } template bool CSRHasDuplicate<kDLGPU, int32_t>(CSRMatrix csr); template bool CSRHasDuplicate<kDLGPU, int64_t>(CSRMatrix csr); ///////////////////////////// CSRGetRowNNZ ///////////////////////////// template <DLDeviceType XPU, typename IdType> int64_t CSRGetRowNNZ(CSRMatrix csr, int64_t row) { const IdType cur = aten::IndexSelect<IdType>(csr.indptr, row); const IdType next = aten::IndexSelect<IdType>(csr.indptr, row + 1); return next - cur; } template int64_t CSRGetRowNNZ<kDLGPU, int32_t>(CSRMatrix, int64_t); template int64_t CSRGetRowNNZ<kDLGPU, int64_t>(CSRMatrix, int64_t); template <typename IdType> __global__ void _CSRGetRowNNZKernel( const IdType* vid, const IdType* indptr, IdType* out, int64_t length) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { const IdType vv = vid[tx]; out[tx] = indptr[vv + 1] - indptr[vv]; tx += stride_x; } } template <DLDeviceType XPU, typename IdType> NDArray CSRGetRowNNZ(CSRMatrix csr, NDArray rows) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const auto len = rows->shape[0]; const IdType* vid_data = static_cast<IdType*>(rows->data); const IdType* indptr_data = static_cast<IdType*>(csr.indptr->data); NDArray rst = NDArray::Empty({len}, rows->dtype, rows->ctx); IdType* rst_data = static_cast<IdType*>(rst->data); const int nt = cuda::FindNumThreads(len); const int nb = (len + nt - 1) / nt; CUDA_KERNEL_CALL(_CSRGetRowNNZKernel, nb, nt, 0, thr_entry->stream, vid_data, indptr_data, rst_data, len); return rst; } template NDArray CSRGetRowNNZ<kDLGPU, int32_t>(CSRMatrix, NDArray); template NDArray CSRGetRowNNZ<kDLGPU, int64_t>(CSRMatrix, NDArray); ///////////////////////////// CSRGetRowColumnIndices ///////////////////////////// template <DLDeviceType XPU, typename IdType> NDArray CSRGetRowColumnIndices(CSRMatrix csr, int64_t row) { const int64_t len = impl::CSRGetRowNNZ<XPU, IdType>(csr, row); const int64_t offset = aten::IndexSelect<IdType>(csr.indptr, row) * sizeof(IdType); return csr.indices.CreateView({len}, csr.indices->dtype, offset); } template NDArray CSRGetRowColumnIndices<kDLGPU, int32_t>(CSRMatrix, int64_t); template NDArray CSRGetRowColumnIndices<kDLGPU, int64_t>(CSRMatrix, int64_t); ///////////////////////////// CSRGetRowData ///////////////////////////// template <DLDeviceType XPU, typename IdType> NDArray CSRGetRowData(CSRMatrix csr, int64_t row) { const int64_t len = impl::CSRGetRowNNZ<XPU, IdType>(csr, row); const int64_t offset = aten::IndexSelect<IdType>(csr.indptr, row) * sizeof(IdType); if (aten::CSRHasData(csr)) return csr.data.CreateView({len}, csr.data->dtype, offset); else return aten::Range(offset, offset + len, csr.indptr->dtype.bits, csr.indptr->ctx); } template NDArray CSRGetRowData<kDLGPU, int32_t>(CSRMatrix, int64_t); template NDArray CSRGetRowData<kDLGPU, int64_t>(CSRMatrix, int64_t); ///////////////////////////// CSRSliceRows ///////////////////////////// template <DLDeviceType XPU, typename IdType> CSRMatrix CSRSliceRows(CSRMatrix csr, int64_t start, int64_t end) { const int64_t num_rows = end - start; const IdType st_pos = 
aten::IndexSelect<IdType>(csr.indptr, start); const IdType ed_pos = aten::IndexSelect<IdType>(csr.indptr, end); const IdType nnz = ed_pos - st_pos; IdArray ret_indptr = aten::IndexSelect(csr.indptr, start, end + 1) - st_pos; // indices and data can be view arrays IdArray ret_indices = csr.indices.CreateView( {nnz}, csr.indices->dtype, st_pos * sizeof(IdType)); IdArray ret_data; if (CSRHasData(csr)) ret_data = csr.data.CreateView({nnz}, csr.data->dtype, st_pos * sizeof(IdType)); else ret_data = aten::Range(st_pos, ed_pos, csr.indptr->dtype.bits, csr.indptr->ctx); return CSRMatrix(num_rows, csr.num_cols, ret_indptr, ret_indices, ret_data, csr.sorted); } template CSRMatrix CSRSliceRows<kDLGPU, int32_t>(CSRMatrix, int64_t, int64_t); template CSRMatrix CSRSliceRows<kDLGPU, int64_t>(CSRMatrix, int64_t, int64_t); /*! * \brief Copy data segment to output buffers * * For the i^th row r = row[i], copy the data from indptr[r] ~ indptr[r+1] * to the out_data from out_indptr[i] ~ out_indptr[i+1] * * If the provided `data` array is nullptr, write the read index to the out_data. * */ template <typename IdType, typename DType> __global__ void _SegmentCopyKernel( const IdType* indptr, const DType* data, const IdType* row, int64_t length, int64_t n_row, const IdType* out_indptr, DType* out_data) { IdType tx = static_cast<IdType>(blockIdx.x) * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < length) { // find upper bound for tx using binary search. // out_indptr has already a prefix sum. n_row = size(out_indptr)-1 IdType l = 0, r = n_row, m = 0; while (l < r) { m = l + (r-l)/2; if (tx >= out_indptr[m]) { l = m+1; } else { r = m; } } IdType rpos = l-1; IdType rofs = tx - out_indptr[rpos]; const IdType u = row[rpos]; out_data[tx] = data? data[indptr[u]+rofs] : indptr[u]+rofs; tx += stride_x; } } template <DLDeviceType XPU, typename IdType> CSRMatrix CSRSliceRows(CSRMatrix csr, NDArray rows) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const int64_t len = rows->shape[0]; IdArray ret_indptr = aten::CumSum(aten::CSRGetRowNNZ(csr, rows), true); const int64_t nnz = aten::IndexSelect<IdType>(ret_indptr, len); const int nt = 256; // for better GPU usage of small invocations const int nb = (nnz + nt - 1) / nt; // Copy indices. IdArray ret_indices = NDArray::Empty({nnz}, csr.indptr->dtype, csr.indptr->ctx); CUDA_KERNEL_CALL(_SegmentCopyKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), rows.Ptr<IdType>(), nnz, len, ret_indptr.Ptr<IdType>(), ret_indices.Ptr<IdType>()); // Copy data. IdArray ret_data = NDArray::Empty({nnz}, csr.indptr->dtype, csr.indptr->ctx); CUDA_KERNEL_CALL(_SegmentCopyKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), CSRHasData(csr)? csr.data.Ptr<IdType>() : nullptr, rows.Ptr<IdType>(), nnz, len, ret_indptr.Ptr<IdType>(), ret_data.Ptr<IdType>()); return CSRMatrix(len, csr.num_cols, ret_indptr, ret_indices, ret_data, csr.sorted); } template CSRMatrix CSRSliceRows<kDLGPU, int32_t>(CSRMatrix , NDArray); template CSRMatrix CSRSliceRows<kDLGPU, int64_t>(CSRMatrix , NDArray); ///////////////////////////// CSRGetDataAndIndices ///////////////////////////// /*! * \brief Generate a 0-1 mask for each index that hits the provided (row, col) * index. 
* * Examples: * Given a CSR matrix (with duplicate entries) as follows: * [[0, 1, 2, 0, 0], * [1, 0, 0, 0, 0], * [0, 0, 1, 1, 0], * [0, 0, 0, 0, 0]] * Given rows: [0, 1], cols: [0, 2, 3] * The result mask is: [0, 1, 1, 1, 0, 0] */ template <typename IdType> __global__ void _SegmentMaskKernel( const IdType* indptr, const IdType* indices, const IdType* row, const IdType* col, int64_t row_stride, int64_t col_stride, int64_t length, IdType* mask) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < length) { int rpos = tx * row_stride, cpos = tx * col_stride; const IdType r = row[rpos], c = col[cpos]; for (IdType i = indptr[r]; i < indptr[r + 1]; ++i) { if (indices[i] == c) { mask[i] = 1; } } tx += stride_x; } } /*! * \brief Search for the insertion positions for needle in the hay. * * The hay is a list of sorted elements and the result is the insertion position * of each needle so that the insertion still gives sorted order. * * It essentially perform binary search to find lower bound for each needle * elements. Require the largest elements in the hay is larger than the given * needle elements. Commonly used in searching for row IDs of a given set of * coordinates. */ template <typename IdType> __global__ void _SortedSearchKernel( const IdType* hay, int64_t hay_size, const IdType* needles, int64_t num_needles, IdType* pos) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < num_needles) { const IdType ele = needles[tx]; // binary search IdType lo = 0, hi = hay_size - 1; while (lo < hi) { IdType mid = (lo + hi) >> 1; if (hay[mid] <= ele) { lo = mid + 1; } else { hi = mid; } } pos[tx] = (hay[hi] == ele)? hi : hi - 1; tx += stride_x; } } template <DLDeviceType XPU, typename IdType> std::vector<NDArray> CSRGetDataAndIndices(CSRMatrix csr, NDArray row, NDArray col) { const auto rowlen = row->shape[0]; const auto collen = col->shape[0]; const auto len = std::max(rowlen, collen); if (len == 0) return {NullArray(), NullArray(), NullArray()}; const auto& ctx = row->ctx; const auto nbits = row->dtype.bits; const int64_t nnz = csr.indices->shape[0]; const int64_t row_stride = (rowlen == 1 && collen != 1) ? 0 : 1; const int64_t col_stride = (collen == 1 && rowlen != 1) ? 0 : 1; auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); // Generate a 0-1 mask for matched (row, col) positions. IdArray mask = Full(0, nnz, nbits, ctx); const int nt = cuda::FindNumThreads(len); const int nb = (len + nt - 1) / nt; CUDA_KERNEL_CALL(_SegmentMaskKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), row.Ptr<IdType>(), col.Ptr<IdType>(), row_stride, col_stride, len, mask.Ptr<IdType>()); IdArray idx = AsNumBits(NonZero(mask), nbits); if (idx->shape[0] == 0) // No data. Return three empty arrays. return {idx, idx, idx}; // Search for row index IdArray ret_row = NewIdArray(idx->shape[0], ctx, nbits); const int nt2 = cuda::FindNumThreads(idx->shape[0]); const int nb2 = (idx->shape[0] + nt - 1) / nt; CUDA_KERNEL_CALL(_SortedSearchKernel, nb2, nt2, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.num_rows, idx.Ptr<IdType>(), idx->shape[0], ret_row.Ptr<IdType>()); // Column & data can be obtained by index select. IdArray ret_col = IndexSelect(csr.indices, idx); IdArray ret_data = CSRHasData(csr)? 
IndexSelect(csr.data, idx) : idx; return {ret_row, ret_col, ret_data}; } template std::vector<NDArray> CSRGetDataAndIndices<kDLGPU, int32_t>( CSRMatrix csr, NDArray rows, NDArray cols); template std::vector<NDArray> CSRGetDataAndIndices<kDLGPU, int64_t>( CSRMatrix csr, NDArray rows, NDArray cols); ///////////////////////////// CSRSliceMatrix ///////////////////////////// /*! * \brief Generate a 0-1 mask for each index whose column is in the provided set. * It also counts the number of masked values per row. */ template <typename IdType> __global__ void _SegmentMaskColKernel( const IdType* indptr, const IdType* indices, int64_t num_rows, const IdType* col, int64_t col_len, IdType* mask, IdType* count) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; // TODO(minjie): consider putting the col array in shared memory. while (tx < num_rows) { IdType cnt = 0; for (IdType i = indptr[tx]; i < indptr[tx + 1]; ++i) { const IdType cur_c = indices[i]; for (int64_t j = 0; j < col_len; ++j) { if (cur_c == col[j]) { mask[i] = 1; ++cnt; break; } } } count[tx] = cnt; tx += stride_x; } } template <DLDeviceType XPU, typename IdType> CSRMatrix CSRSliceMatrix(CSRMatrix csr, runtime::NDArray rows, runtime::NDArray cols) { auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const auto& ctx = rows->ctx; const auto& dtype = rows->dtype; const auto nbits = dtype.bits; const int64_t new_nrows = rows->shape[0]; const int64_t new_ncols = cols->shape[0]; if (new_nrows == 0 || new_ncols == 0) return CSRMatrix(new_nrows, new_ncols, Full(0, new_nrows + 1, nbits, ctx), NullArray(dtype, ctx), NullArray(dtype, ctx)); // First slice rows csr = CSRSliceRows(csr, rows); if (csr.indices->shape[0] == 0) return CSRMatrix(new_nrows, new_ncols, Full(0, new_nrows + 1, nbits, ctx), NullArray(dtype, ctx), NullArray(dtype, ctx)); // Generate a 0-1 mask for matched (row, col) positions. IdArray mask = Full(0, csr.indices->shape[0], nbits, ctx); // A count for how many masked values per row. IdArray count = NewIdArray(csr.num_rows, ctx, nbits); const int nt = cuda::FindNumThreads(csr.num_rows); const int nb = (csr.num_rows + nt - 1) / nt; CUDA_KERNEL_CALL(_SegmentMaskColKernel, nb, nt, 0, thr_entry->stream, csr.indptr.Ptr<IdType>(), csr.indices.Ptr<IdType>(), csr.num_rows, cols.Ptr<IdType>(), cols->shape[0], mask.Ptr<IdType>(), count.Ptr<IdType>()); IdArray idx = AsNumBits(NonZero(mask), nbits); if (idx->shape[0] == 0) return CSRMatrix(new_nrows, new_ncols, Full(0, new_nrows + 1, nbits, ctx), NullArray(dtype, ctx), NullArray(dtype, ctx)); // Indptr needs to be adjusted according to the new nnz per row. IdArray ret_indptr = CumSum(count, true); // Column & data can be obtained by index select. IdArray ret_col = IndexSelect(csr.indices, idx); IdArray ret_data = CSRHasData(csr)? IndexSelect(csr.data, idx) : idx; // Relabel column IdArray col_hash = NewIdArray(csr.num_cols, ctx, nbits); Scatter_(cols, Range(0, cols->shape[0], nbits, ctx), col_hash); ret_col = IndexSelect(col_hash, ret_col); return CSRMatrix(new_nrows, new_ncols, ret_indptr, ret_col, ret_data); } template CSRMatrix CSRSliceMatrix<kDLGPU, int32_t>( CSRMatrix csr, runtime::NDArray rows, runtime::NDArray cols); template CSRMatrix CSRSliceMatrix<kDLGPU, int64_t>( CSRMatrix csr, runtime::NDArray rows, runtime::NDArray cols); } // namespace impl } // namespace aten } // namespace dgl
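/*
 * Illustrative sketch (not part of DGL): a host-side version of the binary
 * search performed in _SegmentCopyKernel. Given out_indptr, a prefix sum of
 * per-row counts with n_row + 1 entries, and a flat output position tx, it
 * returns the row whose segment contains tx, i.e. the largest r such that
 * out_indptr[r] <= tx. The function name is hypothetical.
 */
static inline int64_t segment_of_position(const int64_t *out_indptr,
                                          int64_t n_row,
                                          int64_t tx)
{
    // Upper-bound search: find the first prefix-sum entry strictly greater
    // than tx; the containing row is the one just before it.
    int64_t l = 0, r = n_row;
    while (l < r)
    {
        const int64_t m = l + (r - l) / 2;
        if (tx >= out_indptr[m])
        {
            l = m + 1;
        }
        else
        {
            r = m;
        }
    }
    return l - 1;  // out_indptr[l - 1] <= tx < out_indptr[l]
}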
#include <cuml/common/logger.hpp> #include <raft/linalg/eltwise.cuh> #include <raft/linalg/norm.cuh> #include <cuda_runtime.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <thrust/transform.h> #include <sys/time.h> #include <raft/random/rng.hpp> #include <raft/stats/sum.hpp> #include <unistd.h> #include <chrono> #include <iostream> #include <raft/device_atomics.cuh> /** * @brief Performs P + P.T. * @param[out] vector: The output vector you want to overwrite with randomness. * @param[in] minimum: The minimum value in the output vector you want. * @param[in] maximum: The maximum value in the output vector you want. * @param[in] size: The size of the output vector. * @param[in] stream: The GPU stream. * @param[in] seed: If seed == -1, then the output is pure randomness. If >= 0, then you can * reproduce TSNE. */ template <typename value_t = float> void random_vector(value_t* vector, const value_t minimum, const value_t maximum, const int size, cudaStream_t stream, long long seed = -1) { if (seed <= 0) { // Get random seed based on time of day struct timeval tp; gettimeofday(&tp, NULL); seed = tp.tv_sec * 1000 + tp.tv_usec; } raft::random::Rng random(seed); random.uniform<value_t>(vector, size, minimum, maximum, stream); } long start, end; struct timeval timecheck; double SymmetrizeTime = 0, DistancesTime = 0, NormalizeTime = 0, PerplexityTime = 0, BoundingBoxKernel_time = 0, ClearKernel1_time = 0, TreeBuildingKernel_time = 0, ClearKernel2_time = 0, SummarizationKernel_time = 0, SortKernel_time = 0, RepulsionTime = 0, Reduction_time = 0, attractive_time = 0, IntegrationKernel_time = 0; // To silence warnings #define START_TIMER \ if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { \ gettimeofday(&timecheck, NULL); \ start = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000; \ } #define END_TIMER(add_onto) \ if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { \ gettimeofday(&timecheck, NULL); \ end = (long)timecheck.tv_sec * 1000 + (long)timecheck.tv_usec / 1000; \ add_onto += (end - start); \ } #define PRINT_TIMES \ if (ML::Logger::get().shouldLogFor(CUML_LEVEL_DEBUG)) { \ double total = (SymmetrizeTime + DistancesTime + NormalizeTime + PerplexityTime + \ BoundingBoxKernel_time + ClearKernel1_time + TreeBuildingKernel_time + \ ClearKernel2_time + SummarizationKernel_time + SortKernel_time + \ RepulsionTime + Reduction_time + attractive_time + IntegrationKernel_time) / \ 100.0; \ CUML_LOG_DEBUG( \ "SymmetrizeTime = %.lf (%.lf)\n" \ "DistancesTime = %.lf (%.lf)\n" \ "NormalizeTime = %.lf (%.lf)\n" \ "PerplexityTime = %.lf (%.lf)\n" \ "BoundingBoxKernel_time = %.lf (%.lf)\n" \ "ClearKernel1_time = %.lf (%.lf)\n" \ "TreeBuildingKernel_time = %.lf (%.lf)\n" \ "ClearKernel2_time = %.lf (%.lf)\n" \ "SummarizationKernel_time = %.lf (%.lf)\n" \ "SortKernel_time = %.lf (%.lf)\n" \ "RepulsionTime = %.lf (%.lf)\n" \ "Reduction_time = %.lf (%.lf)\n" \ "attractive_time = %.lf (%.lf)\n" \ "IntegrationKernel_time = %.lf (%.lf)\n" \ "TOTAL TIME = %.lf", \ SymmetrizeTime, \ SymmetrizeTime / total, \ DistancesTime, \ DistancesTime / total, \ NormalizeTime, \ NormalizeTime / total, \ PerplexityTime, \ PerplexityTime / total, \ BoundingBoxKernel_time, \ BoundingBoxKernel_time / total, \ ClearKernel1_time, \ ClearKernel1_time / total, \ TreeBuildingKernel_time, \ TreeBuildingKernel_time / total, \ ClearKernel2_time, \ ClearKernel2_time / total, \ SummarizationKernel_time, \ SummarizationKernel_time / total, \ SortKernel_time, \ SortKernel_time / total, \ RepulsionTime, \ 
RepulsionTime / total, \ Reduction_time, \ Reduction_time / total, \ attractive_time, \ attractive_time / total, \ IntegrationKernel_time, \ IntegrationKernel_time / total, \ total * 100.0); \ } template <typename value_t, typename value_idx, int TPB = 256> __global__ void min_max_kernel( const value_t* Y, const value_idx n, value_t* min, value_t* max, bool find_min = true) { auto tid = threadIdx.x + blockDim.x * blockIdx.x; typedef cub::BlockReduce<value_t, TPB> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage_min; __shared__ typename BlockReduce::TempStorage temp_storage_max; value_t thread_min, thread_max; if (tid < n) { thread_max = Y[tid]; if (find_min) thread_min = thread_max; } else { if (find_min) thread_min = std::numeric_limits<value_t>::max(); thread_max = std::numeric_limits<value_t>::lowest(); } value_t block_min, block_max; if (find_min) { block_min = BlockReduce(temp_storage_min).Reduce(thread_min, cub::Min()); } block_max = BlockReduce(temp_storage_max).Reduce(thread_max, cub::Max()); // results stored in first thread of block if (threadIdx.x == 0) { if (find_min) atomicMin(min, block_min); atomicMax(max, block_max); } } /** * CUDA kernel to compute KL divergence */ template <typename value_idx, typename value_t> __global__ void compute_kl_div_k(const value_t* Ps, const value_t* Qs, value_t* __restrict__ KL_divs, const value_idx NNZ) { const auto index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= NNZ) return; const value_t P = Ps[index]; const value_t Q = max(Qs[index], FLT_EPSILON); KL_divs[index] = P * __logf(__fdividef(max(P, FLT_EPSILON), Q)); } /** * Compute KL divergence */ template <typename value_t> value_t compute_kl_div( value_t* __restrict__ Ps, value_t* Qs, value_t* KL_divs, const size_t NNZ, cudaStream_t stream) { value_t P_sum = thrust::reduce(rmm::exec_policy(stream), Ps, Ps + NNZ); raft::linalg::scalarMultiply(Ps, Ps, 1.0f / P_sum, NNZ, stream); value_t Q_sum = thrust::reduce(rmm::exec_policy(stream), Qs, Qs + NNZ); raft::linalg::scalarMultiply(Qs, Qs, 1.0f / Q_sum, NNZ, stream); const size_t block = 128; const size_t grid = raft::ceildiv(NNZ, block); compute_kl_div_k<<<grid, block, 0, stream>>>(Ps, Qs, KL_divs, NNZ); return thrust::reduce(rmm::exec_policy(stream), KL_divs, KL_divs + NNZ); } template <typename value_t> __device__ value_t compute_q(value_t dist, value_t dof) { const value_t exponent = (dof + 1.0f) / 2.0f; const value_t Q = __powf(dof / (dof + dist), exponent); return Q; }
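/*
 * Illustrative sketch (not part of cuML): host-side counterparts of compute_q
 * and of the per-element term evaluated in compute_kl_div_k, useful for
 * spot-checking the device results. With dof = 1 the weight reduces to the
 * familiar Student-t / Cauchy kernel 1 / (1 + dist) used by standard t-SNE.
 * The function names are hypothetical.
 */
#include <algorithm>
#include <cfloat>
#include <cmath>

template <typename value_t>
value_t compute_q_host(value_t dist, value_t dof)
{
    const value_t exponent = (dof + value_t(1)) / value_t(2);
    return std::pow(dof / (dof + dist), exponent);
}

template <typename value_t>
value_t kl_term_host(value_t P, value_t Q)
{
    // Mirrors compute_kl_div_k: clamp P and Q away from zero before the log.
    const value_t q = std::max(Q, value_t(FLT_EPSILON));
    return P * std::log(std::max(P, value_t(FLT_EPSILON)) / q);
}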
#include <cutil.h> #include <util.h> #include <blas.h> #include <multiply.h> #include <matrix_analysis.h> #include<thrust/count.h> //count #include<thrust/sort.h> //sort #include<thrust/binary_search.h> //lower_bound #include<thrust/unique.h> //unique #include<cusp/detail/format_utils.h> //offsets_to_indices #include <logger.h> #define epsilon 1e-10 namespace amgx { namespace aggregation { // ------------------------ // Kernels // ------------------------ //generate the aggregates labels template <typename IndexType, typename ValueType> __global__ void generatelabel1d(IndexType *aggregation, const ValueType *points, const double cord_min, const double cord_max, const int npoint, const int nlevel) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; double distance = 1.01 * (cord_max - cord_min); for (; idx < npoint; idx += gridDim.x * blockDim.x) { aggregation[idx] = (int)((points[idx] - cord_min) / distance * nlevel); } } template <typename IndexType, typename ValueType> __global__ void generatelabel1dpxb(IndexType *aggregation, const int alpha, const ValueType *points, const double cord_min, const double cord_max, const int npoint, const int nlevel) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; double distance = 1.01 * (cord_max - cord_min); for (; idx < npoint; idx += gridDim.x * blockDim.x) { aggregation[idx] = aggregation[idx] + alpha * (int)((points[idx] - cord_min) / distance * nlevel); } } template <typename IndexType> __global__ void generatelabel3d(IndexType *aggregation, const int n, const int n1d, const int n1d_orig) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; int n1d2 = n1d_orig * n1d_orig; int x, y, z; for (; idx < n; idx += gridDim.x * blockDim.x) { int label = idx % n1d2; x = (label % n1d_orig) / 2; y = (label / n1d_orig) / 2; z = idx / n1d2 / 2; aggregation[idx] = z * n1d * n1d + y * n1d + x; } } template <typename IndexType> __global__ void generatelabel2d(IndexType *aggregation, const int n, const int n1d, const int n1d_orig) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; int x, y; for (; idx < n; idx += gridDim.x * blockDim.x) { x = (idx % n1d_orig) / 2; y = (idx / n1d_orig) / 2; aggregation[idx] = y * n1d + x; } } template <typename IndexType > __global__ void aggregateLevel(IndexType *indices, const IndexType *next_level, const int num_rows) { unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; for (; idx < num_rows; idx += gridDim.x * blockDim.x) { indices[idx] = next_level[indices[idx]]; } } // ----------------- // Methods // ---------------- // Constructor template<class T_Config> GEO_SelectorBase<T_Config>::GEO_SelectorBase(AMG_Config &cfg, const std::string &cfg_scope) { } template <class T_Config> bool compx (typename GEO_SelectorBase<T_Config>::p3d i, typename GEO_SelectorBase<T_Config>::p3d j) { return (i.x < j.x); } template <class T_Config> bool compy (typename GEO_SelectorBase<T_Config>::p3d i, typename GEO_SelectorBase<T_Config>::p3d j) { return (i.y < j.y); } template <class T_Config> bool compz (typename GEO_SelectorBase<T_Config>::p3d i, typename GEO_SelectorBase<T_Config>::p3d j) { return (i.z < j.z); } template <typename Tuple, typename GeoType, typename IndexType> struct reduce_functor2 : public thrust::binary_function< Tuple, Tuple, Tuple > { __host__ __device__ Tuple operator()(const Tuple x, const Tuple y) { return thrust::make_tuple(thrust::get<0>(x) + thrust::get<0>(y), thrust::get<1>(x) + thrust::get<1>(y), thrust::get<2>(x) + thrust::get<2>(y) ); } }; template <typename Tuple, typename 
GeoType, typename IndexType> struct reduce_functor3 : public thrust::binary_function< Tuple, Tuple, Tuple > { __host__ __device__ Tuple operator()(const Tuple x, const Tuple y) { return thrust::make_tuple(thrust::get<0>(x) + thrust::get<0>(y), thrust::get<1>(x) + thrust::get<1>(y), thrust::get<2>(x) + thrust::get<2>(y), thrust::get<3>(x) + thrust::get<3>(y) ); } }; // interpolates geometric info from the matrix using the matrix aggregation template<class T_Config> void GEO_SelectorBase<T_Config>::interpolateGeoinfo( Matrix<T_Config> &A ) { const int threads_per_block = 512; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (A.get_num_rows() - 1) / threads_per_block + 1 ); ngeo_x.resize(A.get_num_rows()); ngeo_y.resize(A.get_num_rows()); if (this->dimension == 3) { ngeo_z.resize(A.get_num_rows()); } if (!(A.hasParameter("aggregates_info")) ) { FatalError("Cannot find information about previous aggregations for GEO selector", AMGX_ERR_BAD_PARAMETERS); } std::vector<IVector *> *agg_info = A.template getParameterPtr< std::vector<IVector *> >("aggregates_info"); IVector cur_geo_idx = *(agg_info->at(0)); IVector v_counter(this->num_origonal, 1), v_new_counter(A.get_num_rows(), 1), r_coord(A.get_num_rows()); if ( this->num_origonal != cur_geo_idx.size() ) { FatalError("GEO size doesn't match original matrix dimension", AMGX_ERR_BAD_PARAMETERS); } for (size_t i = 1; i < agg_info->size(); i++) { aggregateLevel <<< num_blocks, threads_per_block>>>(cur_geo_idx.raw(), agg_info->at(i)->raw(), this->num_origonal); cudaCheckError(); } if (this->dimension == 2) { thrust::sort_by_key(cur_geo_idx.begin(), cur_geo_idx.end(), thrust::make_zip_iterator(thrust::make_tuple(this->cord_x->begin(), this->cord_y->begin()))); thrust::reduce_by_key( cur_geo_idx.begin(), cur_geo_idx.end(), thrust::make_zip_iterator(thrust::make_tuple(this->cord_x->begin(), this->cord_y->begin(), v_counter.begin())), r_coord.begin(), thrust::make_zip_iterator(thrust::make_tuple(ngeo_x.begin(), ngeo_y.begin(), v_new_counter.begin())), thrust::equal_to<int>(), reduce_functor2< thrust::tuple< ValueType, ValueType, IndexType>, ValueType, IndexType > () ); } else { thrust::sort_by_key(cur_geo_idx.begin(), cur_geo_idx.end(), thrust::make_zip_iterator(thrust::make_tuple(this->cord_x->begin(), this->cord_y->begin(), this->cord_z->begin()))); thrust::reduce_by_key( cur_geo_idx.begin(), cur_geo_idx.end(), thrust::make_zip_iterator(thrust::make_tuple(this->cord_x->begin(), this->cord_y->begin(), this->cord_z->begin(), v_counter.begin())), r_coord.begin(), thrust::make_zip_iterator(thrust::make_tuple(ngeo_x.begin(), ngeo_y.begin(), ngeo_z.begin(), v_new_counter.begin())), thrust::equal_to<int>(), reduce_functor3< thrust::tuple< ValueType, ValueType, ValueType, IndexType>, ValueType, IndexType > () ); } cudaCheckError(); } // Choose the aggregates for csr_matrix_d format template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void GEO_Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_1x1( Matrix_d &A, typename Matrix_d::IVector &aggregates, typename Matrix_d::IVector &aggregates_global, int &num_aggregates) { if (this->dimension == 0) { FatalError("No input geometric information, exiting", AMGX_ERR_BAD_PARAMETERS); } //initialize the aggregates vector int n = A.get_num_rows(); aggregates.resize(n); thrust::fill(aggregates.begin(), aggregates.end(), -1); cudaCheckError(); int nlevel; if (this->dimension == 2) { nlevel = floor(log(sqrt((double)n)) / log(2.0)); } if 
(this->dimension == 3) { nlevel = ceil(log(cbrt((double)n)) / log(2.0)); } int num_per_row = (int)std::pow(2, nlevel - 1); const int threads_per_block = 512; const int num_blocks = min( AMGX_GRID_MAX_SIZE, (n - 1) / threads_per_block + 1 ); // generate aggregation index in 1d IndexType *aggregation_ptr = thrust::raw_pointer_cast(&aggregates[0]); if (A.hasParameter("uniform_based")) { printf("GEOAggregating %d-rows matrix as uniform\n", A.get_num_rows()); int npr_orig = 2 * num_per_row; if (this->dimension == 3) { generatelabel3d <<< num_blocks, threads_per_block>>>(aggregation_ptr, n, num_per_row, npr_orig); cudaCheckError(); } else { generatelabel2d <<< num_blocks, threads_per_block>>>(aggregation_ptr, n, num_per_row, npr_orig); cudaCheckError(); } } else { std::vector<VVector *> geo_ptrs(3); // real geometry source geo_ptrs[0] = this->cord_x; geo_ptrs[1] = this->cord_y; geo_ptrs[2] = this->cord_z; // do we need to interpolate from the finest level? if (A.get_num_rows() != this->num_origonal) { this->interpolateGeoinfo(A); if (this->dimension == 3) { geo_ptrs[2] = &this->ngeo_z; } geo_ptrs[0] = &this->ngeo_x; geo_ptrs[1] = &this->ngeo_y; } //Find the boundary coordinates this->xmax = *thrust::max_element(geo_ptrs[0]->begin(), geo_ptrs[0]->end()); this->xmin = *thrust::min_element(geo_ptrs[0]->begin(), geo_ptrs[0]->end()); this->ymax = *thrust::max_element(geo_ptrs[1]->begin(), geo_ptrs[1]->end()); this->ymin = *thrust::min_element(geo_ptrs[1]->begin(), geo_ptrs[1]->end()); if (this->dimension == 3) { this->zmax = *thrust::max_element(geo_ptrs[2]->begin(), geo_ptrs[2]->end()); this->zmin = *thrust::min_element(geo_ptrs[2]->begin(), geo_ptrs[2]->end()); } cudaCheckError(); ValueType *point_ptr = geo_ptrs[0]->raw(); generatelabel1d <<< num_blocks, threads_per_block>>>(aggregation_ptr, point_ptr, this->xmin, this->xmax, n, num_per_row); cudaCheckError(); point_ptr = geo_ptrs[1]->raw(); generatelabel1dpxb <<< num_blocks, threads_per_block>>>(aggregation_ptr, num_per_row, point_ptr, this->ymin, this->ymax, n, num_per_row); cudaCheckError(); if (this->dimension > 2) { point_ptr = geo_ptrs[2]->raw(); generatelabel1dpxb <<< num_blocks, threads_per_block>>>(aggregation_ptr, num_per_row * num_per_row, point_ptr, this->zmin, this->zmax, n, num_per_row); cudaCheckError(); } } num_aggregates = num_per_row * num_per_row; if (this->dimension == 3) { num_aggregates *= num_per_row; } A.template setParameter < int > ("uniform_based", 1); // not exactly correct. 
But this is a workaround for now, since parameters are instantly copied for the coarser matrix in the aggregation level } template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void GEO_Selector<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblocks(const Matrix_d &A, typename Matrix_d::IVector &aggregates, typename Matrix_d::IVector &aggregates_global, int &num_aggregates) { FatalError("GEO_Selector not implemented for non square blocks, exiting", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } // Choose aggregates on host template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void GEO_Selector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_1x1( Matrix_h &A, IVector &aggregates, IVector &aggregates_global, int &num_aggregates) { FatalError("setAggregates not implemented on CPU, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET); } // Choose aggregates on host template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec> void GEO_Selector<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setAggregates_common_sqblocks(const Matrix_h &A, IVector &aggregates, IVector &aggregates_global, int &num_aggregates) { FatalError("setAggregates not implemented on CPU, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET); } template <class T_Config> void GEO_SelectorBase<T_Config>::setAggregates(Matrix<T_Config> &A, IVector &aggregates, IVector &aggregates_global, int &num_aggregates) { //printf("Selector: %d processing with the GEO\n", A.get_num_rows()); this->dimension = A.template getParameter<int>("dim"); cord_x = A.template getParameterPtr< VVector >("geo.x"); cord_y = A.template getParameterPtr< VVector >("geo.y"); num_origonal = A.template getParameter<int>("geo_size"); if (this->dimension == 3) { cord_z = A.template getParameterPtr< VVector >("geo.z"); } if (this->dimension == 0) { FatalError("GEO_SELECTOR: Problem dimension is not valid.", AMGX_ERR_BAD_PARAMETERS); } if (num_origonal == 0) { FatalError("Problem size is not valid.", AMGX_ERR_BAD_PARAMETERS); } if (A.get_block_size() == 1 && !A.hasProps(DIAG) ) { setAggregates_1x1( A, aggregates, aggregates_global, num_aggregates ); } else if (A.get_block_dimx() == A.get_block_dimy()) { setAggregates_1x1( A, aggregates, aggregates_global, num_aggregates ); } else { FatalError("Unsupported block size for GEO_Selector", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } } // --------------------------- // Explicit instantiations // --------------------------- #define AMGX_CASE_LINE(CASE) template class GEO_SelectorBase<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class GEO_Selector<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } }
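// Editor's note: a minimal host-side sketch (not part of AMGX) of the binning arithmetic used by
// generatelabel1d/generatelabel1dpxb above: each coordinate is mapped to one of nlevel bins along its
// axis, and the per-axis bins are combined with strides of 1, nlevel and nlevel*nlevel. The names
// bin1d and combined_label are hypothetical and exist only for illustration.
#include <cstdio>

static int bin1d(double p, double cmin, double cmax, int nlevel)
{
    // The 1.01 factor widens the range slightly so that p == cmax still falls in the last bin.
    double distance = 1.01 * (cmax - cmin);
    return (int)((p - cmin) / distance * nlevel);
}

static int combined_label(double x, double y, double z, double cmin, double cmax, int nlevel)
{
    // Same combination the kernels build up in three passes: label = bx + nlevel*by + nlevel^2*bz.
    return bin1d(x, cmin, cmax, nlevel)
         + nlevel * bin1d(y, cmin, cmax, nlevel)
         + nlevel * nlevel * bin1d(z, cmin, cmax, nlevel);
}

int main()
{
    // A point at the center of a unit cube with 4 bins per axis lands in bin (1, 1, 1),
    // i.e. label 1 + 4*1 + 16*1 = 21.
    printf("%d\n", combined_label(0.5, 0.5, 0.5, 0.0, 1.0, 4));
    return 0;
}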
the_stack
using namespace std; // Forward declaration template <class real> void RunTest(string testName, ResultDatabase &resultDB, OptionParser &op); // ******************************************************** // Function: toString // // Purpose: // Simple templated function to convert objects into // strings using stringstream // // Arguments: // t: the object to convert to a string // // Returns: a string representation // // Modifications: // // ******************************************************** template<class T> inline string toString(const T& t) { stringstream ss; ss << t; return ss.str(); } // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing // // Arguments: // op: the options parser / parameter database // // Returns: nothing // // Programmer: Kyle Spafford // Creation: March 13, 2010 // // Modifications: // // **************************************************************************** void addBenchmarkSpecOptions(OptionParser &op) { ; // No S3D specific options } // **************************************************************************** // Function: RunBenchmark // // Purpose: // Executes the S3D benchmark // // Arguments: // resultDB: results from the benchmark are stored in this db // op: the options parser / parameter database // // Returns: nothing // // Programmer: Kyle Spafford // Creation: March 13, 2010 // // Modifications: // // **************************************************************************** void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) { // Always run the single precision test RunTest<float>("S3D-SP", resultDB, op); // Check to see if the device supports double precision int device; cudaGetDevice(&device); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, device); if ((deviceProp.major == 1 && deviceProp.minor >= 3) || (deviceProp.major >= 2)) { cout << "Running double precision test" << endl; RunTest<double>("S3D-DP", resultDB, op); } else { cout << "Skipping double precision test" << endl; char atts[1024] = "DP_Not_Supported"; // resultDB requires neg entry for every possible result unsigned int passes = op.getOptionInt("passes"); for (unsigned int i = 0; i < passes; i++) { resultDB.AddResult("S3D-DP" , atts, "GFLOPS/s", FLT_MAX); resultDB.AddResult("S3D-DP_PCIe" , atts, "GFLOPS/s", FLT_MAX); resultDB.AddResult("S3D-DP_Parity" , atts, "GFLOPS/s", FLT_MAX); } } } template <class real> void RunTest(string testName, ResultDatabase &resultDB, OptionParser &op) { // Number of grid points (specified in header file) int probSizes_SP[4] = { 24, 32, 40, 48}; int probSizes_DP[4] = { 16, 24, 32, 40}; int *probSizes = (sizeof(real) == sizeof(double)) ? 
probSizes_DP : probSizes_SP; int sizeClass = op.getOptionInt("size") - 1; assert(sizeClass >= 0 && sizeClass < 4); sizeClass = probSizes[sizeClass]; int n = sizeClass * sizeClass * sizeClass; // Host variables real* host_t; real* host_p; real* host_y; real* host_wdot; real* host_molwt; // GPU Variables real* gpu_t; //Temperatures array real* gpu_p; //Pressures array real* gpu_y; //Mass fractions real* gpu_wdot; //Output variables // GPU Intermediate Variables real* gpu_rf, *gpu_rb; real* gpu_rklow; real* gpu_c; real* gpu_a; real* gpu_eg; real* gpu_molwt; // CUDA streams cudaStream_t s1, s2; // configure kernels for large L1 cache, as we don't need shared memory // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(ratt_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(rdsmh_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(gr_base, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(ratt2_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(ratt3_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(ratt4_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(ratt5_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(ratt6_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(ratt7_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(ratt8_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(ratt9_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(ratx_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(ratxb_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(ratx2_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(ratx4_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(qssa_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(qssab_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(qssa2_kernel, cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(rdwdot_kernel, // cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(rdwdot2_kernel, // cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(rdwdot3_kernel, // cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(rdwdot6_kernel, // cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(rdwdot7_kernel, // cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(rdwdot8_kernel, // cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(rdwdot9_kernel, // cudaFuncCachePreferL1)); // CUDA_SAFE_CALL(cudaFuncSetCacheConfig(rdwdot10_kernel, // cudaFuncCachePreferL1)); // Malloc host memory CUDA_SAFE_CALL(cudaMallocHost((void**)&host_t, n*sizeof(real))); CUDA_SAFE_CALL(cudaMallocHost((void**)&host_p, n*sizeof(real))); CUDA_SAFE_CALL(cudaMallocHost((void**)&host_y, Y_SIZE*n*sizeof(real))); CUDA_SAFE_CALL(cudaMallocHost((void**)&host_wdot,WDOT_SIZE*n*sizeof(real))); CUDA_SAFE_CALL(cudaMallocHost((void**)&host_molwt,WDOT_SIZE*sizeof(real))); // Create streams CUDA_SAFE_CALL(cudaStreamCreate(&s1)); CUDA_SAFE_CALL(cudaStreamCreate(&s2)); // Initialize Test Problem // For now these are just 1, to compare results between cpu & gpu real rateconv = 1.0; real tconv = 1.0; real pconv = 1.0; // Initialize temp and pressure for (int i=0; i<n; i++) { host_p[i] = 1.0132e6; host_t[i] = 1000.0; } // Init molwt: for now these are just 1, to 
compare results betw. cpu & gpu for (int i=0; i<WDOT_SIZE; i++) { host_molwt[i] = 1; } // Initialize mass fractions for (int j=0; j<Y_SIZE; j++) { for (int i=0; i<n; i++) { host_y[(j*n)+i]= 0.0; if (j==14) host_y[(j*n)+i] = 0.064; if (j==3) host_y[(j*n)+i] = 0.218; if (j==21) host_y[(j*n)+i] = 0.718; } } // Malloc GPU memory CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_t, n*sizeof(real))); CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_p, n*sizeof(real))); CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_y, Y_SIZE*n*sizeof(real))); CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_wdot, WDOT_SIZE*n*sizeof(real))); CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_rf, RF_SIZE*n*sizeof(real))); CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_rb, RB_SIZE*n*sizeof(real))); CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_rklow, RKLOW_SIZE*n*sizeof(real))); CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_c, C_SIZE*n*sizeof(real))); CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_a, A_SIZE*n*sizeof(real))); CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_eg, EG_SIZE*n*sizeof(real))); CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_molwt, WDOT_SIZE*sizeof(real))); // Get kernel launch config, assuming n is divisible by block size dim3 thrds(BLOCK_SIZE,1,1); dim3 blks(n / BLOCK_SIZE,1,1); dim3 thrds2(BLOCK_SIZE2,1,1); dim3 blks2(n / BLOCK_SIZE2,1,1); cudaEvent_t start, stop; CUDA_SAFE_CALL(cudaEventCreate(&start)); CUDA_SAFE_CALL(cudaEventCreate(&stop)); // Download of gpu_t, gpu_p, gpu_y, gpu_molwt CUDA_SAFE_CALL(cudaEventRecord(start, 0)); CUDA_SAFE_CALL(cudaMemcpyAsync(gpu_t, host_t, n*sizeof(real), cudaMemcpyHostToDevice, s1)); CUDA_SAFE_CALL(cudaMemcpyAsync(gpu_p, host_p, n*sizeof(real), cudaMemcpyHostToDevice, s2)); CUDA_SAFE_CALL(cudaMemcpyAsync(gpu_y, host_y, Y_SIZE*n*sizeof(real), cudaMemcpyHostToDevice, s2)); CUDA_SAFE_CALL(cudaMemcpyAsync(gpu_molwt,host_molwt,WDOT_SIZE*sizeof(real), cudaMemcpyHostToDevice, s2)); CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); CUDA_SAFE_CALL(cudaEventSynchronize(stop)); // Get elapsed transfer time float iTransferTime = 0.0f; cudaEventElapsedTime(&iTransferTime, start, stop); iTransferTime *= 1.e-3; unsigned int passes = op.getOptionInt("passes"); for (unsigned int i = 0; i < passes; i++) { CUDA_SAFE_CALL(cudaEventRecord(start, 0)); ratt_kernel <<<blks2,thrds2,0,s1>>>(gpu_t, gpu_rf, tconv); rdsmh_kernel <<<blks2,thrds2,0,s1>>>(gpu_t, gpu_eg, tconv); gr_base <<<blks2,thrds2,0,s2>>>(gpu_p, gpu_t, gpu_y, gpu_c, tconv, pconv); ratt2_kernel <<<blks2,thrds2,0,s1>>>(gpu_t, gpu_rf, gpu_rb, gpu_eg, tconv); ratt3_kernel <<<blks2,thrds2,0,s1>>>(gpu_t, gpu_rf, gpu_rb, gpu_eg, tconv); ratt4_kernel <<<blks2,thrds2,0,s1>>>(gpu_t, gpu_rf, gpu_rb, gpu_eg, tconv); ratt5_kernel <<<blks2,thrds2,0,s1>>>(gpu_t, gpu_rf, gpu_rb, gpu_eg, tconv); ratt6_kernel <<<blks2,thrds2,0,s1>>>(gpu_t, gpu_rf, gpu_rb, gpu_eg, tconv); ratt7_kernel <<<blks2,thrds2,0,s1>>>(gpu_t, gpu_rf, gpu_rb, gpu_eg, tconv); ratt8_kernel <<<blks2,thrds2,0,s1>>>(gpu_t, gpu_rf, gpu_rb, gpu_eg, tconv); ratt9_kernel <<<blks2,thrds2,0,s1>>>(gpu_t, gpu_rf, gpu_rb, gpu_eg, tconv); ratt10_kernel <<<blks2,thrds2,0,s1>>>(gpu_t, gpu_rklow, tconv); ratx_kernel <<<blks, thrds, 0,s1>>>(gpu_t, gpu_c, gpu_rf, gpu_rb, gpu_rklow, tconv); ratxb_kernel <<<blks, thrds, 0,s1>>>(gpu_t, gpu_c, gpu_rf, gpu_rb, gpu_rklow, tconv); ratx2_kernel <<<blks2,thrds2,0,s1>>>(gpu_c, gpu_rf, gpu_rb); ratx4_kernel <<<blks2,thrds2,0,s1>>>(gpu_c, gpu_rf, gpu_rb); qssa_kernel <<<blks2,thrds2,0,s1>>>(gpu_rf, gpu_rb, gpu_a); qssab_kernel <<<blks, thrds, 0,s1>>>(gpu_rf, gpu_rb, gpu_a); qssa2_kernel <<<blks2,thrds2,0,s1>>>(gpu_rf, gpu_rb, gpu_a); 
rdwdot_kernel <<<blks2,thrds2,0,s1>>>(gpu_rf, gpu_rb, gpu_wdot, rateconv, gpu_molwt); rdwdot2_kernel <<<blks2,thrds2,0,s1>>>(gpu_rf, gpu_rb, gpu_wdot, rateconv, gpu_molwt); rdwdot3_kernel <<<blks2,thrds2,0,s1>>>(gpu_rf, gpu_rb, gpu_wdot, rateconv, gpu_molwt); rdwdot6_kernel <<<blks2,thrds2,0,s1>>>(gpu_rf, gpu_rb, gpu_wdot, rateconv, gpu_molwt); rdwdot7_kernel <<<blks2,thrds2,0,s1>>>(gpu_rf, gpu_rb, gpu_wdot, rateconv, gpu_molwt); rdwdot8_kernel <<<blks2,thrds2,0,s1>>>(gpu_rf, gpu_rb, gpu_wdot, rateconv, gpu_molwt); rdwdot9_kernel <<<blks2,thrds2,0,s1>>>(gpu_rf, gpu_rb, gpu_wdot, rateconv, gpu_molwt); rdwdot10_kernel<<<blks2,thrds2,0,s1>>>(gpu_rf, gpu_rb, gpu_wdot, rateconv, gpu_molwt); CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); CUDA_SAFE_CALL(cudaEventSynchronize(stop)); // Get elapsed kernel time float kernelTime = 0.0f; cudaEventElapsedTime(&kernelTime, start, stop); kernelTime *= 1.e-3; // Copy back result CUDA_SAFE_CALL(cudaEventRecord(start, 0)); CUDA_SAFE_CALL(cudaMemcpyAsync(host_wdot, gpu_wdot, WDOT_SIZE * n * sizeof(real), cudaMemcpyDeviceToHost, s1)); CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); CUDA_SAFE_CALL(cudaEventSynchronize(stop)); // Get elapsed transfer time float oTransferTime = 0.0f; cudaEventElapsedTime(&oTransferTime, start, stop); oTransferTime *= 1.e-3; // Approximately 10k flops per grid point (estimated by Ramanan) double gflops = ((n*10000.) / 1.e9); resultDB.AddResult(testName, toString(n) + "_gridPoints", "GFLOPS", gflops / kernelTime); resultDB.AddResult(testName + "_PCIe", toString(n) + "_gridPoints", "GFLOPS", gflops / (kernelTime + iTransferTime + oTransferTime)); resultDB.AddResult(testName + "_Parity", toString(n) + "_gridPoints", "N", (iTransferTime + oTransferTime) / kernelTime); } // // Print out answers to compare with CPU // for (int i=0; i<WDOT_SIZE; i++) { // printf("% 23.16E ", host_wdot[i*n]); // if (i % 3 == 2) // printf("\n"); // } // printf("\n"); // Destroy streams and events CUDA_SAFE_CALL(cudaStreamDestroy(s1)); CUDA_SAFE_CALL(cudaStreamDestroy(s2)); CUDA_SAFE_CALL(cudaEventDestroy(start)); CUDA_SAFE_CALL(cudaEventDestroy(stop)); // Free GPU memory CUDA_SAFE_CALL(cudaFree(gpu_t)); CUDA_SAFE_CALL(cudaFree(gpu_p)); CUDA_SAFE_CALL(cudaFree(gpu_y)); CUDA_SAFE_CALL(cudaFree(gpu_wdot)); CUDA_SAFE_CALL(cudaFree(gpu_rf)); CUDA_SAFE_CALL(cudaFree(gpu_rb)); CUDA_SAFE_CALL(cudaFree(gpu_c)); CUDA_SAFE_CALL(cudaFree(gpu_rklow)); CUDA_SAFE_CALL(cudaFree(gpu_a)); CUDA_SAFE_CALL(cudaFree(gpu_eg)); CUDA_SAFE_CALL(cudaFree(gpu_molwt)); // Free host memory CUDA_SAFE_CALL(cudaFreeHost(host_t)); CUDA_SAFE_CALL(cudaFreeHost(host_p)); CUDA_SAFE_CALL(cudaFreeHost(host_y)); CUDA_SAFE_CALL(cudaFreeHost(host_wdot)); CUDA_SAFE_CALL(cudaFreeHost(host_molwt)); }
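// Editor's note: a standalone sketch (not part of the SHOC benchmark) of the timing pattern used
// above: bracket the work with cudaEvent records, convert the elapsed milliseconds to seconds, and
// divide an estimated flop count by the elapsed time to report GFLOPS. The dummy kernel and its
// 2-flops-per-element estimate are assumptions made purely for illustration.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy(float *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 2.0f + 1.0f;   // roughly 2 flops per element
}

int main()
{
    const int n = 1 << 20;
    float *d = NULL;
    cudaMalloc(&d, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    dummy<<<(n + 255) / 256, 256>>>(d, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);   // elapsed time in milliseconds
    double seconds = ms * 1.e-3;
    double gflops = (2.0 * n) / 1.e9;         // estimated flop count / 1e9
    printf("%.3f GFLOPS\n", gflops / seconds);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d);
    return 0;
}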
the_stack
// SalientRegionDetect.cu // Implements salient region detection for images #include "SalientRegionDetect.h" #include <iostream> #include <stdio.h> #include <cmath> using namespace std; #include "ErrorCode.h" // Macros: DEF_BLOCK_X and DEF_BLOCK_Y // Define the default thread block dimensions. #define DEF_BLOCK_X 32 #define DEF_BLOCK_Y 8 // Macro: MAX_TEMPLATE // Defines the maximum neighborhood template size. #ifndef MAX_TEMPLATE #define MAX_TEMPLATE 32 #endif // Macro: GRAY_LEVEL // Defines the range of gray levels. #ifndef GRAY_LEVEL #define GRAY_LEVEL 256 #endif // Device global constant: _gaussCoeffDev (Gaussian template weights) static __device__ float _gaussCoeffDev[6][170] = { // 3 * 3 template { 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f, 2.0f / 16.0f, 4.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f }, // 5 * 5 template { 1.0f / 352.0f, 5.0f / 352.0f, 8.0f / 352.0f, 5.0f / 352.0f, 1.0f / 352.0f, 5.0f / 352.0f, 21.0f / 352.0f, 34.0f / 352.0f, 21.0f / 352.0f, 5.0f / 352.0f, 8.0f / 352.0f, 34.0f / 352.0f, 56.0f / 352.0f, 34.0f / 352.0f, 8.0f / 352.0f, 5.0f / 352.0f, 21.0f / 352.0f, 34.0f / 352.0f, 21.0f / 352.0f, 5.0f / 352.0f, 1.0f / 352.0f, 5.0f / 352.0f, 8.0f / 352.0f, 5.0f / 352.0f, 1.0f / 352.0f }, // 7 * 7 template { 1.0f / 50888.0f, 12.0f / 50888.0f, 55.0f / 50888.0f, 90.0f / 50888.0f, 55.0f / 50888.0f, 12.0f / 50888.0f, 1.0f / 50888.0f, 12.0f / 50888.0f, 148.0f / 50888.0f, 665.0f / 50888.0f, 1097.0f / 50888.0f, 665.0f / 50888.0f, 148.0f / 50888.0f, 12.0f / 50888.0f, 55.0f / 50888.0f, 665.0f / 50888.0f, 2981.0f / 50888.0f, 4915.0f / 50888.0f, 2981.0f / 50888.0f, 665.0f / 50888.0f, 55.0f / 50888.0f, 90.0f / 50888.0f, 1097.0f / 50888.0f, 4915.0f / 50888.0f, 8104.0f / 50888.0f, 4915.0f / 50888.0f, 1097.0f / 50888.0f, 90.0f / 50888.0f, 55.0f / 50888.0f, 665.0f / 50888.0f, 2981.0f / 50888.0f, 4915.0f / 50888.0f, 2981.0f / 50888.0f, 665.0f / 50888.0f, 55.0f / 50888.0f, 12.0f / 50888.0f, 148.0f / 50888.0f, 665.0f / 50888.0f, 1097.0f / 50888.0f, 665.0f / 50888.0f, 148.0f / 50888.0f, 12.0f / 50888.0f, 1.0f / 50888.0f, 12.0f / 50888.0f, 55.0f / 50888.0f, 90.0f / 50888.0f, 55.0f / 50888.0f, 12.0f / 50888.0f, 1.0f / 50888.0f } }; // Kernel function: _saliencyMapByDiffValueKer (difference-value saliency) // Computes a saliency value for every pixel of the image. With each pixel as the center, the // gray-level differences to all pixels within the neighborhood radius are computed; the differences are // sorted in descending order, a number of values at the head and at the tail of the ranking are discarded // (controlled by highPercent and lowPercent), only the middle part of the ranking is kept, and its mean is // taken as the saliency value, forming an initial saliency map. The radius is then changed and the // computation repeated, yielding several initial saliency maps; all of them are accumulated and averaged // to obtain the final mean saliency map, which is written to outimg. static __global__ void // Kernel functions have no return value. _saliencyMapByDiffValueKer( ImageCuda inimg, // input image ImageCuda outimg, // output image int *radius, // template radii int iteration, // number of iterations float hightpercent, // high-end fraction of the array float lowpercent // low-end fraction of the array ); // Kernel function: _saliencyMapByDiffValueKer (difference-value saliency) // Computes a saliency value for every pixel of the image. With each pixel as the center, the // gray-level differences to all pixels within the neighborhood radius are computed; no selection is // applied to the differences, and the mean of all of them is taken as the saliency value, forming an // initial saliency map. The radius is then changed and the computation repeated, yielding several initial // saliency maps; all of them are accumulated and averaged to obtain the final mean saliency map, which is // written to outimg. static __global__ void // Kernel functions have no return value. _saliencyMapByDiffValueKer( ImageCuda inimg, // input image ImageCuda outimg, // output image int *radius, // template radii int iteration // number of iterations ); // Kernel function: _saliencyMapBySmoothKer (Gaussian-smoothing saliency) // Computes a saliency value for every pixel of the image. The original image is processed with a // Gaussian smoothing filter whose scale is given by smoothWidth, and the smoothed result is differenced as // a whole against the image of neighborhood arithmetic means, giving an initial saliency map. smoothWidth // is then changed and the computation repeated, yielding several initial saliency maps; all of them are // accumulated and averaged to obtain the final mean saliency map, which is written to outimg. static __global__ void // Kernel functions have no return value. _saliencyMapBySmoothKer( ImageCuda inimg, // input image ImageCuda outimg, // output image int *smoothwidth, // smoothing template widths int iteration // number of iterations ); // Kernel function: _saliencyMapAverageKer (compute the averaged saliency value) // Takes the weighted average of the difference-value saliency result and the Gaussian-smoothing // saliency result, where weightSM1 is //
the weight of the difference-value saliency and weightSM2 is the weight of the Gaussian-smoothing saliency. The weighted // result is stored in sm2img. static __global__ void // Kernel functions have no return value. _saliencyMapAverageKer( ImageCuda sm1img, // input image ImageCuda sm2img, // output image float weightsm1, // weight of the difference-value saliency float weightsm2 // weight of the Gaussian-smoothing saliency ); // Kernel function: _regionSaliencyKer (compute accumulated region saliency) // Accumulates the saliency values of all pixels in each region. The default maximum number of regions is 256. static __global__ void // Kernel functions have no return value. _regionSaliencyKer( ImageCuda smimg, // input image ImageCuda connimg, // output image unsigned int *regionaverg // accumulated region saliency array ); // Kernel function: _regionAverageKer (compute the average saliency of each region) // Divides the accumulated region saliency array by the region areas to obtain the average region saliency. static __global__ void // Kernel functions have no return value. _regionAverageKer( unsigned int *regionaverg, // accumulated region saliency array unsigned int *regionarea, // region area array unsigned int *flag, // flags telling whether the threshold is met int saliencythred // region saliency threshold ); // Kernel function: _regionShowKer (display the salient regions) // Based on the flag array, sets the gray value of salient regions to 255 and the background to 0. static __global__ void _regionShowKer( ImageCuda inimg, // input image unsigned int *flaglabel // flag array ); // Kernel function: _saliencyMapByDiffValueKer (difference-value saliency) static __global__ void _saliencyMapByDiffValueKer( ImageCuda inimg, ImageCuda outimg, int *radius, int iteration) { // dstc and dstr are the x and y components of the pixel coordinate handled by this thread // (c stands for column, r stands for row). int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // Check whether the pixel is out of bounds; if it is, do nothing, which saves computing // resources and prevents crashes caused by out-of-range memory accesses. if (dstc >= inimg.imgMeta.width || dstr >= inimg.imgMeta.height) return; // Pointer to the gray value of the pixel being processed. unsigned char *curinptr; curinptr = inimg.imgMeta.imgData + dstc + dstr * inimg.pitchBytes; // Holds the gray value of a neighborhood pixel. unsigned char neighpixel; // Gray-level difference within the neighborhood. int diffvalue; // Accumulated sum of differences. float tempSum = 0; // Number of neighborhood pixels. int neighbors; for (int num = 0; num < iteration; num++) { int r = radius[num]; // Sum of the differences over the neighborhood. float diffsum = 0; // Compute the number of neighborhood pixels. neighbors = (2 * r - 1) * (2 * r - 1); // For the (2 * r - 1) * (2 * r - 1) neighborhood of the current pixel, compute the // pixel values and their counts. for (int j = dstr - (r - 1); j <= dstr + (r - 1); j++) { for (int i = dstc - (r - 1); i <= dstc + (r - 1); i++) { // Check whether the current pixel is out of bounds. if (j >= 0 && j < inimg.imgMeta.height && i >= 0 && i < inimg.imgMeta.width) { // Compute the gray-level difference for each neighborhood pixel. neighpixel = *(inimg.imgMeta.imgData + i + j * inimg.pitchBytes); diffvalue = *curinptr - neighpixel; if (diffvalue < 0) diffvalue = -diffvalue; // Accumulate the differences within the neighborhood. diffsum += diffvalue; } } } // Saliency value of this iteration. diffsum = diffsum / neighbors; // Accumulate the results of the iterations. tempSum += diffsum; } // Average the results of all iterations and write the saliency value to the output image. unsigned char *curoutptr; curoutptr = outimg.imgMeta.imgData + dstc + dstr * outimg.pitchBytes; *curoutptr = (int)(tempSum + 0.5f) / iteration; } // Kernel function: _saliencyMapByDiffValueKer (difference-value saliency) static __global__ void _saliencyMapByDiffValueKer( ImageCuda inimg, ImageCuda outimg, int *radius, int iteration, float hightpercent, float lowpercent) { // dstc and dstr are the x and y components of the pixel coordinate handled by this thread // (c stands for column, r stands for row). int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // Check whether the pixel is out of bounds; if it is, do nothing, which saves computing // resources and prevents crashes caused by out-of-range memory accesses. if (dstc >= inimg.imgMeta.width || dstr >= inimg.imgMeta.height) return; // Pointer to the gray value of the pixel being processed. unsigned char *curinptr; curinptr = inimg.imgMeta.imgData + dstc + dstr * inimg.pitchBytes; // Holds the gray value of a neighborhood pixel. unsigned char neighpixel; // Gray-level difference within the neighborhood. int diffvalue; // Accumulated sum of differences. float tempSum = 0; // Number of neighborhood pixels. int neighbors; for (int num = 0; num < iteration; num++) { int r = radius[num]; // Array indexed by gray-level difference, holding the count of each value. int count[256] = { 0 }; // Compute the number of neighborhood pixels. neighbors = (2 * r - 1) * (2 * r - 1); // For the (2 * r - 1) * (2 * r - 1) neighborhood of the current pixel, compute the // pixel values and their counts. for (int j = dstr - (r - 1); j <= dstr + (r - 1); 
j++) { for (int i = dstc - (r - 1); i <= dstc + (r - 1); i++) { // Check whether the current pixel is out of bounds. if (j >= 0 && j < inimg.imgMeta.height && i >= 0 && i < inimg.imgMeta.width) { // Compute the gray-level difference for each neighborhood pixel. neighpixel = *(inimg.imgMeta.imgData + i + j * inimg.pitchBytes); diffvalue = *curinptr - neighpixel; if (diffvalue < 0) diffvalue = -diffvalue; // Increment the counter for the current gray-level difference. count[diffvalue]++; } } } // Drop a number of values from the head and from the tail of the ranking. int hp = (int)(hightpercent * neighbors + 0.5f); int lp = (int)(lowpercent * neighbors + 0.5f); // Temporary variables. int lpcount = 0, hpcount = 0; // Completion flags for lp and hp. bool lpover = false, hpover = false; // Result indices for lp and hp. int lpindex = 0, hpindex = 0; int lpresidual = 0, hpresidual = 0; // Scan the array to locate the lp and hp positions. for (int lpi = 0; lpi < 256; lpi++) { // Selection finished. if (lpover == true && hpover == true) break; // Handle the low-end data lp. lpcount += count[lpi]; if (lpover == false && lpcount >= lp) { lpindex = lpi + 1; lpover = true; lpresidual = lpi * (lpcount - lp); } // Handle the high-end data hp. int hpi = 255 - lpi; hpcount += count[hpi]; if (hpover == false && hpcount >= hp) { hpindex = hpi - 1; hpover = true; hpresidual = hpi * (hpcount - hp); } } // If lp exceeds hp, exit on error. if (lpindex > hpindex) return; // Compute the mean of the retained part. float sum = lpresidual + hpresidual; for (int j = lpindex; j <= hpindex; j++) { sum += count[j] * j; } // Saliency value of this iteration. sum = sum / (neighbors - lp - hp); // Accumulate the results of the iterations. tempSum += sum; } // Average the results of all iterations and write the saliency value to the output image. unsigned char *curoutptr; curoutptr = outimg.imgMeta.imgData + dstc + dstr * outimg.pitchBytes; *curoutptr = (int)(tempSum + 0.5f) / iteration; } // Host member method: saliencyMapByDiffValue (difference-value saliency) __host__ int SalientRegionDetect::saliencyMapByDiffValue(Image *inimg, Image *outimg) { // Check whether the input and output images are null. if (inimg == NULL || outimg == NULL) return NULL_POINTER; // Check whether the input parameters are null. if (this->radius == NULL || iterationSM1 == 0) return NULL_POINTER; // This block performs image preprocessing, which mainly prepares device memory for the // input and output images so they can hold the data. int errcode; // local variable, error code // Copy the input image to device memory. errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // Copy the output image into device memory. errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // If the output image has no data (so the copy above fails), create an image with the // same size as the input image's ROI sub-image. errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // If creating the image also fails, the operation has failed completely; report the error and return. if (errcode != NO_ERROR) return errcode; } // Extract the ROI sub-image of the input image. ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // Extract the ROI sub-image of the output image. ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // Adjust the width and height of the sub-images, taking the smaller of each so the two sub-images match. if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // Compute the thread block size and grid size for the kernel launch. dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / DEF_BLOCK_X; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / DEF_BLOCK_Y; // Allocate the template radius array on the device. int *devradius; cudaError_t cudaerrcode; cudaerrcode = cudaMalloc((void** )&devradius, iterationSM1 * sizeof (int)); if (cudaerrcode != cudaSuccess) return cudaerrcode; // Copy the host-side radius array to 
devradius on the device. cudaerrcode = cudaMemcpy(devradius, radius, iterationSM1 * sizeof (int), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) return cudaerrcode; if (this->isSelect == true) { // Launch the kernel that selects from the difference array and computes each pixel's saliency. _saliencyMapByDiffValueKer<<<gridsize, blocksize>>>( insubimgCud, outsubimgCud, devradius, iterationSM1, highPercent, lowPercent); } else { // Launch the kernel that computes each pixel's saliency without selecting from the difference array. _saliencyMapByDiffValueKer<<<gridsize, blocksize>>>( insubimgCud, outsubimgCud, devradius, iterationSM1); } // Check whether the kernel failed. if (cudaGetLastError() != cudaSuccess) { cudaFree(devradius); return CUDA_ERROR; } // Free the device-side radius array. cudaFree(devradius); // Done; return. return NO_ERROR; } // Kernel function: _saliencyMapBySmoothKer (Gaussian-smoothing saliency) static __global__ void _saliencyMapBySmoothKer( ImageCuda inimg, ImageCuda outimg, int *smoothwidth, int iteration) { // Compute the position of the output point handled by this thread, where dstc and dstr are the x and y // components of the pixel coordinate (c stands for column; r stands for row). int dstc = blockIdx.x * blockDim.x + threadIdx.x; int dstr = blockIdx.y * blockDim.y + threadIdx.y; // Check whether the pixel is out of bounds; if it is, do nothing, which saves computing // resources and prevents crashes caused by out-of-range memory accesses. if (dstc >= inimg.imgMeta.width || dstr >= inimg.imgMeta.height) return; // x and y components of a temporary pixel coordinate. int dx, dy; // Holds the gray value of a neighborhood pixel. unsigned char neighpixel; float gaussitera = 0.0f, meanitera = 0.0f; for (int num = 0; num < iteration; num++) { int w = smoothwidth[num]; int neighbors = w * w; // Fetch the Gaussian template weights. float *gaussweight = _gaussCoeffDev[num]; // Arithmetic mean weight. float meanweight = 1.0f / neighbors; // Weighted sums. float gausssum = 0.0f, meansum = 0.0f; // Loop over every pixel in the neighborhood. for (int i = 0; i < neighbors; i++) { // Compute the x and y coordinates of each point. dx = dstc + i % w - 1; dy = dstr + i / w - 1; // First check whether the current pixel is out of bounds; if so, skip it and scan the next point. if (dx >= 0 && dx < inimg.imgMeta.width && dy >= 0 && dy < inimg.imgMeta.height) { // Fetch the neighborhood pixel's gray value from dx and dy. neighpixel = *(inimg.imgMeta.imgData + dx + dy * inimg.pitchBytes); // Accumulate this iteration's Gaussian-smoothing weighted sum. gausssum += neighpixel * gaussweight[i]; // Accumulate this iteration's arithmetic-mean weighted sum. meansum += neighpixel * meanweight; } } // Accumulate the results of the iterations. gaussitera += gausssum; meanitera += meansum; } // Average the results of all iterations and write the saliency value to the output image. unsigned char *curoutptr; curoutptr = outimg.imgMeta.imgData + dstc + dstr * outimg.pitchBytes; *curoutptr = (unsigned char)((gaussitera - meanitera) / iteration + 0.5f); } // Host member method: saliencyMapBySmooth (Gaussian-smoothing saliency) __host__ int SalientRegionDetect::saliencyMapBySmooth(Image *inimg, Image *outimg) { // Check whether the input and output images are null. if (inimg == NULL || outimg == NULL) return NULL_POINTER; // Check whether the input parameter smoothWidth is null. if (this->smoothWidth == NULL || iterationSM2 == 0) return NULL_POINTER; // This block performs image preprocessing, which mainly prepares device memory for the // input and output images so they can hold the data. int errcode; // local variable, error code // Copy the input image to device memory. errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // Copy the output image into device memory. errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // If the output image has no data (so the copy above fails), create an image with the // same size as the input image's ROI sub-image. errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // If creating the image also fails, the operation has failed completely; report the error and return. if (errcode != NO_ERROR) return errcode; } // Extract the ROI sub-image of the input image. ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // Extract the ROI sub-image of the output image. ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // Adjust the width and height of the sub-images, taking the smaller of each so the two sub-images match. if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; 
else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // Compute the thread block size and grid size for the kernel launch. dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (outsubimgCud.imgMeta.width + blocksize.x - 1) / DEF_BLOCK_X; gridsize.y = (outsubimgCud.imgMeta.height + blocksize.y - 1) / DEF_BLOCK_Y; // Allocate the smoothing template array on the device. int *devwidth; cudaError_t cudaerrcode; cudaerrcode = cudaMalloc((void** )&devwidth, iterationSM2 * sizeof (int)); if (cudaerrcode != cudaSuccess) return cudaerrcode; // Copy smoothWidth from the host to devwidth on the device. cudaerrcode = cudaMemcpy(devwidth, smoothWidth, iterationSM2 * sizeof (int), cudaMemcpyHostToDevice); if (cudaerrcode != cudaSuccess) return cudaerrcode; // Launch the kernel that computes the Gaussian-smoothing average. _saliencyMapBySmoothKer<<<gridsize, blocksize>>>( insubimgCud, outsubimgCud, devwidth, iterationSM2); // Check whether the kernel failed. if (cudaGetLastError() != cudaSuccess) { cudaFree(devwidth); return CUDA_ERROR; } // Free the device-side memory. cudaFree(devwidth); // Done; return. return NO_ERROR; } // Kernel function: _saliencyMapAverageKer (compute the averaged saliency value) static __global__ void _saliencyMapAverageKer( ImageCuda inimg, ImageCuda outimg, float w1, float w2) { // Compute the position of the output point handled by this thread, where c and r are the x and y // components of the pixel coordinate (c stands for column; r stands for row). Because a // parallelism-reduction strategy is used, one thread processes 4 output pixels, which lie in the same // column on 4 consecutive rows, so r has to be multiplied by 4. int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // Check whether the first pixel is out of bounds; if it is, do nothing, which saves computing // resources and prevents crashes caused by out-of-range memory accesses. if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // Compute the image-data array index of the first input point. int inidx = r * inimg.pitchBytes + c; // Compute the image-data array index of the first output point. int outidx = r * outimg.pitchBytes + c; // Read the pixel value of the first input point. unsigned char intemp; intemp = inimg.imgMeta.imgData[inidx]; // Read the pixel value of the first output point. unsigned char outtemp; outtemp = outimg.imgMeta.imgData[outidx]; // One thread processes four pixels. 
// Take the weighted average of the difference-value saliency and the Gaussian-smoothing saliency; weightSM1 // weights the former and weightSM2 the latter. // First point handled by this thread. outimg.imgMeta.imgData[outidx] = (unsigned char)(intemp * w1 + outtemp * w2 + 0.5f); // Handle the remaining three pixels. for (int i =0; i < 3; i++) { // Each of these three pixels is on the row below the previous one while the x component stays // the same, so only the y component has to be checked for out-of-bounds; the x component does not // change between points and needs no check. if (++r >= outimg.imgMeta.height) return; // Compute the current pixel's index from the previous one; since only the y component // increases by 1, the index just advances by one pitch, with no multiplication needed. inidx += inimg.pitchBytes; outidx += outimg.pitchBytes; intemp = inimg.imgMeta.imgData[inidx]; outtemp = outimg.imgMeta.imgData[outidx]; // Write the saliency value to the output image. outimg.imgMeta.imgData[outidx] = (unsigned char)(intemp * w1 + outtemp * w2 + 0.5f); } } // Kernel function: _regionSaliencyKer (compute accumulated region saliency) static __global__ void _regionSaliencyKer( ImageCuda smimg, ImageCuda connimg, unsigned int *regionsacy) { // Compute the position of the output point handled by this thread, where c and r are the x and y // components of the pixel coordinate (c stands for column; r stands for row). Because a // parallelism-reduction strategy is used, one thread processes 4 output pixels, which lie in the same // column on 4 consecutive rows, so r has to be multiplied by 4. int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // Check whether the first pixel is out of bounds; if it is, do nothing, which saves computing // resources and prevents crashes caused by out-of-range memory accesses. if (c >= smimg.imgMeta.width || r >= smimg.imgMeta.height) return; // Compute the image-data array index of the first input point. int smidx = r * smimg.pitchBytes + c; // Compute the image-data array index of the first output point. int connidx = r * connimg.pitchBytes + c; // Read the pixel value of the first input point. unsigned char smtemp; smtemp = smimg.imgMeta.imgData[smidx]; unsigned char conntemp; conntemp = connimg.imgMeta.imgData[connidx]; // One thread processes four pixels. // Accumulate the region's saliency. // First point handled by this thread. regionsacy[conntemp] += smtemp ; // Handle the remaining three pixels. for (int i = 0; i < 3; i++) { // Each of these three pixels is on the row below the previous one while the x component stays // the same, so only the y component has to be checked for out-of-bounds; the x component does not // change between points and needs no check. if (++r >= connimg.imgMeta.height) return; // Compute the current pixel's index from the previous one; since only the y component // increases by 1, the index just advances by one pitch, with no multiplication needed. smidx += smimg.pitchBytes; connidx += connimg.pitchBytes; smtemp = smimg.imgMeta.imgData[smidx]; conntemp = connimg.imgMeta.imgData[connidx]; // Accumulate this pixel's saliency into its region's total as well. regionsacy[conntemp] += smtemp ; } } // Kernel function: _regionAverageKer (compute the average saliency of each region) static __global__ void _regionAverageKer( unsigned int *regionaverg, unsigned int *regionarea, unsigned int *flag, int saliencythred) { // Read the thread index. int tid = threadIdx.x; // Divide the accumulated region saliency by the region area to get the region's average saliency. if (regionarea[tid] > 0) regionaverg[tid] = ((float)regionaverg[tid] + 0.5f) / regionarea[tid]; if (regionaverg[tid] > saliencythred) flag[tid] = 1; else flag[tid] = 0; } // Kernel function: _regionShowKer (display the salient regions) static __global__ void _regionShowKer(ImageCuda inimg, unsigned int *flaglabel) { // Compute the position of the output point handled by this thread, where c and r are the x and y // components of the pixel coordinate (c stands for column; r stands for row). Because a // parallelism-reduction strategy is used, one thread processes 4 output pixels, which lie in the same // column on 4 consecutive rows, so r has to be multiplied by 4. int c = blockIdx.x * blockDim.x + threadIdx.x; int r = (blockIdx.y * blockDim.y + threadIdx.y) * 4; // Check whether the first pixel is out of bounds; if it is, do nothing, which saves computing // resources and prevents crashes caused by out-of-range memory accesses. if (c >= inimg.imgMeta.width || r >= inimg.imgMeta.height) return; // Compute the image-data array index of the first input point. int inidx = r * inimg.pitchBytes + c; // One thread processes four pixels. 
// If the flag for this pixel's value is 0, set the pixel to 0; // otherwise set it to 255. // First point handled by this thread. if (flaglabel[inimg.imgMeta.imgData[inidx]] == 0) inimg.imgMeta.imgData[inidx] = 0; else inimg.imgMeta.imgData[inidx] = 255; // Handle the remaining three pixels. for (int i =0; i < 3; i++) { // Each of these three pixels is on the row below the previous one while the x component stays // the same, so only the y component has to be checked for out-of-bounds; the x component does not // change between points and needs no check. if (++r >= inimg.imgMeta.height) return; // Compute the current pixel's index from the previous one; since only the y component // increases by 1, the index just advances by one pitch, with no multiplication needed. inidx += inimg.pitchBytes; // If the flag for this pixel's value is 0, set the pixel to 0; // otherwise set it to 255. if (flaglabel[inimg.imgMeta.imgData[inidx]] == 0) inimg.imgMeta.imgData[inidx] = 0; else inimg.imgMeta.imgData[inidx] = 255; } } // Host member method: saliencyRegionDetect (salient region detection) __host__ int SalientRegionDetect::saliencyRegionDetect(Image *inimg, Image *outimg) { // Check whether the input and output images are null. if (inimg == NULL || outimg == NULL) return NULL_POINTER; // Check whether the input parameters smoothWidth and radius are null. if (this->smoothWidth == NULL || this->radius == NULL) return NULL_POINTER; // This block performs image preprocessing, which mainly prepares device memory for the // input and output images so they can hold the data. int errcode; // local variable, error code. // Copy the input image to device memory. errcode = ImageBasicOp::copyToCurrentDevice(inimg); if (errcode != NO_ERROR) return errcode; // Copy the output image into device memory. errcode = ImageBasicOp::copyToCurrentDevice(outimg); if (errcode != NO_ERROR) { // If the output image has no data (so the copy above fails), create an image with the // same size as the input image's ROI sub-image. errcode = ImageBasicOp::makeAtCurrentDevice( outimg, inimg->roiX2 - inimg->roiX1, inimg->roiY2 - inimg->roiY1); // If creating the image also fails, the operation has failed completely; report the error and return. if (errcode != NO_ERROR) return errcode; } // Extract the ROI sub-image of the input image. ImageCuda insubimgCud; errcode = ImageBasicOp::roiSubImage(inimg, &insubimgCud); if (errcode != NO_ERROR) return errcode; // Extract the ROI sub-image of the output image. ImageCuda outsubimgCud; errcode = ImageBasicOp::roiSubImage(outimg, &outsubimgCud); if (errcode != NO_ERROR) return errcode; // Adjust the width and height of the sub-images, taking the smaller of each so the two sub-images match. if (insubimgCud.imgMeta.width > outsubimgCud.imgMeta.width) insubimgCud.imgMeta.width = outsubimgCud.imgMeta.width; else outsubimgCud.imgMeta.width = insubimgCud.imgMeta.width; if (insubimgCud.imgMeta.height > outsubimgCud.imgMeta.height) insubimgCud.imgMeta.height = outsubimgCud.imgMeta.height; else outsubimgCud.imgMeta.height = insubimgCud.imgMeta.height; // Allocate the intermediate images SM1 and SM2. Image *sm1, *sm2; ImageBasicOp::newImage(&sm1); ImageBasicOp::newImage(&sm2); ImageBasicOp::makeAtCurrentDevice(sm1, inimg->width, inimg->height); ImageBasicOp::makeAtCurrentDevice(sm2, inimg->width, inimg->height); // Compute saliency by the difference-value method. errcode = saliencyMapByDiffValue(inimg, sm1); if (errcode != NO_ERROR) return errcode; // Compute saliency by the Gaussian-smoothing method. errcode = saliencyMapBySmooth(inimg, sm2); if (errcode != NO_ERROR) return errcode; // Extract the ROI sub-image of sm1. ImageCuda sm1subimgCud; errcode = ImageBasicOp::roiSubImage(sm1, &sm1subimgCud); if (errcode != NO_ERROR) return errcode; // Extract the ROI sub-image of sm2. ImageCuda sm2subimgCud; errcode = ImageBasicOp::roiSubImage(sm2, &sm2subimgCud); if (errcode != NO_ERROR) return errcode; // Adjust the width and height of the sub-images, taking the smaller of each so the two sub-images match. if (sm1subimgCud.imgMeta.width > sm2subimgCud.imgMeta.width) sm1subimgCud.imgMeta.width = sm2subimgCud.imgMeta.width; else sm2subimgCud.imgMeta.width = sm1subimgCud.imgMeta.width; if (sm1subimgCud.imgMeta.height > sm2subimgCud.imgMeta.height) sm1subimgCud.imgMeta.height = sm2subimgCud.imgMeta.height; else sm2subimgCud.imgMeta.height = sm1subimgCud.imgMeta.height; // Compute the thread block size and grid size for the kernel launch. dim3 blocksize, gridsize; blocksize.x = DEF_BLOCK_X; blocksize.y = DEF_BLOCK_Y; gridsize.x = (sm2subimgCud.imgMeta.width + blocksize.x - 1) / blocksize.x; gridsize.y = 
(sm2subimgCud.imgMeta.height + blocksize.y * 4 - 1) / (blocksize.y * 4); // Merge the two saliency algorithms by weighted sum; the result is stored in sm2. _saliencyMapAverageKer<<<gridsize, blocksize>>>(sm1subimgCud, sm2subimgCud, weightSM1, weightSM2); // Check whether the kernel failed. if (cudaGetLastError() != cudaSuccess) { ImageBasicOp::deleteImage(sm1); ImageBasicOp::deleteImage(sm2); return CUDA_ERROR; } // Segment the saliency map into connected regions. ConnectRegion cr; cr.setThreshold(this->threshold); cr.setMinArea(this->minRegion); cr.setMaxArea(this->maxRegion); cr.connectRegion(sm2, outimg); // Compute each region's average saliency. // Allocate temporary device memory; request all of it at once and index the individual arrays by offsets. cudaError_t cudaerrcode; unsigned int *alldevicedata; unsigned int *devhistogram, *devregionAverg, *devflag; cudaerrcode = cudaMalloc((void** )&alldevicedata, 3 * GRAY_LEVEL * sizeof (unsigned int)); if (cudaerrcode != cudaSuccess) return cudaerrcode; // Initialize the device memory. cudaerrcode = cudaMemset(alldevicedata, 0, 3 * GRAY_LEVEL * sizeof (unsigned int)); if (cudaerrcode != cudaSuccess) return cudaerrcode; // devhistogram is addressed by offset into the allocation. devhistogram = alldevicedata; // Compute the region areas via a histogram, stored in devhistogram. Histogram hist; errcode = hist.histogram(outimg, devhistogram, 0); if (errcode != NO_ERROR) return errcode; // Compute each region's accumulated saliency. devregionAverg = alldevicedata + GRAY_LEVEL; _regionSaliencyKer<<<gridsize, blocksize>>>(sm2subimgCud, outsubimgCud, devregionAverg); // Check whether the kernel failed. if (cudaGetLastError() != cudaSuccess) { ImageBasicOp::deleteImage(sm1); ImageBasicOp::deleteImage(sm2); cudaFree(alldevicedata); return CUDA_ERROR; } devflag = devregionAverg + GRAY_LEVEL; // Compute each region's average saliency. _regionAverageKer<<<1, GRAY_LEVEL>>>(devregionAverg, devhistogram, devflag, saliencyThred); // Check whether the kernel failed. if (cudaGetLastError() != cudaSuccess) { ImageBasicOp::deleteImage(sm1); ImageBasicOp::deleteImage(sm2); cudaFree(alldevicedata); return CUDA_ERROR; } // Set the salient regions to white and the background to black. _regionShowKer<<<gridsize, blocksize>>>(outsubimgCud, devflag); // Check whether the kernel failed. if (cudaGetLastError() != cudaSuccess) { ImageBasicOp::deleteImage(sm1); ImageBasicOp::deleteImage(sm2); cudaFree(alldevicedata); return CUDA_ERROR; } // Free the intermediate images. ImageBasicOp::deleteImage(sm1); ImageBasicOp::deleteImage(sm2); // Free the temporary device memory. cudaFree(alldevicedata); return NO_ERROR; }
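// Editor's note: a small CPU-side sketch, independent of the classes above, of the trimmed mean that
// the "select" variant of the difference-value saliency computes per pixel: the absolute gray-level
// differences are histogrammed into 256 bins, the lowest lowPercent and highest highPercent of the
// samples are discarded, and the mean of the remaining samples is returned. This is a simplified
// illustration, not a line-for-line port of _saliencyMapByDiffValueKer.
#include <cstdio>

static float trimmedMean(const int count[256], int total, float lowPercent, float highPercent)
{
    int lowDrop  = (int)(lowPercent  * total + 0.5f);
    int highDrop = (int)(highPercent * total + 0.5f);

    long long sum = 0;
    int kept = 0;
    int seen = 0;
    for (int v = 0; v < 256; ++v) {
        for (int c = 0; c < count[v]; ++c) {
            ++seen;
            // Keep only samples whose rank falls between the two cut-offs.
            if (seen > lowDrop && seen <= total - highDrop) {
                sum += v;
                ++kept;
            }
        }
    }
    return kept > 0 ? (float)sum / kept : 0.0f;
}

int main()
{
    // Ten samples: eight 5s plus the outliers 0 and 255. Dropping 10% from each end removes both
    // outliers, so the trimmed mean is exactly 5.
    int count[256] = { 0 };
    count[0] = 1; count[5] = 8; count[255] = 1;
    printf("%.2f\n", trimmedMean(count, 10, 0.1f, 0.1f));
    return 0;
}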
the_stack
#include <map> #define kBsdfSamples 1.0f #define kProbeSamples 1.0f #define kRayEpsilon 0.001f #define USE_LIGHT_SAMPLING 1 namespace { struct GPUScene { Primitive* primitives; int numPrimitives; Primitive* lights; int numLights; Sky sky; BVH bvh; }; // create a texture object from memory and store it in a 64-bit pointer void CreateIntTexture(int** deviceBuffer, const int* hostBuffer, int sizeInBytes) { int* buffer; cudaMalloc(&buffer, sizeInBytes); cudaMemcpy(buffer, hostBuffer, sizeInBytes, cudaMemcpyHostToDevice); #if USE_TEXTURES // create texture object cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeLinear; resDesc.res.linear.devPtr = (void*)buffer; resDesc.res.linear.desc.f = cudaChannelFormatKindSigned; resDesc.res.linear.desc.x = 32; // bits per channel resDesc.res.linear.sizeInBytes = sizeInBytes; cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = cudaReadModeElementType; cudaTextureObject_t tex; cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL); // cast to pointer *deviceBuffer = (int*)tex; #else *deviceBuffer = buffer; #endif } // create a texture object from memory and store it in a 64-bit pointer void CreateFloatTexture(float** deviceBuffer, const float* hostBuffer, int sizeInBytes) { float* buffer; cudaMalloc(&buffer, sizeInBytes); cudaMemcpy(buffer, hostBuffer, sizeInBytes, cudaMemcpyHostToDevice); #if USE_TEXTURES // create texture object cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeLinear; resDesc.res.linear.devPtr = (void*)buffer; resDesc.res.linear.desc.f = cudaChannelFormatKindFloat; resDesc.res.linear.desc.x = 32; // bits per channel resDesc.res.linear.sizeInBytes = sizeInBytes; cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = cudaReadModeElementType; cudaTextureObject_t tex; cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL); // cast to pointer *deviceBuffer = (float*)tex; #else *deviceBuffer = buffer; #endif } // create a texture object from memory and store it in a 64-bit pointer void CreateVec4Texture(Vec4** deviceBuffer, const Vec4* hostBuffer, int sizeInBytes) { Vec4* buffer; cudaMalloc(&buffer, sizeInBytes); cudaMemcpy(buffer, hostBuffer, sizeInBytes, cudaMemcpyHostToDevice); #if USE_TEXTURES // create texture object cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeLinear; resDesc.res.linear.devPtr = (void*)buffer; resDesc.res.linear.desc.f = cudaChannelFormatKindFloat; resDesc.res.linear.desc.x = 32; // bits per channel resDesc.res.linear.desc.y = 32; // bits per channel resDesc.res.linear.desc.z = 32; // bits per channel resDesc.res.linear.desc.w = 32; // bits per channel resDesc.res.linear.sizeInBytes = sizeInBytes; cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = cudaReadModeElementType; cudaTextureObject_t tex; cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL); // cast to pointer *deviceBuffer = (Vec4*)tex; #else *deviceBuffer = buffer; #endif } void DestroyTexture(const void* texture) { cudaFree((void*)texture); #if USE_TEXTURES #error todo #endif } MeshGeometry CreateGPUMesh(const MeshGeometry& hostMesh) { const int numVertices = hostMesh.numVertices; const int numIndices = hostMesh.numIndices; const int numNodes = hostMesh.numNodes; MeshGeometry gpuMesh; #if USE_TEXTURES // expand positions out to vec4 std::vector<Vec4> positions; std::vector<Vec4> normals; for (int i=0; i < numVertices; ++i) 
{ positions.push_back(Vec4(hostMesh.positions[i], 1.0f)); normals.push_back(Vec4(hostMesh.normals[i], 0.0f)); } CreateVec4Texture((Vec4**)&gpuMesh.positions, (Vec4*)&positions[0], sizeof(Vec4)*numVertices); CreateVec4Texture((Vec4**)&gpuMesh.normals, (Vec4*)&normals[0], sizeof(Vec4)*numVertices); #else CreateFloatTexture((float**)&gpuMesh.positions, (float*)&hostMesh.positions[0], sizeof(Vec3)*numVertices); CreateFloatTexture((float**)&gpuMesh.normals, (float*)&hostMesh.normals[0], sizeof(Vec3)*numVertices); #endif CreateIntTexture((int**)&gpuMesh.indices, (int*)&hostMesh.indices[0], sizeof(int)*numIndices); CreateVec4Texture((Vec4**)&gpuMesh.nodes, (Vec4*)&hostMesh.nodes[0], sizeof(BVHNode)*numNodes); cudaMalloc((float**)&gpuMesh.cdf, sizeof(float)*numIndices/3); cudaMemcpy((float*)gpuMesh.cdf, &hostMesh.cdf[0], sizeof(float)*numIndices/3, cudaMemcpyHostToDevice); gpuMesh.numIndices = numIndices; gpuMesh.numVertices = numVertices; gpuMesh.numNodes = numNodes; gpuMesh.area = hostMesh.area; return gpuMesh; } void DestroyGPUMesh(const MeshGeometry& gpuMesh) { DestroyTexture(gpuMesh.positions); DestroyTexture(gpuMesh.normals); DestroyTexture(gpuMesh.indices); DestroyTexture(gpuMesh.nodes); cudaFree((void*)gpuMesh.cdf); } Texture CreateGPUTexture(const Texture& tex) { const int numTexels = tex.width*tex.height*tex.depth; Texture gpuTex = tex; cudaMalloc((void**)&gpuTex.data, sizeof(float)*numTexels); cudaMemcpy(gpuTex.data, tex.data, sizeof(float)*numTexels, cudaMemcpyHostToDevice); return gpuTex; } Sky CreateGPUSky(const Sky& sky) { Sky gpuSky = sky; // copy probe if (sky.probe.valid) { const int numPixels = sky.probe.width*sky.probe.height; // copy pixel data CreateVec4Texture((Vec4**)&gpuSky.probe.data, sky.probe.data, numPixels*sizeof(float)*4); // copy cdf tables CreateFloatTexture((float**)&gpuSky.probe.cdfValuesX, sky.probe.cdfValuesX, numPixels*sizeof(float)); CreateFloatTexture((float**)&gpuSky.probe.pdfValuesX, sky.probe.pdfValuesX, numPixels*sizeof(float)); CreateFloatTexture((float**)&gpuSky.probe.cdfValuesY, sky.probe.cdfValuesY, sky.probe.height*sizeof(float)); CreateFloatTexture((float**)&gpuSky.probe.pdfValuesY, sky.probe.pdfValuesY, sky.probe.height*sizeof(float)); } return gpuSky; } void DestroyGPUSky(const Sky& gpuSky) { if (gpuSky.probe.valid) { // todo } } } // anonymous #if 1 // a combined intersection routine that shares the traversal stack for the scene BVH and triangle mesh BVH inline __device__ bool Trace(const GPUScene& scene, const Vec3& rayOrigin, const Vec3& rayDir, float rayTime, float& outT, Vec3& outNormal, int& outPrimitive) { int stack[32]; stack[0] = 0; unsigned int count = 1; Vec3 dir, rcpDir; Vec3 origin; rcpDir.x = 1.0f/rayDir.x; rcpDir.y = 1.0f/rayDir.y; rcpDir.z = 1.0f/rayDir.z; origin = rayOrigin; dir = rayDir; const BVHNode* RESTRICT root = scene.bvh.nodes; MeshGeometry mesh; int primitiveIndex = -1; float closestT = FLT_MAX; //float closestU; float closestV; float closestW; Vec3 closestNormal; int closestPrimitive = -1; int closestTri; while(count) { const int nodeIndex = stack[--count]; if (nodeIndex < 0) { // reset to scene bvh dir and address rcpDir.x = 1.0f/rayDir.x; rcpDir.y = 1.0f/rayDir.y; rcpDir.z = 1.0f/rayDir.z; origin = rayOrigin; dir = rayDir; root = scene.bvh.nodes; primitiveIndex = -1; continue; } BVHNode node = fetchNode(root, nodeIndex); int leftIndex = node.leftIndex; int rightIndex = node.rightIndex; if (node.leaf) { if (primitiveIndex < 0) { const Primitive& p = scene.primitives[leftIndex]; Transform transform = 
InterpolateTransform(p.startTransform, p.endTransform, rayTime); switch (p.type) { case eSphere: { float minT, maxT; Vec3 n; bool hit = IntersectRaySphere(transform.p, p.sphere.radius*transform.s, origin, dir, minT, maxT, &n); if (hit && minT < closestT) { closestT = minT; closestNormal = n; closestPrimitive = leftIndex; } break; } case ePlane: { float t; bool hit = IntersectRayPlane(origin, dir, (const Vec4&)p.plane, t); if (hit && t < closestT) { closestT = t; closestNormal = (const Vec3&)p.plane; closestPrimitive = leftIndex; } break; } case eMesh: { // push a back-tracking marker in the stack stack[count++] = -1; // push root of the mesh bvh stack[count++] = 0; // transform ray to primitive local space origin = InverseTransformPoint(transform, rayOrigin); dir = InverseTransformVector(transform, rayDir); rcpDir.x = 1.0f/dir.x; rcpDir.y = 1.0f/dir.y; rcpDir.z = 1.0f/dir.z; // set bvh and mesh sources root = p.mesh.nodes; mesh = p.mesh; primitiveIndex = leftIndex; break; } }; } else { // mesh mode int i0 = fetchInt(mesh.indices, leftIndex*3+0); int i1 = fetchInt(mesh.indices, leftIndex*3+1); int i2 = fetchInt(mesh.indices, leftIndex*3+2); const Vec3 a = fetchVec3(mesh.positions, i0); const Vec3 b = fetchVec3(mesh.positions, i1); const Vec3 c = fetchVec3(mesh.positions, i2); float t, u, v, w; float sign; Vec3 n; //if (IntersectRayTri(rayOrigin, rayDir, a, b, c, t, u, v, w, &n)) if (IntersectRayTriTwoSided(origin, dir, a, b, c, t, u, v, w, sign, &n)) { if (t > 0.0f && t < closestT) { closestT = t; //closestU = u; closestV = v; closestW = w; closestTri = leftIndex; closestNormal = n*sign; closestPrimitive = primitiveIndex; } } } } else { // check children BVHNode left = fetchNode(root, leftIndex); BVHNode right = fetchNode(root, rightIndex); float tLeft; bool hitLeft = IntersectRayAABBFast(origin, rcpDir, left.bounds.lower, left.bounds.upper, tLeft) && tLeft < closestT; float tRight; bool hitRight = IntersectRayAABBFast(origin, rcpDir, right.bounds.lower, right.bounds.upper, tRight) && tRight < closestT; // traverse closest first if (hitLeft && hitRight && (tLeft < tRight)) { Swap(leftIndex, rightIndex); } if (hitLeft) stack[count++] = leftIndex; if (hitRight) stack[count++] = rightIndex; } } if (closestPrimitive >= 0) { const Primitive& p = scene.primitives[closestPrimitive]; if (p.type == eMesh) { Transform transform = InterpolateTransform(p.startTransform, p.endTransform, rayTime); // interpolate vertex normals int i0 = fetchInt(p.mesh.indices, closestTri*3+0); int i1 = fetchInt(p.mesh.indices, closestTri*3+1); int i2 = fetchInt(p.mesh.indices, closestTri*3+2); const Vec3 n1 = fetchVec3(p.mesh.normals, i0); const Vec3 n2 = fetchVec3(p.mesh.normals, i1); const Vec3 n3 = fetchVec3(p.mesh.normals, i2); Vec3 smoothNormal = (1.0f-closestV-closestW)*n1 + closestV*n2 + closestW*n3; // ensure smooth normal lies on the same side of the geometric normal if (Dot(smoothNormal, closestNormal) < 0.0f) smoothNormal *= -1.0f; closestNormal = SafeNormalize(TransformVector(transform, smoothNormal), closestNormal); } outT = closestT; outNormal = FaceForward(closestNormal, -rayDir); outPrimitive = &p-scene.primitives; return true; } else { // no hit return false; } } #else // trace a ray against the scene returning the closest intersection inline __device__ bool Trace(const GPUScene& scene, const Vec3& rayOrigin, const Vec3& rayDir, float rayTime, float& outT, Vec3& outNormal, int& outPrimitive) { struct Callback { float minT; Vec3 closestNormal; const Primitive* closestPrimitive; const Ray& ray; const 
GPUScene& scene; CUDA_CALLABLE inline Callback(const GPUScene& s, const Ray& r) : minT(REAL_MAX), closestPrimitive(NULL), ray(r), scene(s) { } CUDA_CALLABLE inline void operator()(int index) { float t; Vec3 n, ns; const Primitive& primitive = scene.primitives[index]; if (PrimitiveIntersect(primitive, ray, t, &n)) { if (t < minT && t > 0.0f) { minT = t; closestPrimitive = &primitive; closestNormal = n; } } } }; Callback callback(scene, ray); QueryBVH(callback, scene.bvh.nodes, ray.origin, ray.dir); outT = callback.minT; outNormal = FaceForward(callback.closestNormal, -ray.dir); outPrimitive = callback.closestPrimitive-scene.primitives; return callback.closestPrimitive != NULL; /* // reference trace method, no scene BVH float minT = REAL_MAX; const Primitive* closestPrimitive = NULL; Vec3 closestNormal(0.0f); for (int i=0; i < scene.numPrimitives; ++i) { const Primitive& primitive = scene.primitives[i]; float t; Vec3 n; if (PrimitiveIntersect(primitive, Ray(rayOrigin, rayDir, rayTime), t, &n)) { if (t < minT && t > 0.0f) { minT = t; closestPrimitive = &primitive; closestNormal = n; } } } outT = minT; outNormal = FaceForward(closestNormal, -rayDir); outPrimitive = closestPrimitive-scene.primitives;; return closestPrimitive != NULL; */ } #endif __device__ inline float SampleTexture(const Texture& map, int i, int j, int k) { int x = int(Abs(i))%map.width; int y = int(Abs(j))%map.height; int z = int(Abs(k))%map.depth; return map.data[z*map.width*map.height + y*map.width + x]; } __device__ inline float LinearInterp(const Texture& map, const Vec3& pos) { int i = floorf(pos.x*map.width); int j = floorf(pos.y*map.height); int k = floorf(pos.z*map.depth); // trilinear interpolation float tx = pos.x*map.width-i; float ty = pos.y*map.height-j; float tz = pos.z*map.depth-k; float a = Lerp(SampleTexture(map, i, j, k), SampleTexture(map, i, j, k+1), tz); float b = Lerp(SampleTexture(map, i+1, j, k), SampleTexture(map, i+1, j, k+1), tz); float c = Lerp(SampleTexture(map, i, j+1, k), SampleTexture(map, i, j+1, k+1), tz); float d = Lerp(SampleTexture(map, i+1, j+1, k), SampleTexture(map, i+1, j+1, k+1), tz); float e = Lerp(a, b, tx); float f = Lerp(c, d, tx); float g = Lerp(e, f, ty); return g; } __device__ inline Vec3 EvaluateBumpNormal(const Vec3& surfaceNormal, const Vec3& surfacePos, const Texture& bumpMap, const Vec3& bumpTile, float bumpStrength, Random& rand) { Vec3 u, v; BasisFromVector(surfaceNormal, &u, &v); float eps = 0.01f; Vec3 dpdu = u + bumpStrength*surfaceNormal*(LinearInterp(bumpMap, bumpTile*(surfacePos)+u*eps) - LinearInterp(bumpMap, bumpTile*surfacePos))/eps; Vec3 dpdv = v + bumpStrength*surfaceNormal*(LinearInterp(bumpMap, bumpTile*(surfacePos)+v*eps) - LinearInterp(bumpMap, bumpTile*surfacePos))/eps; return SafeNormalize(Cross(dpdu, dpdv), surfaceNormal); } __device__ inline Vec3 SampleLights(const GPUScene& scene, const Primitive& surfacePrimitive, float etaI, float etaO, const Vec3& surfacePos, const Vec3& surfaceNormal, const Vec3& shadingNormal, const Vec3& wo, float time, Random& rand) { Vec3 sum(0.0f); if (scene.sky.probe.valid) { for (int i=0; i < kProbeSamples; ++i) { Vec3 skyColor; float skyPdf; Vec3 wi; ProbeSample(scene.sky.probe, wi, skyColor, skyPdf, rand); // check if occluded float t; Vec3 n; int hit; if (Trace(scene, surfacePos + FaceForward(surfaceNormal, wi)*kRayEpsilon, wi, time, t, n, hit) == false) { float bsdfPdf = BSDFPdf(surfacePrimitive.material, etaI, etaO, surfacePos, surfaceNormal, wo, wi); Vec3 f = BSDFEval(surfacePrimitive.material, etaI, etaO, surfacePos, 
surfaceNormal, wo, wi); if (bsdfPdf > 0.0f) { int N = kProbeSamples+kBsdfSamples; float cbsdf = kBsdfSamples/N; float csky = float(kProbeSamples)/N; float weight = csky*skyPdf/(cbsdf*bsdfPdf + csky*skyPdf); Validate(weight); if (weight > 0.0f) sum += weight*skyColor*f*Abs(Dot(wi, surfaceNormal))/skyPdf; } } } if (kProbeSamples > 0) sum /= float(kProbeSamples); } for (int i=0; i < scene.numLights; ++i) { // assume all lights are area lights for now const Primitive& lightPrimitive = scene.lights[i]; Vec3 L(0.0f); int numSamples = lightPrimitive.lightSamples; if (numSamples == 0) continue; for (int s=0; s < numSamples; ++s) { // sample light source Vec3 lightPos; Vec3 lightNormal; PrimitiveSample(lightPrimitive, time, lightPos, lightNormal, rand); Vec3 wi = lightPos-surfacePos; float dSq = LengthSq(wi); wi /= sqrtf(dSq); // check visibility float t; Vec3 n; int hit; if (Trace(scene, surfacePos + FaceForward(surfaceNormal, wi)*kRayEpsilon, wi, time, t, n, hit)) { float tSq = t*t; // if our next hit was further than distance to light then accept // sample, this works for portal sampling where you have a large light // that you sample through a small window const float kTolerance = 1.e-2f; if (fabsf(t - sqrtf(dSq)) <= kTolerance) { const float nl = Abs(Dot(lightNormal, wi)); // for glancing rays, note we use abs to include cases // where light surface is backfacing, e.g.: inside the weak furnace if (Abs(nl) < 1.e-6f) continue; // light pdf with respect to area and convert to pdf with respect to solid angle float lightArea = PrimitiveArea(lightPrimitive); float lightPdf = ((1.0f/lightArea)*tSq)/nl; // bsdf pdf for light's direction float bsdfPdf = BSDFPdf(surfacePrimitive.material, etaI, etaO, surfacePos, shadingNormal, wo, wi); Vec3 f = BSDFEval(surfacePrimitive.material, etaI, etaO, surfacePos, shadingNormal, wo, wi); // this branch is only necessary to exclude specular paths from light sampling (always have zero brdf) // todo: make BSDFEval always return zero for pure specular paths and roll specular eval into BSDFSample() if (bsdfPdf > 0.0f) { // calculate relative weighting of the light and bsdf sampling int N = lightPrimitive.lightSamples+kBsdfSamples; float cbsdf = kBsdfSamples/N; float clight = float(lightPrimitive.lightSamples)/N; float weight = clight*lightPdf/(cbsdf*bsdfPdf + clight*lightPdf); L += weight*f*lightPrimitive.material.emission*(Abs(Dot(wi, shadingNormal))/Max(1.e-3f, lightPdf)); } } } } sum += L * (1.0f/numSamples); } return sum; } // reference, no light sampling, uniform hemisphere sampling inline __device__ Vec3 PathTrace(const GPUScene& scene, const Vec3& origin, const Vec3& dir, float time, int maxDepth, Random& rand) { // path throughput Vec3 pathThroughput(1.0f, 1.0f, 1.0f); // accumulated radiance Vec3 totalRadiance(0.0f); Vec3 rayOrigin = origin; Vec3 rayDir = dir; float rayTime = time; float rayEta = 1.0f; Vec3 rayAbsorption = 0.0f; BSDFType rayType = eReflected; float bsdfPdf = 1.0f; for (int i=0; i < maxDepth; ++i) { // find closest hit float t; Vec3 n, ns; int hit; if (Trace(scene, rayOrigin, rayDir, rayTime, t, n, hit)) { const Primitive prim = scene.primitives[hit]; float outEta; Vec3 outAbsorption; // index of refraction for transmission, 1.0 corresponds to air if (rayEta == 1.0f) { outEta = prim.material.GetIndexOfRefraction(); outAbsorption = prim.material.absorption; } else { // returning to free space outEta = 1.0f; outAbsorption = 0.0f; } // update throughput based on absorption through the medium pathThroughput *= Exp(-rayAbsorption*t); // calculate a 
basis for this hit point const Vec3 p = rayOrigin + rayDir*t; #if USE_LIGHT_SAMPLING if (i == 0) { // first trace is our only chance to add contribution from directly visible light sources totalRadiance += prim.material.emission; } else if (kBsdfSamples > 0) { // area pdf that this dir was already included by the light sampling from previous step float lightArea = PrimitiveArea(prim); if (lightArea > 0.0f) { // convert to pdf with respect to solid angle float lightPdf = ((1.0f/lightArea)*t*t)/Abs(Dot(rayDir, n)); // calculate weight for bsdf sampling int N = prim.lightSamples+kBsdfSamples; float cbsdf = kBsdfSamples/N; float clight = float(prim.lightSamples)/N; float weight = cbsdf*bsdfPdf/(cbsdf*bsdfPdf+ clight*lightPdf); Validate(weight); // specular paths have zero chance of being included by direct light sampling (zero pdf) if (rayType == eSpecular) weight = 1.0f; // pathThroughput already includes the bsdf pdf totalRadiance += weight*pathThroughput*prim.material.emission; } } // terminate ray if we hit a light source if (prim.lightSamples) break; // integrate direct light over hemisphere totalRadiance += pathThroughput*SampleLights(scene, prim, rayEta, outEta, p, n, n, -rayDir, rayTime, rand); #else totalRadiance += pathThroughput*prim.material.emission; #endif // integrate indirect light by sampling BSDF Vec3 u, v; BasisFromVector(n, &u, &v); Vec3 bsdfDir; BSDFType bsdfType; BSDFSample(prim.material, rayEta, outEta, p, u, v, n, -rayDir, bsdfDir, bsdfPdf, bsdfType, rand); if (bsdfPdf <= 0.0f) break; Validate(bsdfPdf); // reflectance Vec3 f = BSDFEval(prim.material, rayEta, outEta, p, n, -rayDir, bsdfDir); // update ray medium if we are transmitting through the material if (Dot(bsdfDir, n) <= 0.0f) { rayEta = outEta; rayAbsorption = outAbsorption; } // update throughput with primitive reflectance pathThroughput *= f * Abs(Dot(n, bsdfDir))/bsdfPdf; // update ray direction and type rayType = bsdfType; rayDir = bsdfDir; rayOrigin = p + FaceForward(n, bsdfDir)*kRayEpsilon; } else { // hit nothing, sample sky dome and terminate float weight = 1.0f; if (scene.sky.probe.valid && i > 0 && rayType != eSpecular) { // probability that this dir was already sampled by probe sampling float skyPdf = ProbePdf(scene.sky.probe, rayDir); int N = kProbeSamples+kBsdfSamples; float cbsdf = kBsdfSamples/N; float csky = float(kProbeSamples)/N; weight = cbsdf*bsdfPdf/(cbsdf*bsdfPdf+ csky*skyPdf); Validate(bsdfPdf); Validate(skyPdf); } Validate(weight); totalRadiance += weight*scene.sky.Eval(rayDir)*pathThroughput; break; } } return totalRadiance; } __device__ void AddSample(Color* output, int width, int height, float rasterX, float rasterY, float clamp, Filter filter, const Vec3& sample) { switch (filter.type) { case eFilterBox: { int x = int(rasterX); int y = int(rasterY); output[y*width+x] += Color(sample.x, sample.y, sample.z, 1.0f); break; } case eFilterGaussian: { int startX = Max(0, int(rasterX - filter.width)); int startY = Max(0, int(rasterY - filter.width)); int endX = Min(int(rasterX + filter.width), width-1); int endY = Min(int(rasterY + filter.width), height-1); Vec3 c = ClampLength(sample, clamp); for (int x=startX; x <= endX; ++x) { for (int y=startY; y <= endY; ++y) { float w = filter.Eval(x-rasterX, y-rasterY); //output[(height-1-y)*width+x] += Vec3(Min(sample.x, clamp), Min(sample.y, clamp), Min(sample.z, clamp), 1.0f)*w; const int index = y*width+x; atomicAdd(&output[index].x, c.x*w); atomicAdd(&output[index].y, c.y*w); atomicAdd(&output[index].z, c.z*w); atomicAdd(&output[index].w, w); } } 
break; } }; } __launch_bounds__(256, 4) __global__ void RenderGpu(GPUScene scene, Camera camera, CameraSampler sampler, Options options, int seed, Color* output) { const int tx = blockIdx.x*blockDim.x; const int ty = blockIdx.y*blockDim.y; const int i = tx + threadIdx.x; const int j = ty + threadIdx.y; if (i < options.width && j < options.height) { // initialize a per-thread PRNG Random rand(i + j*options.width + seed); if (options.mode == eNormals) { Vec3 origin, dir; sampler.GenerateRay(i, j, origin, dir); int p; float t; Vec3 n; if (Trace(scene, origin, dir, 1.0f, t, n, p)) { n = n*0.5f+0.5f; output[j*options.width+i] = Color(n.x, n.y, n.z, 1.0f); } else { output[j*options.width+i] = Color(0.5f); } } else if (options.mode == ePathTrace) { const float time = rand.Randf(camera.shutterStart, camera.shutterEnd); const float fx = i + rand.Randf(-0.5f, 0.5f) + 0.5f; const float fy = j + rand.Randf(-0.5f, 0.5f) + 0.5f; Vec3 origin, dir; sampler.GenerateRay(fx, fy, origin, dir); //output[(height-1-j)*width+i] += PathTrace(*scene, origin, dir); Vec3 sample = PathTrace(scene, origin, dir, time, options.maxDepth, rand); AddSample(output, options.width, options.height, fx, fy, options.clamp, options.filter, sample); } } } struct GpuRenderer : public Renderer { Color* output = NULL; GPUScene sceneGPU; Random seed; // map id to geometry struct std::map<int, MeshGeometry> gpuMeshes; GpuRenderer(const Scene* s) { // build GPU primitive and light lists std::vector<Primitive> primitives; std::vector<Primitive> lights; for (int i=0; i < s->primitives.size(); ++i) { Primitive primitive = s->primitives[i]; // if mesh primitive then copy to the GPU if (primitive.type == eMesh) { // see if we have already uploaded the mesh to the GPU if (gpuMeshes.find(primitive.mesh.id) == gpuMeshes.end()) { MeshGeometry geo = CreateGPUMesh(primitive.mesh); gpuMeshes[geo.id] = geo; // replace CPU mesh with GPU copy primitive.mesh = geo; } } if (primitive.material.bump > 0.0f) { primitive.material.bumpMap = CreateGPUTexture(primitive.material.bumpMap); } // create explicit list of light primitives if (primitive.lightSamples) { lights.push_back(primitive); } primitives.push_back(primitive); } // convert scene BVH CreateVec4Texture((Vec4**)&(sceneGPU.bvh.nodes), (Vec4*)s->bvh.nodes, sizeof(BVHNode)*s->bvh.numNodes); sceneGPU.bvh.numNodes = s->bvh.numNodes; // upload to the GPU sceneGPU.numPrimitives = primitives.size(); sceneGPU.numLights = lights.size(); if (sceneGPU.numLights > 0) { cudaMalloc(&sceneGPU.lights, sizeof(Primitive)*lights.size()); cudaMemcpy(sceneGPU.lights, &lights[0], sizeof(Primitive)*lights.size(), cudaMemcpyHostToDevice); } if (sceneGPU.numPrimitives > 0) { cudaMalloc(&sceneGPU.primitives, sizeof(Primitive)*primitives.size()); cudaMemcpy(sceneGPU.primitives, &primitives[0], sizeof(Primitive)*primitives.size(), cudaMemcpyHostToDevice); } // copy sky and probe texture sceneGPU.sky = CreateGPUSky(s->sky); static int frame; ++frame; seed = Random(frame); } virtual ~GpuRenderer() { cudaFree(output); cudaFree(sceneGPU.primitives); cudaFree(sceneGPU.lights); DestroyTexture(sceneGPU.bvh.nodes); // free meshes for (auto iter=gpuMeshes.begin(); iter != gpuMeshes.end(); ++iter) { DestroyGPUMesh(iter->second); } } void Init(int width, int height) { cudaFree(output); cudaMalloc(&output, sizeof(Color)*width*height); cudaMemset(output, 0, sizeof(Color)*width*height); } void Render(const Camera& camera, const Options& options, Color* outputHost) { // create a sampler for the camera CameraSampler sampler( 
Transform(camera.position, camera.rotation), camera.fov, 0.001f, 1.0f, options.width, options.height); // assign threads in 2d tile layout const int blockWidth = 16; const int blockHeight = 16; const int gridWidth = (options.width + blockWidth - 1)/blockWidth; const int gridHeight = (options.height + blockHeight - 1)/blockHeight; dim3 blockDim(blockWidth, blockHeight); dim3 gridDim(gridWidth, gridHeight); RenderGpu<<<gridDim, blockDim>>>(sceneGPU, camera, sampler, options, seed.Rand(), output); // copy back to output cudaMemcpy(outputHost, output, sizeof(Color)*options.width*options.height, cudaMemcpyDeviceToHost); } }; Renderer* CreateGpuRenderer(const Scene* s) { return new GpuRenderer(s); }
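// Illustrative sketch: the one-sample multiple-importance-sampling (balance
// heuristic) weight that SampleLights() and PathTrace() above both compute.
// All names here (BalanceWeight, the sample values in main) are local to this
// sketch; only the formula mirrors the kernels. Note that if kBsdfSamples and
// lightSamples are ints, expressions like kBsdfSamples/N in the kernels
// truncate to an integer ratio, whereas this sketch uses floating-point ratios.
#include <cstdio>

// Weight for a sample drawn from strategy A when strategy B could also have
// produced the same direction; cA/cB are the relative sample counts.
static float BalanceWeight(float cA, float pdfA, float cB, float pdfB)
{
    return cA*pdfA / (cA*pdfA + cB*pdfB);
}

int main()
{
    // e.g. one light sample and one BSDF sample per bounce
    const float clight = 0.5f, cbsdf = 0.5f;
    const float lightPdf = 2.0f, bsdfPdf = 0.25f;

    float wLight = BalanceWeight(clight, lightPdf, cbsdf, bsdfPdf);
    float wBsdf  = BalanceWeight(cbsdf, bsdfPdf, clight, lightPdf);

    // the two weights assigned to the same direction sum to one
    printf("wLight=%f wBsdf=%f sum=%f\n", wLight, wBsdf, wLight + wBsdf);
    return 0;
}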
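// Illustrative sketch: how the filtered splatting in AddSample() is presumably
// resolved to a final pixel. Each sample accumulates weighted colour in RGB and
// the filter weight itself in the fourth channel; the display path (not shown
// in this file) would then normalise RGB by the accumulated weight. The Accum
// and Splat names are local to this sketch.
#include <cstdio>

struct Accum { float r, g, b, w; };

static void Splat(Accum& px, float sr, float sg, float sb, float weight)
{
    // in the kernel these four additions are atomicAdd()s, because several
    // threads can splat into the same pixel
    px.r += sr*weight; px.g += sg*weight; px.b += sb*weight; px.w += weight;
}

int main()
{
    Accum px = {0.0f, 0.0f, 0.0f, 0.0f};
    Splat(px, 1.0f, 0.5f, 0.25f, 0.8f);
    Splat(px, 0.2f, 0.2f, 0.2f, 0.2f);

    // resolve: normalise by the accumulated filter weight
    printf("pixel = (%f, %f, %f)\n", px.r/px.w, px.g/px.w, px.b/px.w);
    return 0;
}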
// Copyright (c) 2009-2021 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. // Maintainer: phillicl #include "TableDihedralForceGPU.cuh" #include "hoomd/TextureTools.h" #include "hoomd/VectorMath.h" #include <assert.h> // SMALL a relatively small number #define SMALL 0.001f /*! \file TableDihedralForceGPU.cu \brief Defines GPU kernel code for calculating the table dihedral forces. Used by TableDihedralForceComputeGPU. */ /*! This kernel is called to calculate the table dihedral forces on all triples this is defined or \param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch Pitch of 2D virial array \param N number of particles in system \param device_pos device array of particle positions \param box Box dimensions used to implement periodic boundary conditions \param dlist List of dihedrals stored on the GPU \param pitch Pitch of 2D dihedral list \param n_dihedrals_list List of numbers of dihedrals stored on the GPU \param n_dihedral_type number of dihedral types \param d_tables Tables of the potential and force \param table_value index helper function \param delta_phi dihedral delta of the table See TableDihedralForceCompute for information on the memory layout. */ __global__ void gpu_compute_table_dihedral_forces_kernel(Scalar4* d_force, Scalar* d_virial, const size_t virial_pitch, const unsigned int N, const Scalar4* device_pos, const BoxDim box, const group_storage<4>* dlist, const unsigned int* dihedral_ABCD, const unsigned int pitch, const unsigned int* n_dihedrals_list, const Scalar2* d_tables, const Index2D table_value, const Scalar delta_phi) { // start by identifying which particle we are to handle unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= N) return; // load in the length of the list for this thread (MEM TRANSFER: 4 bytes) int n_dihedrals = n_dihedrals_list[idx]; // read in the position of our b-particle from the a-b-c triplet. 
(MEM TRANSFER: 16 bytes) Scalar4 idx_postype = device_pos[idx]; // we can be either a, b, or c in the a-b-c triplet Scalar3 idx_pos = make_scalar3(idx_postype.x, idx_postype.y, idx_postype.z); Scalar3 pos_a, pos_b, pos_c, pos_d; // allocate space for the a,b,c, and d atom in the a-b-c-d set // initialize the force to 0 Scalar4 force_idx = make_scalar4(0.0f, 0.0f, 0.0f, 0.0f); // initialize the virial tensor to 0 Scalar virial_idx[6]; for (unsigned int i = 0; i < 6; i++) virial_idx[i] = 0; for (int dihedral_idx = 0; dihedral_idx < n_dihedrals; dihedral_idx++) { group_storage<4> cur_dihedral = dlist[pitch * dihedral_idx + idx]; unsigned int cur_ABCD = dihedral_ABCD[pitch * dihedral_idx + idx]; int cur_dihedral_x_idx = cur_dihedral.idx[0]; int cur_dihedral_y_idx = cur_dihedral.idx[1]; int cur_dihedral_z_idx = cur_dihedral.idx[2]; int cur_dihedral_type = cur_dihedral.idx[3]; int cur_dihedral_abcd = cur_ABCD; // get the a-particle's position (MEM TRANSFER: 16 bytes) Scalar4 x_postype = device_pos[cur_dihedral_x_idx]; Scalar3 x_pos = make_scalar3(x_postype.x, x_postype.y, x_postype.z); // get the c-particle's position (MEM TRANSFER: 16 bytes) Scalar4 y_postype = device_pos[cur_dihedral_y_idx]; Scalar3 y_pos = make_scalar3(y_postype.x, y_postype.y, y_postype.z); // get the d-particle's position (MEM TRANSFER: 16 bytes) Scalar4 z_postype = device_pos[cur_dihedral_z_idx]; Scalar3 z_pos = make_scalar3(z_postype.x, z_postype.y, z_postype.z); if (cur_dihedral_abcd == 0) { pos_a = idx_pos; pos_b = x_pos; pos_c = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 1) { pos_b = idx_pos; pos_a = x_pos; pos_c = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 2) { pos_c = idx_pos; pos_a = x_pos; pos_b = y_pos; pos_d = z_pos; } if (cur_dihedral_abcd == 3) { pos_d = idx_pos; pos_a = x_pos; pos_b = y_pos; pos_c = z_pos; } // calculate dr for a-b,c-b,and a-c Scalar3 dab = pos_a - pos_b; Scalar3 dcb = pos_c - pos_b; Scalar3 ddc = pos_d - pos_c; dab = box.minImage(dab); dcb = box.minImage(dcb); ddc = box.minImage(ddc); Scalar3 dcbm = -dcb; dcbm = box.minImage(dcbm); // c0 calculation Scalar sb1 = Scalar(1.0) / (dab.x * dab.x + dab.y * dab.y + dab.z * dab.z); Scalar sb3 = Scalar(1.0) / (ddc.x * ddc.x + ddc.y * ddc.y + ddc.z * ddc.z); Scalar rb1 = fast::sqrt(sb1); Scalar rb3 = fast::sqrt(sb3); Scalar c0 = (dab.x * ddc.x + dab.y * ddc.y + dab.z * ddc.z) * rb1 * rb3; // 1st and 2nd angle Scalar b1mag2 = dab.x * dab.x + dab.y * dab.y + dab.z * dab.z; Scalar b1mag = fast::sqrt(b1mag2); Scalar b2mag2 = dcb.x * dcb.x + dcb.y * dcb.y + dcb.z * dcb.z; Scalar b2mag = fast::sqrt(b2mag2); Scalar b3mag2 = ddc.x * ddc.x + ddc.y * ddc.y + ddc.z * ddc.z; Scalar b3mag = fast::sqrt(b3mag2); Scalar ctmp = dab.x * dcb.x + dab.y * dcb.y + dab.z * dcb.z; Scalar r12c1 = Scalar(1.0) / (b1mag * b2mag); Scalar c1mag = ctmp * r12c1; ctmp = dcbm.x * ddc.x + dcbm.y * ddc.y + dcbm.z * ddc.z; Scalar r12c2 = Scalar(1.0) / (b2mag * b3mag); Scalar c2mag = ctmp * r12c2; // cos and sin of 2 angles and final c Scalar sin2 = Scalar(1.0) - c1mag * c1mag; if (sin2 < 0.0f) sin2 = 0.0f; Scalar sc1 = fast::sqrt(sin2); if (sc1 < SMALL) sc1 = SMALL; sc1 = Scalar(1.0) / sc1; sin2 = Scalar(1.0) - c2mag * c2mag; if (sin2 < 0.0f) sin2 = 0.0f; Scalar sc2 = fast::sqrt(sin2); if (sc2 < SMALL) sc2 = SMALL; sc2 = Scalar(1.0) / sc2; Scalar s12 = sc1 * sc2; Scalar c = (c0 + c1mag * c2mag) * s12; if (c > Scalar(1.0)) c = Scalar(1.0); if (c < -Scalar(1.0)) c = -Scalar(1.0); // determinant Scalar det = dot(dab, make_scalar3(ddc.y * dcb.z - ddc.z * dcb.y, ddc.z * dcb.x - 
ddc.x * dcb.z, ddc.x * dcb.y - ddc.y * dcb.x)); // phi Scalar phi = acosf(c); if (det < 0) phi = -phi; // precomputed term Scalar value_f = (Scalar(M_PI) + phi) / delta_phi; // compute index into the table and read in values unsigned int value_i = value_f; Scalar2 VT0 = __ldg(d_tables + table_value(value_i, cur_dihedral_type)); Scalar2 VT1 = __ldg(d_tables + table_value(value_i + 1, cur_dihedral_type)); // unpack the data Scalar V0 = VT0.x; Scalar V1 = VT1.x; Scalar T0 = VT0.y; Scalar T1 = VT1.y; // compute the linear interpolation coefficient Scalar f = value_f - Scalar(value_i); // interpolate to get V and T; Scalar V = V0 + f * (V1 - V0); Scalar T = T0 + f * (T1 - T0); // from Blondel and Karplus 1995 vec3<Scalar> A = cross(vec3<Scalar>(dab), vec3<Scalar>(dcbm)); Scalar Asq = dot(A, A); vec3<Scalar> B = cross(vec3<Scalar>(ddc), vec3<Scalar>(dcbm)); Scalar Bsq = dot(B, B); Scalar3 f_a = -T * vec_to_scalar3(b2mag / Asq * A); Scalar3 f_b = -f_a + T / b2mag * vec_to_scalar3(dot(dab, dcbm) / Asq * A - dot(ddc, dcbm) / Bsq * B); Scalar3 f_c = T * vec_to_scalar3(dot(ddc, dcbm) / Bsq / b2mag * B - dot(dab, dcbm) / Asq / b2mag * A - b2mag / Bsq * B); Scalar3 f_d = T * b2mag / Bsq * vec_to_scalar3(B); // Now, apply the force to each individual atom a,b,c,d // and accumulate the energy/virial // compute 1/4 of the energy, 1/4 for each atom in the dihedral Scalar dihedral_eng = V * Scalar(1.0 / 4.0); // compute 1/4 of the virial, 1/4 for each atom in the dihedral // upper triangular version of virial tensor Scalar dihedral_virial[6]; dihedral_virial[0] = (1. / 4.) * (dab.x * f_a.x + dcb.x * f_c.x + (ddc.x + dcb.x) * f_d.x); dihedral_virial[1] = (1. / 4.) * (dab.y * f_a.x + dcb.y * f_c.x + (ddc.y + dcb.y) * f_d.x); dihedral_virial[2] = (1. / 4.) * (dab.z * f_a.x + dcb.z * f_c.x + (ddc.z + dcb.z) * f_d.x); dihedral_virial[3] = (1. / 4.) * (dab.y * f_a.y + dcb.y * f_c.y + (ddc.y + dcb.y) * f_d.y); dihedral_virial[4] = (1. / 4.) * (dab.z * f_a.y + dcb.z * f_c.y + (ddc.z + dcb.z) * f_d.y); dihedral_virial[5] = (1. / 4.) * (dab.z * f_a.z + dcb.z * f_c.z + (ddc.z + dcb.z) * f_d.z); if (cur_dihedral_abcd == 0) { force_idx.x += f_a.x; force_idx.y += f_a.y; force_idx.z += f_a.z; } if (cur_dihedral_abcd == 1) { force_idx.x += f_b.x; force_idx.y += f_b.y; force_idx.z += f_b.z; } if (cur_dihedral_abcd == 2) { force_idx.x += f_c.x; force_idx.y += f_c.y; force_idx.z += f_c.z; } if (cur_dihedral_abcd == 3) { force_idx.x += f_d.x; force_idx.y += f_d.y; force_idx.z += f_d.z; } force_idx.w += dihedral_eng; for (int k = 0; k < 6; k++) virial_idx[k] += dihedral_virial[k]; } // now that the force calculation is complete, write out the result (MEM TRANSFER: 20 bytes) d_force[idx] = force_idx; for (int k = 0; k < 6; k++) d_virial[k * virial_pitch + idx] = virial_idx[k]; } /*! 
\param d_force Device memory to write computed forces \param d_virial Device memory to write computed virials \param virial_pitch pitch of 2D virial array \param N number of particles \param device_pos particle positions on the device \param box Box dimensions used to implement periodic boundary conditions \param dlist List of dihedrals stored on the GPU \param pitch Pitch of 2D dihedral list \param n_dihedrals_list List of numbers of dihedrals stored on the GPU \param n_dihedral_type number of dihedral types \param d_tables Tables of the potential and force \param table_width Number of points in each table \param table_value indexer helper \param block_size Block size at which to run the kernel \param compute_capability Compute capability of the device (200, 300, 350, ...) \note This is just a kernel driver. See gpu_compute_table_dihedral_forces_kernel for full documentation. */ hipError_t gpu_compute_table_dihedral_forces(Scalar4* d_force, Scalar* d_virial, const size_t virial_pitch, const unsigned int N, const Scalar4* device_pos, const BoxDim& box, const group_storage<4>* dlist, const unsigned int* dihedral_ABCD, const unsigned int pitch, const unsigned int* n_dihedrals_list, const Scalar2* d_tables, const unsigned int table_width, const Index2D& table_value, const unsigned int block_size) { assert(d_tables); assert(table_width > 1); if (N == 0) return hipSuccess; unsigned int max_block_size; hipFuncAttributes attr; hipFuncGetAttributes(&attr, (const void*)gpu_compute_table_dihedral_forces_kernel); max_block_size = attr.maxThreadsPerBlock; unsigned int run_block_size = min(block_size, max_block_size); // setup the grid to run the kernel dim3 grid(N / run_block_size + 1, 1, 1); dim3 threads(run_block_size, 1, 1); Scalar delta_phi = Scalar(2.0 * M_PI) / (Scalar)(table_width - 1); hipLaunchKernelGGL((gpu_compute_table_dihedral_forces_kernel), dim3(grid), dim3(threads), 0, 0, d_force, d_virial, virial_pitch, N, device_pos, box, dlist, dihedral_ABCD, pitch, n_dihedrals_list, d_tables, table_value, delta_phi); return hipSuccess; } // vim:syntax=cpp
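// Illustrative sketch: the angle-to-table lookup used by the kernel above.
// phi in [-pi, pi] is mapped to a fractional index with
// delta_phi = 2*pi/(table_width-1), and the tabulated potential/torque pair is
// linearly interpolated. The toy table contents below are made up purely for
// demonstration.
#include <cmath>
#include <cstdio>

int main()
{
    const int table_width = 5;                   // number of tabulated points
    const double delta_phi = 2.0*M_PI/(table_width - 1);

    // toy table: V(phi) = cos(phi), T(phi) = sin(phi)
    double V[table_width], T[table_width];
    for (int i = 0; i < table_width; ++i)
    {
        double phi_i = -M_PI + i*delta_phi;
        V[i] = cos(phi_i);
        T[i] = sin(phi_i);
    }

    double phi = 0.3;                            // query angle
    double value_f = (M_PI + phi)/delta_phi;     // fractional table index
    int value_i = (int)value_f;
    double f = value_f - value_i;                // interpolation coefficient

    double Vq = V[value_i] + f*(V[value_i + 1] - V[value_i]);
    double Tq = T[value_i] + f*(T[value_i + 1] - T[value_i]);
    printf("phi=%f  V=%f  T=%f\n", phi, Vq, Tq);
    return 0;
}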
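// Illustrative sketch (plain CUDA rather than HIP): the block-size clamping
// pattern used by the driver function above -- query the kernel's maximum
// block size, clamp the requested size, and derive a 1D grid that covers N
// threads. dummy_kernel and the constants are local to this sketch.
#include <algorithm>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(int* out, unsigned int N)
{
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < N) out[i] = (int)i;
}

int main()
{
    const unsigned int N = 100000;
    const unsigned int requested_block_size = 1024;

    cudaFuncAttributes attr;
    cudaFuncGetAttributes(&attr, (const void*)dummy_kernel);
    unsigned int run_block_size =
        std::min(requested_block_size, (unsigned int)attr.maxThreadsPerBlock);

    dim3 grid(N/run_block_size + 1, 1, 1);
    dim3 threads(run_block_size, 1, 1);

    int* d_out = nullptr;
    cudaMalloc(&d_out, N*sizeof(int));
    dummy_kernel<<<grid, threads>>>(d_out, N);
    cudaDeviceSynchronize();
    printf("launched %u blocks of %u threads\n", grid.x, threads.x);
    cudaFree(d_out);
    return 0;
}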
#include <cassert> #include <fstream> #include <iostream> #include <sstream> #include "Timer.h" #include <cuda.h> #include <cuda_runtime_api.h> #include "OptionParser.h" #include "ResultDatabase.h" #include "cudacommon.h" using namespace std; // leftrotate function definition #define LEFTROTATE(x, c) (((x) << (c)) | ((x) >> (32 - (c)))) #define F(x,y,z) ((x & y) | ((~x) & z)) #define G(x,y,z) ((x & z) | ((~z) & y)) #define H(x,y,z) (x ^ y ^ z) #define I(x,y,z) (y ^ (x | (~z))) // This version of the round shifts the interpretation of a,b,c,d by one // and must be called with v/x/y/z in a matching shuffle pattern. // Every four Rounds, a,b,c,d are back to their original interpretation, // thogh, so it all works out in the end (we have 64 rounds per block). #define ROUND_INPLACE_VIA_SHIFT(w, r, k, v, x, y, z, func) \ { \ v += func(x,y,z) + w + k; \ v = x + LEFTROTATE(v, r); \ } // This version ignores the mapping of a/b/c/d to v/x/y/z and simply // uses a temporary variable to keep the interpretation of a/b/c/d // consistent. Whether this one or the previous one performs better // probably depends on the compiler.... #define ROUND_USING_TEMP_VARS(w, r, k, v, x, y, z, func) \ { \ a = a + func(b,c,d) + k + w; \ unsigned int temp = d; \ d = c; \ c = b; \ b = b + LEFTROTATE(a, r); \ a = temp; \ } // Here, we pick which style of ROUND we use. #define ROUND ROUND_USING_TEMP_VARS //#define ROUND ROUND_INPLACE_VIA_SHIFT /// NOTE: this really only allows a length up to 7 bytes, not 8, because /// we need to start the padding in the first byte following the message, /// and we only have two words to work with here.... /// It also assumes words[] has all zero bits except the chars of interest. __host__ __device__ inline void md5_2words(unsigned int *words, unsigned int len, unsigned int *digest) { // For any block but the first one, these should be passed in, not // initialized, but we are assuming we only operate on a single block. 
unsigned int h0 = 0x67452301; unsigned int h1 = 0xefcdab89; unsigned int h2 = 0x98badcfe; unsigned int h3 = 0x10325476; unsigned int a = h0; unsigned int b = h1; unsigned int c = h2; unsigned int d = h3; unsigned int WL = len * 8; unsigned int W0 = words[0]; unsigned int W1 = words[1]; switch (len) { case 0: W0 |= 0x00000080; break; case 1: W0 |= 0x00008000; break; case 2: W0 |= 0x00800000; break; case 3: W0 |= 0x80000000; break; case 4: W1 |= 0x00000080; break; case 5: W1 |= 0x00008000; break; case 6: W1 |= 0x00800000; break; case 7: W1 |= 0x80000000; break; } // args: word data, per-round shift amt, constant, 4 vars, function macro ROUND(W0, 7, 0xd76aa478, a, b, c, d, F); ROUND(W1, 12, 0xe8c7b756, d, a, b, c, F); ROUND(0, 17, 0x242070db, c, d, a, b, F); ROUND(0, 22, 0xc1bdceee, b, c, d, a, F); ROUND(0, 7, 0xf57c0faf, a, b, c, d, F); ROUND(0, 12, 0x4787c62a, d, a, b, c, F); ROUND(0, 17, 0xa8304613, c, d, a, b, F); ROUND(0, 22, 0xfd469501, b, c, d, a, F); ROUND(0, 7, 0x698098d8, a, b, c, d, F); ROUND(0, 12, 0x8b44f7af, d, a, b, c, F); ROUND(0, 17, 0xffff5bb1, c, d, a, b, F); ROUND(0, 22, 0x895cd7be, b, c, d, a, F); ROUND(0, 7, 0x6b901122, a, b, c, d, F); ROUND(0, 12, 0xfd987193, d, a, b, c, F); ROUND(WL, 17, 0xa679438e, c, d, a, b, F); ROUND(0, 22, 0x49b40821, b, c, d, a, F); ROUND(W1, 5, 0xf61e2562, a, b, c, d, G); ROUND(0, 9, 0xc040b340, d, a, b, c, G); ROUND(0, 14, 0x265e5a51, c, d, a, b, G); ROUND(W0, 20, 0xe9b6c7aa, b, c, d, a, G); ROUND(0, 5, 0xd62f105d, a, b, c, d, G); ROUND(0, 9, 0x02441453, d, a, b, c, G); ROUND(0, 14, 0xd8a1e681, c, d, a, b, G); ROUND(0, 20, 0xe7d3fbc8, b, c, d, a, G); ROUND(0, 5, 0x21e1cde6, a, b, c, d, G); ROUND(WL, 9, 0xc33707d6, d, a, b, c, G); ROUND(0, 14, 0xf4d50d87, c, d, a, b, G); ROUND(0, 20, 0x455a14ed, b, c, d, a, G); ROUND(0, 5, 0xa9e3e905, a, b, c, d, G); ROUND(0, 9, 0xfcefa3f8, d, a, b, c, G); ROUND(0, 14, 0x676f02d9, c, d, a, b, G); ROUND(0, 20, 0x8d2a4c8a, b, c, d, a, G); ROUND(0, 4, 0xfffa3942, a, b, c, d, H); ROUND(0, 11, 0x8771f681, d, a, b, c, H); ROUND(0, 16, 0x6d9d6122, c, d, a, b, H); ROUND(WL, 23, 0xfde5380c, b, c, d, a, H); ROUND(W1, 4, 0xa4beea44, a, b, c, d, H); ROUND(0, 11, 0x4bdecfa9, d, a, b, c, H); ROUND(0, 16, 0xf6bb4b60, c, d, a, b, H); ROUND(0, 23, 0xbebfbc70, b, c, d, a, H); ROUND(0, 4, 0x289b7ec6, a, b, c, d, H); ROUND(W0, 11, 0xeaa127fa, d, a, b, c, H); ROUND(0, 16, 0xd4ef3085, c, d, a, b, H); ROUND(0, 23, 0x04881d05, b, c, d, a, H); ROUND(0, 4, 0xd9d4d039, a, b, c, d, H); ROUND(0, 11, 0xe6db99e5, d, a, b, c, H); ROUND(0, 16, 0x1fa27cf8, c, d, a, b, H); ROUND(0, 23, 0xc4ac5665, b, c, d, a, H); ROUND(W0, 6, 0xf4292244, a, b, c, d, I); ROUND(0, 10, 0x432aff97, d, a, b, c, I); ROUND(WL, 15, 0xab9423a7, c, d, a, b, I); ROUND(0, 21, 0xfc93a039, b, c, d, a, I); ROUND(0, 6, 0x655b59c3, a, b, c, d, I); ROUND(0, 10, 0x8f0ccc92, d, a, b, c, I); ROUND(0, 15, 0xffeff47d, c, d, a, b, I); ROUND(W1, 21, 0x85845dd1, b, c, d, a, I); ROUND(0, 6, 0x6fa87e4f, a, b, c, d, I); ROUND(0, 10, 0xfe2ce6e0, d, a, b, c, I); ROUND(0, 15, 0xa3014314, c, d, a, b, I); ROUND(0, 21, 0x4e0811a1, b, c, d, a, I); ROUND(0, 6, 0xf7537e82, a, b, c, d, I); ROUND(0, 10, 0xbd3af235, d, a, b, c, I); ROUND(0, 15, 0x2ad7d2bb, c, d, a, b, I); ROUND(0, 21, 0xeb86d391, b, c, d, a, I); h0 += a; h1 += b; h2 += c; h3 += d; // write the final result out digest[0] = h0; digest[1] = h1; digest[2] = h2; digest[3] = h3; } // **************************************************************************** // Function: FindKeyspaceSize // // Purpose: /// Multiply out the byteLength by 
valsPerByte to find the /// total size of the key space, with error checking. // // Arguments: // byteLength number of bytes in a key // valsPerByte number of values each byte can take on // // Programmer: Jeremy Meredith // Creation: July 23, 2014 // // Modifications: // **************************************************************************** __host__ __device__ int FindKeyspaceSize(int byteLength, int valsPerByte) { int keyspace = 1; for (int i=0; i<byteLength; ++i) { if (keyspace >= 0x7fffffff / valsPerByte) { // error, we're about to overflow a signed int return -1; } keyspace *= valsPerByte; } return keyspace; } // **************************************************************************** // Function: IndexToKey // // Purpose: /// For a given index in the keyspace, find the actual key string /// which is at that index. // // Arguments: // index index in key space // byteLength number of bytes in a key // valsPerByte number of values each byte can take on // vals output key string // // Programmer: Jeremy Meredith // Creation: July 23, 2014 // // Modifications: // **************************************************************************** __host__ __device__ void IndexToKey(unsigned int index, int byteLength, int valsPerByte, unsigned char vals[8]) { // loop pointlessly unrolled to avoid CUDA compiler complaints // about unaligned accesses (!?) on older compute capabilities vals[0] = index % valsPerByte; index /= valsPerByte; vals[1] = index % valsPerByte; index /= valsPerByte; vals[2] = index % valsPerByte; index /= valsPerByte; vals[3] = index % valsPerByte; index /= valsPerByte; vals[4] = index % valsPerByte; index /= valsPerByte; vals[5] = index % valsPerByte; index /= valsPerByte; vals[6] = index % valsPerByte; index /= valsPerByte; vals[7] = index % valsPerByte; index /= valsPerByte; } // **************************************************************************** // Function: AsHex // // Purpose: /// For a given key string, return the raw hex string for its bytes. // // Arguments: // vals key string // len length of key string // // Programmer: Jeremy Meredith // Creation: July 23, 2014 // // Modifications: // **************************************************************************** std::string AsHex(unsigned char *vals, int len) { ostringstream out; char tmp[256]; for (int i=0; i<len; ++i) { sprintf(tmp, "%2.2X", vals[i]); out << tmp; } return out.str(); } // **************************************************************************** // Function: addBenchmarkSpecOptions // // Purpose: // Add benchmark specific options parsing // // Arguments: // op: the options parser / parameter database // // Returns: nothing // // Programmer: Jeremy Meredith // Creation: July 23, 2014 // // Modifications: // // **************************************************************************** void addBenchmarkSpecOptions(OptionParser &op) { } // **************************************************************************** // Function: FindKeyWithDigest_CPU // // Purpose: /// On the CPU, search the key space to find a key with the given digest. 
// // Arguments: // searchDigest the digest to search for // byteLength number of bytes in a key // valsPerByte number of values each byte can take on // foundIndex output - the index of the found key (if found) // foundKey output - the string of the found key (if found) // foundDigest output - the digest of the found key (if found) // // Programmer: Jeremy Meredith // Creation: July 23, 2014 // // Modifications: // **************************************************************************** double FindKeyWithDigest_CPU(const unsigned int searchDigest[4], const int byteLength, const int valsPerByte, int *foundIndex, unsigned char foundKey[8], unsigned int foundDigest[4]) { int timer = Timer::Start(); int keyspace = FindKeyspaceSize(byteLength, valsPerByte); for (int i=0; i<keyspace; i += valsPerByte) { unsigned char key[8] = {0,0,0,0,0,0,0,0}; IndexToKey(i, byteLength, valsPerByte, key); for (int j=0; j < valsPerByte; ++j) { unsigned int digest[4]; md5_2words((unsigned int*)key, byteLength, digest); if (digest[0] == searchDigest[0] && digest[1] == searchDigest[1] && digest[2] == searchDigest[2] && digest[3] == searchDigest[3]) { *foundIndex = i + j; foundKey[0] = key[0]; foundKey[1] = key[1]; foundKey[2] = key[2]; foundKey[3] = key[3]; foundKey[4] = key[4]; foundKey[5] = key[5]; foundKey[6] = key[6]; foundKey[7] = key[7]; foundDigest[0] = digest[0]; foundDigest[1] = digest[1]; foundDigest[2] = digest[2]; foundDigest[3] = digest[3]; } ++key[0]; } } double runtime = Timer::Stop(timer, "md5 runtime"); return runtime; } // **************************************************************************** // Function: FindKeyWithDigest_Kernel // // Purpose: /// Within each thread of a GPU, search part of the key space /// to find a key with the given digest. // // Arguments: // searchDigest the digest to search for // keyspace the size of the key space to search // byteLength number of bytes in a key // valsPerByte number of values each byte can take on // foundIndex output - the index of the found key (if found) // foundKey output - the string of the found key (if found) // foundDigest output - the digest of the found key (if found) // // Programmer: Jeremy Meredith // Creation: July 23, 2014 // // Modifications: // **************************************************************************** __global__ void FindKeyWithDigest_Kernel(unsigned int searchDigest0, unsigned int searchDigest1, unsigned int searchDigest2, unsigned int searchDigest3, int keyspace, int byteLength, int valsPerByte, int *foundIndex, unsigned char *foundKey, unsigned int *foundDigest) { int threadid = blockIdx.x*blockDim.x + threadIdx.x; int startindex = threadid * valsPerByte; unsigned char key[8] = {0,0,0,0, 0,0,0,0}; IndexToKey(startindex, byteLength, valsPerByte, key); for (int j=0; j < valsPerByte && startindex+j < keyspace; ++j) { unsigned int digest[4]; md5_2words((unsigned int*)key, byteLength, digest); if (digest[0] == searchDigest0 && digest[1] == searchDigest1 && digest[2] == searchDigest2 && digest[3] == searchDigest3) { *foundIndex = startindex + j; foundKey[0] = key[0]; foundKey[1] = key[1]; foundKey[2] = key[2]; foundKey[3] = key[3]; foundKey[4] = key[4]; foundKey[5] = key[5]; foundKey[6] = key[6]; foundKey[7] = key[7]; foundDigest[0] = digest[0]; foundDigest[1] = digest[1]; foundDigest[2] = digest[2]; foundDigest[3] = digest[3]; } ++key[0]; } } // **************************************************************************** // Function: FindKeyWithDigest_GPU // // Purpose: /// On the GPU, search the key space to 
find a key with the given digest. // // Arguments: // searchDigest the digest to search for // byteLength number of bytes in a key // valsPerByte number of values each byte can take on // foundIndex output - the index of the found key (if found) // foundKey output - the string of the found key (if found) // foundDigest output - the digest of the found key (if found) // // Programmer: Jeremy Meredith // Creation: July 23, 2014 // // Modifications: // **************************************************************************** double FindKeyWithDigest_GPU(const unsigned int searchDigest[4], const int byteLength, const int valsPerByte, int *foundIndex, unsigned char foundKey[8], unsigned int foundDigest[4]) { int keyspace = FindKeyspaceSize(byteLength, valsPerByte); // // allocate output buffers // int *d_foundIndex; cudaMalloc((void**)&d_foundIndex, sizeof(int) * 1); CHECK_CUDA_ERROR(); unsigned char *d_foundKey; cudaMalloc((void**)&d_foundKey, 8); CHECK_CUDA_ERROR(); unsigned int *d_foundDigest; cudaMalloc((void**)&d_foundDigest, sizeof(unsigned int) * 4); CHECK_CUDA_ERROR(); // // initialize output buffers to show no found result // cudaMemcpy(d_foundIndex, foundIndex, sizeof(int) * 1, cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(); cudaMemcpy(d_foundKey, foundKey, 8, cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(); cudaMemcpy(d_foundDigest, foundDigest, sizeof(unsigned int) * 4, cudaMemcpyHostToDevice); CHECK_CUDA_ERROR(); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); CHECK_CUDA_ERROR(); // // calculate work thread shape // int nthreads = 384; size_t nblocks = ceil((double(keyspace) / double(valsPerByte)) / double(nthreads)); // // run the kernel // cudaEventRecord(start, 0); FindKeyWithDigest_Kernel<<<nblocks, nthreads>>>(searchDigest[0], searchDigest[1], searchDigest[2], searchDigest[3], keyspace, byteLength, valsPerByte, d_foundIndex, d_foundKey, d_foundDigest); CHECK_CUDA_ERROR(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CHECK_CUDA_ERROR(); // // get the timing/rate info // float millisec = 0; cudaEventElapsedTime(&millisec, start, stop); // // read the (presumably) found key // cudaMemcpy(foundIndex, d_foundIndex, sizeof(int) * 1, cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(); cudaMemcpy(foundKey, d_foundKey, 8, cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(); cudaMemcpy(foundDigest, d_foundDigest, sizeof(unsigned int) * 4, cudaMemcpyDeviceToHost); CHECK_CUDA_ERROR(); // // free device memory // cudaFree(d_foundIndex); CHECK_CUDA_ERROR(); cudaFree(d_foundKey); CHECK_CUDA_ERROR(); cudaFree(d_foundDigest); CHECK_CUDA_ERROR(); // // return the runtime in seconds // return millisec / 1.e3; } // **************************************************************************** // Function: RunBenchmark // // Purpose: // Executes the MD5 Hash benchmark // // Arguments: // resultDB: results from the benchmark are stored in this db // op: the options parser / parameter database // // Returns: nothing // // Programmer: Jeremy Meredith // Creation: July 23, 2014 // // Modifications: // // **************************************************************************** void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) { bool verbose = op.getOptionBool("verbose"); int size = op.getOptionInt("size"); if (size < 1 || size > 4) { cerr << "ERROR: Invalid size parameter\n"; return; } // // Determine the shape/size of key space // const int sizes_byteLength[] = { 7, 5, 6, 5}; const int sizes_valsPerByte[] = {10, 35, 25, 70}; const int byteLength = 
sizes_byteLength[size-1]; const int valsPerByte = sizes_valsPerByte[size-1]; char atts[1024]; sprintf(atts, "%dx%d", byteLength, valsPerByte); if (verbose) cout << "Searching keys of length " << byteLength << " bytes " << "and " << valsPerByte << " values per byte" << endl; const int keyspace = FindKeyspaceSize(byteLength, valsPerByte); if (keyspace < 0) { cerr << "Error: more than 2^31 bits of entropy is unsupported.\n"; return; } if (byteLength > 7) { cerr << "Error: more than 7 byte key length is unsupported.\n"; return; } if (verbose) cout << "|keyspace| = " << keyspace << " ("<<int(keyspace/1e6)<<"M)" << endl; // // Choose a random key from the keyspace, and calculate its hash. // //srandom(12345); srandom(time(NULL)); int passes = op.getOptionInt("passes"); for (int pass = 0 ; pass < passes ; ++pass) { int randomIndex = random() % keyspace;; unsigned char randomKey[8] = {0,0,0,0, 0,0,0,0}; unsigned int randomDigest[4]; IndexToKey(randomIndex, byteLength, valsPerByte, randomKey); md5_2words((unsigned int*)randomKey, byteLength, randomDigest); if (verbose) { cout << endl; cout << "--- pass " << pass << " ---" << endl; cout << "Looking for random key:" << endl; cout << " randomIndex = " << randomIndex << endl; cout << " randomKey = 0x" << AsHex(randomKey, 8/*byteLength*/) << endl; cout << " randomDigest= " << AsHex((unsigned char*)randomDigest, 16) << endl; } // // Use the GPU to brute force search the keyspace for this key. // unsigned int foundDigest[4] = {0,0,0,0}; int foundIndex = -1; unsigned char foundKey[8] = {0,0,0,0, 0,0,0,0}; double t; // in seconds if (false) { t = FindKeyWithDigest_CPU(randomDigest, byteLength, valsPerByte, &foundIndex, foundKey, foundDigest); } else { t = FindKeyWithDigest_GPU(randomDigest, byteLength, valsPerByte, &foundIndex, foundKey, foundDigest); } // // Calculate the rate and add it to the results // double rate = (double(keyspace) / double(t)) / 1.e9; if (verbose) { cout << "time = " << t << " sec, rate = " << rate << " GHash/sec\n"; } // // Double check everything matches (index, key, hash). // if (foundIndex < 0) { cerr << "\nERROR: could not find a match.\n"; rate = FLT_MAX; } else if (foundIndex != randomIndex) { cerr << "\nERROR: mismatch in key index found.\n"; rate = FLT_MAX; } else if (foundKey[0] != randomKey[0] || foundKey[1] != randomKey[1] || foundKey[2] != randomKey[2] || foundKey[3] != randomKey[3] || foundKey[4] != randomKey[4] || foundKey[5] != randomKey[5] || foundKey[6] != randomKey[6] || foundKey[7] != randomKey[7]) { cerr << "\nERROR: mismatch in key value found.\n"; rate = FLT_MAX; } else if (foundDigest[0] != randomDigest[0] || foundDigest[1] != randomDigest[1] || foundDigest[2] != randomDigest[2] || foundDigest[3] != randomDigest[3]) { cerr << "\nERROR: mismatch in digest of key.\n"; rate = FLT_MAX; } else { if (verbose) cout << endl << "Successfully found match (index, key, hash):" << endl; } // // Add the calculated performancethe results // resultDB.AddResult("MD5Hash", atts, "GHash/s", rate); if (verbose) { cout << " foundIndex = " << foundIndex << endl; cout << " foundKey = 0x" << AsHex(foundKey, 8/*byteLength*/) << endl; cout << " foundDigest = " << AsHex((unsigned char*)foundDigest, 16) << endl; cout << endl; } } return; }
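// Illustrative sketch: the mixed-radix mapping behind FindKeyspaceSize() and
// IndexToKey() above. A key of byteLength bytes, each taking valsPerByte
// values, is just a number written in base valsPerByte, so index <-> key is a
// repeated divide/modulo. The constants and the decoded index are local to
// this sketch.
#include <cstdio>

int main()
{
    const int byteLength = 3;
    const int valsPerByte = 10;

    // keyspace = valsPerByte^byteLength (the real code also checks overflow)
    int keyspace = 1;
    for (int i = 0; i < byteLength; ++i) keyspace *= valsPerByte;

    unsigned int index = 427;                    // some key index to decode
    unsigned char key[8] = {0, 0, 0, 0, 0, 0, 0, 0};
    unsigned int v = index;
    for (int i = 0; i < byteLength; ++i)
    {
        key[i] = v % valsPerByte;                // least significant digit first
        v /= valsPerByte;
    }

    printf("keyspace=%d  index=%u -> key bytes = %d %d %d\n",
           keyspace, index, key[0], key[1], key[2]);
    return 0;
}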
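// Illustrative sketch: driving md5_2words() above on the host for a short
// ASCII key. The two message words must hold the key bytes little-endian and
// be zero elsewhere; on a little-endian host the printed digest for "abc"
// should match the standard RFC 1321 test vector
// (900150983CD24FB0D6963F7D28E17F72, uppercase because AsHex uses %2.2X).
// PrintDigestOfAbc is a hypothetical helper and assumes it is compiled in the
// same translation unit as md5_2words() and AsHex(); it could be called from a
// small test main.
void PrintDigestOfAbc()
{
    unsigned int words[2] = {0, 0};
    const unsigned char key[3] = {'a', 'b', 'c'};
    for (int i = 0; i < 3; ++i)
        words[i/4] |= (unsigned int)key[i] << (8*(i%4));

    unsigned int digest[4];
    md5_2words(words, 3, digest);
    cout << "md5(\"abc\") = " << AsHex((unsigned char*)digest, 16) << endl;
}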
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <inttypes.h> #include <math.h> #include <float.h> #include <stdint.h> #include <cuda.h> #include "sleefinline_purec_scalar.h" #include "sleefinline_cuda.h" #define STDIN_FILENO 0 #define SIMD_SUFFIX _cuda_sleef #define CONCAT_SIMD_SUFFIX_(keyword, suffix) keyword ## suffix #define CONCAT_SIMD_SUFFIX(keyword, suffix) CONCAT_SIMD_SUFFIX_(keyword, suffix) #define vdouble2 CONCAT_SIMD_SUFFIX(vdouble2, SIMD_SUFFIX) #define vfloat2 CONCAT_SIMD_SUFFIX(vfloat2, SIMD_SUFFIX) // static int startsWith(const char *str, const char *prefix) { while(*prefix != '\0') if (*str++ != *prefix++) return 0; return *prefix == '\0'; } static double u2d(uint64_t u) { union { double f; uint64_t i; } tmp; tmp.i = u; return tmp.f; } static uint64_t d2u(double d) { union { double f; uint64_t i; } tmp; tmp.f = d; return tmp.i; } static float u2f(uint32_t u) { union { float f; uint32_t i; } tmp; tmp.i = u; return tmp.f; } static uint32_t f2u(float d) { union { float f; uint32_t i; } tmp; tmp.f = d; return tmp.i; } // __global__ void xsin(double *r, double *a0) { *r = Sleef_sind1_u35cuda(*a0); } __global__ void xcos(double *r, double *a0) { *r = Sleef_cosd1_u35cuda(*a0); } __global__ void xsincos(vdouble2 *r, double *a0) { *r = Sleef_sincosd1_u35cuda(*a0); } __global__ void xtan(double *r, double *a0) { *r = Sleef_tand1_u35cuda(*a0); } __global__ void xasin(double *r, double *a0) { *r = Sleef_asind1_u35cuda(*a0); } __global__ void xacos(double *r, double *a0) { *r = Sleef_acosd1_u35cuda(*a0); } __global__ void xatan(double *r, double *a0) { *r = Sleef_atand1_u35cuda(*a0); } __global__ void xatan2(double *r, double *a0, double *a1) { *r = Sleef_atan2d1_u35cuda(*a0, *a1); } __global__ void xlog(double *r, double *a0) { *r = Sleef_logd1_u35cuda(*a0); } __global__ void xcbrt(double *r, double *a0) { *r = Sleef_cbrtd1_u35cuda(*a0); } __global__ void xsin_u1(double *r, double *a0) { *r = Sleef_sind1_u10cuda(*a0); } __global__ void xcos_u1(double *r, double *a0) { *r = Sleef_cosd1_u10cuda(*a0); } __global__ void xsincos_u1(vdouble2 *r, double *a0) { *r = Sleef_sincosd1_u10cuda(*a0); } __global__ void xtan_u1(double *r, double *a0) { *r = Sleef_tand1_u10cuda(*a0); } __global__ void xasin_u1(double *r, double *a0) { *r = Sleef_asind1_u10cuda(*a0); } __global__ void xacos_u1(double *r, double *a0) { *r = Sleef_acosd1_u10cuda(*a0); } __global__ void xatan_u1(double *r, double *a0) { *r = Sleef_atand1_u10cuda(*a0); } __global__ void xatan2_u1(double *r, double *a0, double *a1) { *r = Sleef_atan2d1_u10cuda(*a0, *a1); } __global__ void xlog_u1(double *r, double *a0) { *r = Sleef_logd1_u10cuda(*a0); } __global__ void xcbrt_u1(double *r, double *a0) { *r = Sleef_cbrtd1_u10cuda(*a0); } __global__ void xexp(double *r, double *a0) { *r = Sleef_expd1_u10cuda(*a0); } __global__ void xpow(double *r, double *a0, double *a1) { *r = Sleef_powd1_u10cuda(*a0, *a1); } __global__ void xsinh(double *r, double *a0) { *r = Sleef_sinhd1_u10cuda(*a0); } __global__ void xcosh(double *r, double *a0) { *r = Sleef_coshd1_u10cuda(*a0); } __global__ void xtanh(double *r, double *a0) { *r = Sleef_tanhd1_u10cuda(*a0); } __global__ void xsinh_u35(double *r, double *a0) { *r = Sleef_sinhd1_u35cuda(*a0); } __global__ void xcosh_u35(double *r, double *a0) { *r = Sleef_coshd1_u35cuda(*a0); } __global__ void xtanh_u35(double *r, double *a0) { *r = Sleef_tanhd1_u35cuda(*a0); } __global__ void xasinh(double *r, double *a0) { *r = Sleef_asinhd1_u10cuda(*a0); } __global__ void xacosh(double *r, double 
*a0) { *r = Sleef_acoshd1_u10cuda(*a0); } __global__ void xatanh(double *r, double *a0) { *r = Sleef_atanhd1_u10cuda(*a0); } __global__ void xexp2(double *r, double *a0) { *r = Sleef_exp2d1_u10cuda(*a0); } __global__ void xexp2_u35(double *r, double *a0) { *r = Sleef_exp2d1_u35cuda(*a0); } __global__ void xexp10(double *r, double *a0) { *r = Sleef_exp10d1_u10cuda(*a0); } __global__ void xexp10_u35(double *r, double *a0) { *r = Sleef_exp10d1_u35cuda(*a0); } __global__ void xexpm1(double *r, double *a0) { *r = Sleef_expm1d1_u10cuda(*a0); } __global__ void xlog10(double *r, double *a0) { *r = Sleef_log10d1_u10cuda(*a0); } __global__ void xlog2(double *r, double *a0) { *r = Sleef_log2d1_u10cuda(*a0); } __global__ void xlog2_u35(double *r, double *a0) { *r = Sleef_log2d1_u35cuda(*a0); } __global__ void xlog1p(double *r, double *a0) { *r = Sleef_log1pd1_u10cuda(*a0); } __global__ void xsincospi_u05(vdouble2 *r, double *a0) { *r = Sleef_sincospid1_u05cuda(*a0); } __global__ void xsincospi_u35(vdouble2 *r, double *a0) { *r = Sleef_sincospid1_u35cuda(*a0); } __global__ void xsinpi_u05(double *r, double *a0) { *r = Sleef_sinpid1_u05cuda(*a0); } __global__ void xcospi_u05(double *r, double *a0) { *r = Sleef_cospid1_u05cuda(*a0); } __global__ void xldexp(double *r, double *a0, int *a1) { *r = Sleef_ldexpd1_cuda(*a0, *a1); } __global__ void xilogb(int *r, double *a0) { *r = Sleef_ilogbd1_cuda(*a0); } __global__ void xfma(double *r, double *a0, double *a1, double *a2) { *r = Sleef_fmad1_cuda(*a0, *a1, *a2); } __global__ void xsqrt(double *r, double *a0) { *r = Sleef_sqrtd1_cuda(*a0); } __global__ void xsqrt_u05(double *r, double *a0) { *r = Sleef_sqrtd1_u05cuda(*a0); } __global__ void xsqrt_u35(double *r, double *a0) { *r = Sleef_sqrtd1_u35cuda(*a0); } __global__ void xhypot_u05(double *r, double *a0, double *a1) { *r = Sleef_hypotd1_u05cuda(*a0, *a1); } __global__ void xhypot_u35(double *r, double *a0, double *a1) { *r = Sleef_hypotd1_u35cuda(*a0, *a1); } __global__ void xfabs(double *r, double *a0) { *r = Sleef_fabsd1_cuda(*a0); } __global__ void xcopysign(double *r, double *a0, double *a1) { *r = Sleef_copysignd1_cuda(*a0, *a1); } __global__ void xfmax(double *r, double *a0, double *a1) { *r = Sleef_fmaxd1_cuda(*a0, *a1); } __global__ void xfmin(double *r, double *a0, double *a1) { *r = Sleef_fmind1_cuda(*a0, *a1); } __global__ void xfdim(double *r, double *a0, double *a1) { *r = Sleef_fdimd1_cuda(*a0, *a1); } __global__ void xtrunc(double *r, double *a0) { *r = Sleef_truncd1_cuda(*a0); } __global__ void xfloor(double *r, double *a0) { *r = Sleef_floord1_cuda(*a0); } __global__ void xceil(double *r, double *a0) { *r = Sleef_ceild1_cuda(*a0); } __global__ void xround(double *r, double *a0) { *r = Sleef_roundd1_cuda(*a0); } __global__ void xrint(double *r, double *a0) { *r = Sleef_rintd1_cuda(*a0); } __global__ void xnextafter(double *r, double *a0, double *a1) { *r = Sleef_nextafterd1_cuda(*a0, *a1); } __global__ void xfrfrexp(double *r, double *a0) { *r = Sleef_frfrexpd1_cuda(*a0); } __global__ void xexpfrexp(int *r, double *a0) { *r = Sleef_expfrexpd1_cuda(*a0); } __global__ void xfmod(double *r, double *a0, double *a1) { *r = Sleef_fmodd1_cuda(*a0, *a1); } __global__ void xremainder(double *r, double *a0, double *a1) { *r = Sleef_remainderd1_cuda(*a0, *a1); } __global__ void xmodf(vdouble2 *r, double *a0) { *r = Sleef_modfd1_cuda(*a0); } __global__ void xlgamma_u1(double *r, double *a0) { *r = Sleef_lgammad1_u10cuda(*a0); } __global__ void xtgamma_u1(double *r, double *a0) { *r = 
Sleef_tgammad1_u10cuda(*a0); } __global__ void xerf_u1(double *r, double *a0) { *r = Sleef_erfd1_u10cuda(*a0); } __global__ void xerfc_u15(double *r, double *a0) { *r = Sleef_erfcd1_u15cuda(*a0); } __global__ void xsinf(float *r, float *a0) { *r = Sleef_sinf1_u35cuda(*a0); } __global__ void xcosf(float *r, float *a0) { *r = Sleef_cosf1_u35cuda(*a0); } __global__ void xsincosf(vfloat2 *r, float *a0) { *r = Sleef_sincosf1_u35cuda(*a0); } __global__ void xtanf(float *r, float *a0) { *r = Sleef_tanf1_u35cuda(*a0); } __global__ void xasinf(float *r, float *a0) { *r = Sleef_asinf1_u35cuda(*a0); } __global__ void xacosf(float *r, float *a0) { *r = Sleef_acosf1_u35cuda(*a0); } __global__ void xatanf(float *r, float *a0) { *r = Sleef_atanf1_u35cuda(*a0); } __global__ void xatan2f(float *r, float *a0, float *a1) { *r = Sleef_atan2f1_u35cuda(*a0, *a1); } __global__ void xlogf(float *r, float *a0) { *r = Sleef_logf1_u35cuda(*a0); } __global__ void xcbrtf(float *r, float *a0) { *r = Sleef_cbrtf1_u35cuda(*a0); } __global__ void xsinf_u1(float *r, float *a0) { *r = Sleef_sinf1_u10cuda(*a0); } __global__ void xcosf_u1(float *r, float *a0) { *r = Sleef_cosf1_u10cuda(*a0); } __global__ void xsincosf_u1(vfloat2 *r, float *a0) { *r = Sleef_sincosf1_u10cuda(*a0); } __global__ void xtanf_u1(float *r, float *a0) { *r = Sleef_tanf1_u10cuda(*a0); } __global__ void xasinf_u1(float *r, float *a0) { *r = Sleef_asinf1_u10cuda(*a0); } __global__ void xacosf_u1(float *r, float *a0) { *r = Sleef_acosf1_u10cuda(*a0); } __global__ void xatanf_u1(float *r, float *a0) { *r = Sleef_atanf1_u10cuda(*a0); } __global__ void xatan2f_u1(float *r, float *a0, float *a1) { *r = Sleef_atan2f1_u10cuda(*a0, *a1); } __global__ void xlogf_u1(float *r, float *a0) { *r = Sleef_logf1_u10cuda(*a0); } __global__ void xcbrtf_u1(float *r, float *a0) { *r = Sleef_cbrtf1_u10cuda(*a0); } __global__ void xexpf(float *r, float *a0) { *r = Sleef_expf1_u10cuda(*a0); } __global__ void xpowf(float *r, float *a0, float *a1) { *r = Sleef_powf1_u10cuda(*a0, *a1); } __global__ void xsinhf(float *r, float *a0) { *r = Sleef_sinhf1_u10cuda(*a0); } __global__ void xcoshf(float *r, float *a0) { *r = Sleef_coshf1_u10cuda(*a0); } __global__ void xtanhf(float *r, float *a0) { *r = Sleef_tanhf1_u10cuda(*a0); } __global__ void xsinhf_u35(float *r, float *a0) { *r = Sleef_sinhf1_u35cuda(*a0); } __global__ void xcoshf_u35(float *r, float *a0) { *r = Sleef_coshf1_u35cuda(*a0); } __global__ void xtanhf_u35(float *r, float *a0) { *r = Sleef_tanhf1_u35cuda(*a0); } __global__ void xfastsinf_u3500(float *r, float *a0) { *r = Sleef_fastsinf1_u3500cuda(*a0); } __global__ void xfastcosf_u3500(float *r, float *a0) { *r = Sleef_fastcosf1_u3500cuda(*a0); } __global__ void xfastpowf_u3500(float *r, float *a0, float *a1) { *r = Sleef_fastpowf1_u3500cuda(*a0, *a1); } __global__ void xasinhf(float *r, float *a0) { *r = Sleef_asinhf1_u10cuda(*a0); } __global__ void xacoshf(float *r, float *a0) { *r = Sleef_acoshf1_u10cuda(*a0); } __global__ void xatanhf(float *r, float *a0) { *r = Sleef_atanhf1_u10cuda(*a0); } __global__ void xexp2f(float *r, float *a0) { *r = Sleef_exp2f1_u10cuda(*a0); } __global__ void xexp2f_u35(float *r, float *a0) { *r = Sleef_exp2f1_u35cuda(*a0); } __global__ void xexp10f(float *r, float *a0) { *r = Sleef_exp10f1_u10cuda(*a0); } __global__ void xexp10f_u35(float *r, float *a0) { *r = Sleef_exp10f1_u35cuda(*a0); } __global__ void xexpm1f(float *r, float *a0) { *r = Sleef_expm1f1_u10cuda(*a0); } __global__ void xlog10f(float *r, float *a0) { *r = 
Sleef_log10f1_u10cuda(*a0); } __global__ void xlog2f(float *r, float *a0) { *r = Sleef_log2f1_u10cuda(*a0); } __global__ void xlog2f_u35(float *r, float *a0) { *r = Sleef_log2f1_u35cuda(*a0); } __global__ void xlog1pf(float *r, float *a0) { *r = Sleef_log1pf1_u10cuda(*a0); } __global__ void xsincospif_u05(vfloat2 *r, float *a0) { *r = Sleef_sincospif1_u05cuda(*a0); } __global__ void xsincospif_u35(vfloat2 *r, float *a0) { *r = Sleef_sincospif1_u35cuda(*a0); } __global__ void xsinpif_u05(float *r, float *a0) { *r = Sleef_sinpif1_u05cuda(*a0); } __global__ void xcospif_u05(float *r, float *a0) { *r = Sleef_cospif1_u05cuda(*a0); } __global__ void xldexpf(float *r, float *a0, int *a1) { *r = Sleef_ldexpf1_cuda(*a0, *a1); } __global__ void xilogbf(int *r, float *a0) { *r = Sleef_ilogbf1_cuda(*a0); } __global__ void xfmaf(float *r, float *a0, float *a1, float *a2) { *r = Sleef_fmaf1_cuda(*a0, *a1, *a2); } __global__ void xsqrtf(float *r, float *a0) { *r = Sleef_sqrtf1_cuda(*a0); } __global__ void xsqrtf_u05(float *r, float *a0) { *r = Sleef_sqrtf1_u05cuda(*a0); } __global__ void xsqrtf_u35(float *r, float *a0) { *r = Sleef_sqrtf1_u35cuda(*a0); } __global__ void xhypotf_u05(float *r, float *a0, float *a1) { *r = Sleef_hypotf1_u05cuda(*a0, *a1); } __global__ void xhypotf_u35(float *r, float *a0, float *a1) { *r = Sleef_hypotf1_u35cuda(*a0, *a1); } __global__ void xfabsf(float *r, float *a0) { *r = Sleef_fabsf1_cuda(*a0); } __global__ void xcopysignf(float *r, float *a0, float *a1) { *r = Sleef_copysignf1_cuda(*a0, *a1); } __global__ void xfmaxf(float *r, float *a0, float *a1) { *r = Sleef_fmaxf1_cuda(*a0, *a1); } __global__ void xfminf(float *r, float *a0, float *a1) { *r = Sleef_fminf1_cuda(*a0, *a1); } __global__ void xfdimf(float *r, float *a0, float *a1) { *r = Sleef_fdimf1_cuda(*a0, *a1); } __global__ void xtruncf(float *r, float *a0) { *r = Sleef_truncf1_cuda(*a0); } __global__ void xfloorf(float *r, float *a0) { *r = Sleef_floorf1_cuda(*a0); } __global__ void xceilf(float *r, float *a0) { *r = Sleef_ceilf1_cuda(*a0); } __global__ void xroundf(float *r, float *a0) { *r = Sleef_roundf1_cuda(*a0); } __global__ void xrintf(float *r, float *a0) { *r = Sleef_rintf1_cuda(*a0); } __global__ void xnextafterf(float *r, float *a0, float *a1) { *r = Sleef_nextafterf1_cuda(*a0, *a1); } __global__ void xfrfrexpf(float *r, float *a0) { *r = Sleef_frfrexpf1_cuda(*a0); } __global__ void xexpfrexpf(float *r, float *a0) { *r = Sleef_expfrexpf1_cuda(*a0); } __global__ void xfmodf(float *r, float *a0, float *a1) { *r = Sleef_fmodf1_cuda(*a0, *a1); } __global__ void xremainderf(float *r, float *a0, float *a1) { *r = Sleef_remainderf1_cuda(*a0, *a1); } __global__ void xmodff(vfloat2 *r, float *a0) { *r = Sleef_modff1_cuda(*a0); } __global__ void xlgammaf_u1(float *r, float *a0) { *r = Sleef_lgammaf1_u10cuda(*a0); } __global__ void xtgammaf_u1(float *r, float *a0) { *r = Sleef_tgammaf1_u10cuda(*a0); } __global__ void xerff_u1(float *r, float *a0) { *r = Sleef_erff1_u10cuda(*a0); } __global__ void xerfcf_u15(float *r, float *a0) { *r = Sleef_erfcf1_u15cuda(*a0); } // #define func_d_d(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ uint64_t u; \ sscanf(buf, funcStr " %" PRIx64, &u); \ *a0 = u2d(u); \ funcName<<<1, 1>>>(r, a0); \ cudaDeviceSynchronize(); \ printf("%" PRIx64 "\n", d2u(*r)); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_d2_d(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ uint64_t u; \ sscanf(buf, funcStr " %" PRIx64, &u); \ 
*a0 = u2d(u); \ funcName<<<1, 1>>>(r2, a0); \ cudaDeviceSynchronize(); \ printf("%" PRIx64 " %" PRIx64 "\n", d2u(r2->x), d2u(r2->y)); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_d_d_d(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ uint64_t u, v; \ sscanf(buf, funcStr " %" PRIx64 " %" PRIx64, &u, &v); \ *a0 = u2d(u); \ *a1 = u2d(v); \ funcName<<<1, 1>>>(r, a0, a1); \ cudaDeviceSynchronize(); \ printf("%" PRIx64 "\n", d2u(*r)); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_d_d_i(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ uint64_t u, v; \ sscanf(buf, funcStr " %" PRIx64 " %" PRIx64, &u, &v); \ *a0 = u2d(u); \ *i0 = (int)u2d(v); \ funcName<<<1, 1>>>(r, a0, i0); \ cudaDeviceSynchronize(); \ printf("%" PRIx64 "\n", d2u(*r)); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_i_d(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ uint64_t u; \ sscanf(buf, funcStr " %" PRIx64, &u); \ *a0 = u2d(u); \ funcName<<<1, 1>>>(i0, a0); \ cudaDeviceSynchronize(); \ printf("%d\n", *i0); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } // #define func_f_f(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ uint32_t u; \ sscanf(buf, funcStr " %x", &u); \ *b0 = u2f(u); \ funcName<<<1, 1>>>(s, b0); \ cudaDeviceSynchronize(); \ printf("%x\n", f2u(*s)); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_f2_f(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ uint32_t u; \ sscanf(buf, funcStr " %x", &u); \ *b0 = u2f(u); \ funcName<<<1, 1>>>(s2, b0); \ cudaDeviceSynchronize(); \ printf("%x %x\n", f2u(s2->x), f2u(s2->y)); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } #define func_f_f_f(funcStr, funcName) { \ while (startsWith(buf, funcStr " ")) { \ uint32_t u, v; \ sscanf(buf, funcStr " %x %x", &u, &v); \ *b0 = u2f(u); \ *b1 = u2f(v); \ funcName<<<1, 1>>>(s, b0, b1); \ cudaDeviceSynchronize(); \ printf("%x\n", f2u(*s)); \ fflush(stdout); \ if (fgets(buf, BUFSIZE-1, stdin) == NULL) break; \ } \ } // #define BUFSIZE 1024 int main(int argc, char **argv) { #if 0 cuInit(0); int ndevice; cuDeviceGetCount(&ndevice); if (ndevice == 0) { fprintf(stderr, "No cuda device available\n"); exit(0); } CUdevice device; char deviceName[1024]; cuDeviceGet(&device, 0); cuDeviceGetName(deviceName, 1000, device); fprintf(stderr, "Device : %s\n", deviceName); #endif cudaSetDeviceFlags(cudaDeviceScheduleSpin); vdouble2 *r2; vfloat2 *s2; double *r, *a0, *a1, *a2; float *s, *b0, *b1, *b2; int *i0; cudaMallocManaged(&r , 1*sizeof(double)); cudaMallocManaged(&r2, 1*sizeof(vdouble2)); cudaMallocManaged(&a0, 1*sizeof(double)); cudaMallocManaged(&a1, 1*sizeof(double)); cudaMallocManaged(&a2, 1*sizeof(double)); cudaMallocManaged(&s , 1*sizeof(float)); cudaMallocManaged(&s2, 1*sizeof(vfloat2)); cudaMallocManaged(&b0, 1*sizeof(float)); cudaMallocManaged(&b1, 1*sizeof(float)); cudaMallocManaged(&b2, 1*sizeof(float)); cudaMallocManaged(&i0, 1*sizeof(int)); printf("3\n"); fflush(stdout); char buf[BUFSIZE]; if (fgets(buf, BUFSIZE-1, stdin)) {} while(!feof(stdin)) { func_d_d("sin", xsin); func_d_d("cos", xcos); func_d_d("tan", xtan); func_d_d("asin", xasin); func_d_d("acos", xacos); func_d_d("atan", xatan); func_d_d("log", xlog); func_d_d("exp", xexp); func_d_d("sqrt", xsqrt); func_d_d("sqrt_u05", xsqrt_u05); func_d_d("sqrt_u35", xsqrt_u35); func_d_d("cbrt", 
xcbrt); func_d_d("cbrt_u1", xcbrt_u1); func_d_d("sinh", xsinh); func_d_d("cosh", xcosh); func_d_d("tanh", xtanh); func_d_d("sinh_u35", xsinh_u35); func_d_d("cosh_u35", xcosh_u35); func_d_d("tanh_u35", xtanh_u35); func_d_d("asinh", xasinh); func_d_d("acosh", xacosh); func_d_d("atanh", xatanh); func_d_d("sin_u1", xsin_u1); func_d_d("cos_u1", xcos_u1); func_d_d("tan_u1", xtan_u1); func_d_d("sinpi_u05", xsinpi_u05); func_d_d("cospi_u05", xcospi_u05); func_d_d("asin_u1", xasin_u1); func_d_d("acos_u1", xacos_u1); func_d_d("atan_u1", xatan_u1); func_d_d("log_u1", xlog_u1); func_d_d("exp2", xexp2); func_d_d("exp10", xexp10); func_d_d("exp2_u35", xexp2_u35); func_d_d("exp10_u35", xexp10_u35); func_d_d("expm1", xexpm1); func_d_d("log10", xlog10); func_d_d("log2", xlog2); func_d_d("log2_u35", xlog2_u35); func_d_d("log1p", xlog1p); func_d_d("fabs", xfabs); func_d_d("trunc", xtrunc); func_d_d("floor", xfloor); func_d_d("ceil", xceil); func_d_d("round", xround); func_d_d("rint", xrint); func_d_d("frfrexp", xfrfrexp); func_d_d("tgamma_u1", xtgamma_u1); func_d_d("lgamma_u1", xlgamma_u1); func_d_d("erf_u1", xerf_u1); func_d_d("erfc_u15", xerfc_u15); func_d2_d("sincos", xsincos); func_d2_d("sincos_u1", xsincos_u1); func_d2_d("sincospi_u35", xsincospi_u35); func_d2_d("sincospi_u05", xsincospi_u05); func_d2_d("modf", xmodf); func_d_d_d("pow", xpow); func_d_d_d("atan2", xatan2); func_d_d_d("atan2_u1", xatan2_u1); func_d_d_d("hypot_u05", xhypot_u05); func_d_d_d("hypot_u35", xhypot_u35); func_d_d_d("copysign", xcopysign); func_d_d_d("fmax", xfmax); func_d_d_d("fmin", xfmin); func_d_d_d("fdim", xfdim); func_d_d_d("nextafter", xnextafter); func_d_d_d("fmod", xfmod); func_d_d_d("remainder", xremainder); func_d_d_i("ldexp", xldexp); func_i_d("ilogb", xilogb); func_i_d("expfrexp", xexpfrexp); // func_f_f("sinf", xsinf); func_f_f("cosf", xcosf); func_f_f("tanf", xtanf); func_f_f("asinf", xasinf); func_f_f("acosf", xacosf); func_f_f("atanf", xatanf); func_f_f("logf", xlogf); func_f_f("expf", xexpf); func_f_f("sqrtf", xsqrtf); func_f_f("sqrtf_u05", xsqrtf_u05); func_f_f("sqrtf_u35", xsqrtf_u35); func_f_f("cbrtf", xcbrtf); func_f_f("cbrtf_u1", xcbrtf_u1); func_f_f("sinhf", xsinhf); func_f_f("coshf", xcoshf); func_f_f("tanhf", xtanhf); func_f_f("sinhf_u35", xsinhf_u35); func_f_f("coshf_u35", xcoshf_u35); func_f_f("tanhf_u35", xtanhf_u35); func_f_f("asinhf", xasinhf); func_f_f("acoshf", xacoshf); func_f_f("atanhf", xatanhf); func_f_f("sinf_u1", xsinf_u1); func_f_f("cosf_u1", xcosf_u1); func_f_f("tanf_u1", xtanf_u1); func_f_f("sinpif_u05", xsinpif_u05); func_f_f("cospif_u05", xcospif_u05); func_f_f("asinf_u1", xasinf_u1); func_f_f("acosf_u1", xacosf_u1); func_f_f("atanf_u1", xatanf_u1); func_f_f("logf_u1", xlogf_u1); func_f_f("exp2f", xexp2f); func_f_f("exp10f", xexp10f); func_f_f("exp2f_u35", xexp2f_u35); func_f_f("exp10f_u35", xexp10f_u35); func_f_f("expm1f", xexpm1f); func_f_f("log10f", xlog10f); func_f_f("log2f", xlog2f); func_f_f("log2f_u35", xlog2f_u35); func_f_f("log1pf", xlog1pf); func_f2_f("sincosf", xsincosf); func_f2_f("sincosf_u1", xsincosf_u1); func_f2_f("sincospif_u35", xsincospif_u35); func_f2_f("sincospif_u05", xsincospif_u05); func_f_f_f("powf", xpowf); func_f_f_f("atan2f", xatan2f); func_f_f_f("atan2f_u1", xatan2f_u1); func_f_f("fabsf", xfabsf); func_f_f("truncf", xtruncf); func_f_f("floorf", xfloorf); func_f_f("ceilf", xceilf); func_f_f("roundf", xroundf); func_f_f("rintf", xrintf); func_f_f("frfrexpf", xfrfrexpf); func_f_f_f("hypotf_u05", xhypotf_u05); func_f_f_f("hypotf_u35", xhypotf_u35); 
func_f_f_f("copysignf", xcopysignf); func_f_f_f("fmaxf", xfmaxf); func_f_f_f("fminf", xfminf); func_f_f_f("fdimf", xfdimf); func_f_f_f("nextafterf", xnextafterf); func_f_f_f("fmodf", xfmodf); func_f_f_f("remainderf", xremainderf); func_f2_f("modff", xmodff); func_f_f("tgammaf_u1", xtgammaf_u1); func_f_f("lgammaf_u1", xlgammaf_u1); func_f_f("erff_u1", xerff_u1); func_f_f("erfcf_u15", xerfcf_u15); func_f_f("fastsinf_u3500", xfastsinf_u3500); func_f_f("fastcosf_u3500", xfastcosf_u3500); func_f_f_f("fastpowf_u3500", xfastpowf_u3500); } return 0; }
#include <pyre/journal.h> #include <isce3/core/Ellipsoid.h> #include <isce3/core/Projections.h> #include <isce3/cuda/container/RadarGeometry.h> #include <isce3/cuda/core/OrbitView.h> #include <isce3/cuda/core/gpuLUT2d.h> #include <isce3/cuda/core/gpuProjections.h> #include <isce3/cuda/except/Error.h> #include <isce3/cuda/geometry/gpuDEMInterpolator.h> #include <isce3/cuda/geometry/gpuGeometry.h> #include <isce3/except/Error.h> #include <isce3/geocode/loadDem.h> #include <isce3/geometry/DEMInterpolator.h> #include <isce3/product/GeoGridParameters.h> #include "Geocode.h" #include "MaskedMinMax.h" using isce3::core::Vec3; using isce3::io::Raster; using isce3::cuda::core::InterpolatorHandle; using DeviceOrbitView = isce3::cuda::core::OrbitView; using isce3::cuda::core::ProjectionBaseHandle; using namespace isce3::geometry::detail; template<typename T> using DeviceInterp = isce3::cuda::core::gpuInterpolator<T>; template<typename T> using DeviceLUT2d = isce3::cuda::core::gpuLUT2d<T>; namespace isce3::cuda::geocode { /** Coverts a batch of rows from input geogrid into radar coordinates. Outputs * the pixel-space coordinates (x,y) of each resulting (range, azimuth) pair * with respect to specified radargrid, as well as mask invalid pixels (out * of bounds or failed to converge in geo2rdr). * * \param[out] rdr_x pointer to device_vector of computed radar grid * x / slant range indices * \param[out] rdr_y pointer to device_vector of computed radar grid * y / azimuth time indices * \param[out] mask pointer to device_vector mask of valid * pixels. Mask follows numpy mask_array * convention where True is masked * \param[in] ellipsoid Ellipsoid based on output geogrid coordinate * system * \param[in] orbit Orbit associated with radar data * \param[in] dem DEM interpolator. Maybe be of different * coordinate system than output geogrid. 
* \param[in] doppler doppler * \param[in] wvl wavelength * \param[in] side look side * \param[in] geo2rdr_params geo2rdr params * \param[in] geogrid Geogrid defining output product/rasters * \param[in] radargrid Radar grid decribing rasters to be geocoded * \param[in] line_start Starting line of block * \param[in] block_size Number of elements in a block * \param[in] proj Projection used to covert geogrid XYZ to LLH * of output coordinate system */ __global__ void geoToRdrIndices(double* rdr_x, double* rdr_y, bool* mask, const isce3::core::Ellipsoid ellipsoid, const DeviceOrbitView orbit, isce3::cuda::geometry::gpuDEMInterpolator dem, const DeviceLUT2d<double> doppler, const double wvl, const isce3::core::LookSide side, const Geo2RdrParams geo2rdr_params, const isce3::product::GeoGridParameters geogrid, const RadarGridParams radargrid, const size_t line_start, const size_t block_size, isce3::cuda::core::ProjectionBase** proj) { // thread index (1d grid of 1d blocks) const auto tid = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (tid >= block_size) return; const size_t block_line = tid / geogrid.width(); const size_t pixel = tid % geogrid.width(); const size_t line = line_start + block_line; // x and y coordinates of the output geocoded grid const Vec3 xyz {geogrid.startX() + geogrid.spacingX() * (0.5 + pixel), geogrid.startY() + geogrid.spacingY() * (0.5 + line), 0.0}; Vec3 llh; (*proj)->inverse(xyz, llh); llh[2] = dem.interpolateLonLat(llh[0], llh[1]); // geo2rdr slant range double r; // geo2rdr azimuth time initial value double t = radargrid.sensing_mid; // returns 0 if geo2rdr converges else 1 int converged = isce3::cuda::geometry::geo2rdr(llh, ellipsoid, orbit, doppler, &t, &r, wvl, side, geo2rdr_params.threshold, geo2rdr_params.maxiter, geo2rdr_params.delta_range); // convert aztime and range to indices double y = (t - radargrid.sensing_start) * radargrid.prf; double x = (r - radargrid.starting_range) / radargrid.range_pxl_spacing; // check if indinces in bounds and set accordingly const bool not_in_rdr_grid = y < 0 || y >= radargrid.length || x < 0 || x >= radargrid.width; const bool invalid_index = not_in_rdr_grid || converged == 0; rdr_y[tid] = invalid_index ? 0.0 : y; rdr_x[tid] = invalid_index ? 
0.0 : x; mask[tid] = invalid_index; } /** Interpolate radar block data to block indices calculated in geoToRdrIndices * * \param[out] geo_data_block pointer to device vector of geocoded data of * current block * \param[in] rdr_x pointer to device vector of radar grid x / az * time indices of current block * \param[in] rdr_y pointer to device vector of radar grid y / range * indices of current block * \param[in] mask pointer to device vector of a mask / valid * pixels of current block \param[in] rdr_data_block pointer to device vector * of radar data of current block \param[in] width width of * rdr_data_block \param[in] length length of rdr_data_block * \param[in] block_size number of elements in a block * \param[in] az_1st_line offset applied to az time indices to correctly * access current block * \param[in] range_1st_pixel offset applied to slant range indices to * correctly access current block \param[in] invalid_value value assigned to * invalid geogrid pixels \param[in] interp interpolator used to * interpolate radar data to specified geogrid */ template<class T> __global__ void interpolate(T* geo_data_block, const double* __restrict__ rdr_x, const double* __restrict__ rdr_y, const bool* __restrict__ mask, const T* __restrict__ rdr_data_block, const size_t width, const size_t length, const size_t block_size, const double az_1st_line, const double range_1st_pixel, const T invalid_value, DeviceInterp<T>** interp) { // thread index (1d grid of 1d blocks) const auto tid = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (tid >= block_size) return; auto rdry = rdr_y[tid] - az_1st_line; auto rdrx = rdr_x[tid] - range_1st_pixel; // check if indices are in bounds of radar block // add margin to account for possibly deficient interpolator boundary checks constexpr double extra_margin = 4.0; // magic number for SINC_HALF bool out_of_bounds = rdrx < extra_margin || rdry < extra_margin || rdrx >= width - extra_margin || rdry >= length - extra_margin; // default to invalid value. interpolate only if in bounds and not masked. 
T interp_val = invalid_value; if (!(out_of_bounds || mask[tid])) interp_val = (*interp)->interpolate( rdrx, rdry, rdr_data_block, width, length); geo_data_block[tid] = interp_val; } __host__ Geocode::Geocode(const isce3::product::GeoGridParameters & geogrid, const isce3::container::RadarGeometry & rdr_geom, const Raster & dem_raster, const double dem_margin, const size_t lines_per_block, const isce3::core::dataInterpMethod data_interp_method, const isce3::core::dataInterpMethod dem_interp_method, const double threshold, const int maxiter, const double dr, const float invalid_value) : _geogrid(geogrid), _rdr_geom(rdr_geom), _ellipsoid(isce3::core::makeProjection(_geogrid.epsg())->ellipsoid()), _lines_per_block(lines_per_block), _geo_block_length(_lines_per_block), _n_blocks((geogrid.length() + _lines_per_block -1) / _lines_per_block), _az_first_line(_rdr_geom.radarGrid().length() - 1), _az_last_line(0), _range_first_pixel(_rdr_geom.radarGrid().width() - 1), _range_last_pixel(0), _dem_raster(dem_raster), _dem_margin(dem_margin), _interp_float_handle(data_interp_method), _interp_cfloat_handle(data_interp_method), _interp_double_handle(data_interp_method), _interp_cdouble_handle(data_interp_method), _interp_unsigned_char_handle(data_interp_method), _proj_handle(geogrid.epsg()), _dem_interp_method(dem_interp_method) { // init light weight radar grid _radar_grid.sensing_start = _rdr_geom.radarGrid().sensingStart(); _radar_grid.sensing_mid = _rdr_geom.radarGrid().sensingMid(); _radar_grid.prf = _rdr_geom.radarGrid().prf(); _radar_grid.starting_range = _rdr_geom.radarGrid().startingRange(); _radar_grid.range_pxl_spacing = _rdr_geom.radarGrid().rangePixelSpacing(); _radar_grid.length = _rdr_geom.gridLength(); _radar_grid.width = _rdr_geom.gridWidth(); // Determine max number of elements per block. Last block to be processed // may not contain the max number of elements. auto n_elem = _lines_per_block * _geogrid.width(); // Assign geo2rdr parameter values _geo2rdr_params.threshold = threshold; _geo2rdr_params.maxiter = maxiter; _geo2rdr_params.delta_range = dr; // Resize all device vectors to max block size. 
_radar_x.resize(n_elem); _radar_y.resize(n_elem); _mask.resize(n_elem); if (std::isnan(invalid_value)) { _invalid_float = std::numeric_limits<float>::quiet_NaN(); _invalid_double = std::numeric_limits<double>::quiet_NaN(); _invalid_unsigned_char = 255; } else { _invalid_float = invalid_value; _invalid_double = static_cast<double>(invalid_value); _invalid_unsigned_char = static_cast<unsigned char>(invalid_value); } } void Geocode::setBlockRdrCoordGrid(const size_t block_number) { // make sure block index does not exceed actual number of blocks if (block_number >= _n_blocks) { throw isce3::except::DomainError( ISCE_SRCINFO(), "block number exceeds max number of blocks"); } // Get block extents (of the geocoded grid) _line_start = block_number * _lines_per_block; // Set block sizes for everything but last block size_t block_size = _geo_block_length * _geogrid.width(); _geo_block_length = _lines_per_block; // Adjust for last block sized assuming it is sized differently than others if (block_number == (_n_blocks - 1)) { _geo_block_length = _geogrid.length() - _line_start; block_size = _geo_block_length * _geogrid.width(); } // Resize blocks accordingly _radar_x.resize(block_size); _radar_y.resize(block_size); _mask.resize(block_size); // prepare device DEMInterpolator isce3::geometry::DEMInterpolator host_dem_interp = isce3::geocode::loadDEM( _dem_raster, _geogrid, _line_start, _geo_block_length, _geogrid.width(), _dem_margin, _dem_interp_method); isce3::cuda::geometry::gpuDEMInterpolator dev_dem_interp(host_dem_interp); dev_dem_interp.initProjInterp(); // copy RadarGeometry to device isce3::cuda::container::RadarGeometry dev_rdr_geom(_rdr_geom); // Create geogrid on device { const unsigned threads_per_block = 256; const unsigned n_blocks = (block_size + threads_per_block - 1) / threads_per_block; geoToRdrIndices<<<n_blocks, threads_per_block>>>(_radar_x.data().get(), _radar_y.data().get(), _mask.data().get(), _ellipsoid, dev_rdr_geom.orbit(), dev_dem_interp, dev_rdr_geom.doppler(), dev_rdr_geom.wavelength(), dev_rdr_geom.lookSide(), _geo2rdr_params, _geogrid, _radar_grid, _line_start, block_size, _proj_handle.get_proj()); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); } // find index of min and max values in x/range const auto [rdr_x_min, rdr_x_max] = masked_minmax(_radar_x, _mask); _range_first_pixel = std::min(_rdr_geom.radarGrid().width() - 1, static_cast<size_t>(std::floor(rdr_x_min))); _range_last_pixel = std::max(static_cast<size_t>(0), static_cast<size_t>(std::ceil(rdr_x_max) - 1)); // find index of min and max values in y/azimuth const auto [rdr_y_min, rdr_y_max] = masked_minmax(_radar_y, _mask); _az_first_line = std::min(_rdr_geom.radarGrid().length() - 1, static_cast<size_t>(std::floor(rdr_y_min))); _az_last_line = std::max(static_cast<size_t>(0), static_cast<size_t>(std::ceil(rdr_y_max) - 1)); // check if block entirely masked bool all_masked = std::isnan(rdr_y_min); // if mask not entirely masked, then set non zero dimensions _rdr_block_length = all_masked ? 0 : _az_last_line - _az_first_line + 1; _rdr_block_width = all_masked ? 0 : _range_last_pixel - _range_first_pixel + 1; pyre::journal::debug_t debug( "isce.cuda.geocode.Geocode.setBlockRdrCoordGrid"); if (all_masked) { debug << block_number << " is out of bounds. calls geocodeRasterBlock will \ not geocode." 
<< pyre::journal::endl; } } template<class T> void Geocode::geocodeRasterBlock(Raster& output_raster, Raster& input_raster) { // determine number of elements in output vector const auto n_elem_out = _geo_block_length * _geogrid.width(); // determine by type, interp and invalid value DeviceInterp<T>** interp; T invalid_value; if constexpr (std::is_same_v<T, float>) { interp = _interp_float_handle.getInterp(); invalid_value = _invalid_float; } else if constexpr (std::is_same_v<T, thrust::complex<float>>) { interp = _interp_cfloat_handle.getInterp(); invalid_value = thrust::complex<float>(_invalid_float, _invalid_float); } else if constexpr (std::is_same_v<T, double>) { interp = _interp_double_handle.getInterp(); invalid_value = _invalid_double; } else if constexpr (std::is_same_v<T, thrust::complex<double>>) { interp = _interp_cdouble_handle.getInterp(); invalid_value = thrust::complex<double>(_invalid_double, _invalid_double); } else if constexpr (std::is_same_v<T, unsigned char>) { interp = _interp_unsigned_char_handle.getInterp(); invalid_value = _invalid_unsigned_char; } // 0 width indicates current block is out of bounds if (_rdr_block_width == 0) { // set entire block to invalid value thrust::host_vector<T> h_geo_data_block = thrust::host_vector<T>(n_elem_out, invalid_value); output_raster.setBlock(&h_geo_data_block[0], 0, _line_start, _geogrid.width(), _geo_block_length, 1); pyre::journal::debug_t debug( "isce.cuda.geocode.Geocode.geocodeRasterBlock"); debug << "Unable to geocode raster due to block being out of bounds." << pyre::journal::endl; return; } // load raster block on host const auto n_elem_in = _rdr_block_length * _rdr_block_width; thrust::host_vector<T> h_rdr_data_block(n_elem_in); input_raster.getBlock(&h_rdr_data_block[0], _range_first_pixel, _az_first_line, _rdr_block_width, _rdr_block_length, 1); // copy input raster block to device thrust::device_vector<T> d_rdr_data_block = h_rdr_data_block; // prepare output geocode raster block thrust::device_vector<T> d_geo_data_block(n_elem_out); // Perform interpolation on device { const unsigned threads_per_block = 256; const unsigned n_blocks = (n_elem_out + threads_per_block - 1) / threads_per_block; interpolate<<<n_blocks, threads_per_block>>>( d_geo_data_block.data().get(), _radar_x.data().get(), _radar_y.data().get(), _mask.data().get(), d_rdr_data_block.data().get(), _rdr_block_width, _rdr_block_length, n_elem_out, static_cast<double>(_az_first_line), static_cast<double>(_range_first_pixel), invalid_value, interp); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); } // set output raster block from device thrust::host_vector<T> h_geo_data_block = d_geo_data_block; output_raster.setBlock(&h_geo_data_block[0], 0, _line_start, _geogrid.width(), _geo_block_length, 1); } #define EXPLICIT_INSTATIATION(T) \ template void Geocode::geocodeRasterBlock<T>( \ Raster & output_raster, Raster & input_raster); EXPLICIT_INSTATIATION(float); EXPLICIT_INSTATIATION(thrust::complex<float>); EXPLICIT_INSTATIATION(double); EXPLICIT_INSTATIATION(thrust::complex<double>); EXPLICIT_INSTATIATION(unsigned char); } // namespace isce3::cuda::geocode
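// Driver sketch (hypothetical, not from this file): the per-block API above is intended
// to be used as a pair -- setBlockRdrCoordGrid() fills the radar-grid indices and mask
// for one geogrid block, then geocodeRasterBlock<T>() interpolates that block and writes
// it to the output raster. The variable names and loop bound below are assumptions; the
// block count matches _n_blocks = ceil(geogrid.length() / lines_per_block) computed above.
//
//   isce3::cuda::geocode::Geocode geocoder(geogrid, rdr_geom, dem_raster, dem_margin,
//           lines_per_block, data_interp_method, dem_interp_method,
//           threshold, maxiter, dr, invalid_value);
//   const size_t n_blocks = (geogrid.length() + lines_per_block - 1) / lines_per_block;
//   for (size_t b = 0; b < n_blocks; ++b) {
//       geocoder.setBlockRdrCoordGrid(b);                          // geo2rdr indices + mask
//       geocoder.geocodeRasterBlock<float>(out_raster, in_raster); // interpolate + write
//   }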
namespace anakin { namespace saber { /** * @brief reduce tensor acorrding to the given reduce dim. * e.g. * input tensor with shape [5, 2, 10, 4] (rank = 4, how many dimentions does a tensor have.) * and the reduce dim may have the following forms: * 1) reduce_dim = None, no reduce dim. It means that reduce all dimentions [default] * output's shape [1, 1, 1, 1]. * 2) reduce_dim = x, x is the dimention we want to reduce. * output's shape: * x = 0, for example, the shape will be [1, 2, 10, 4] if keep_dim is true, otherwise it will be [2*10*4, 1, 1, 1]. * x = 2, for example, the shape will be [5, 2, 1, 4] if keep_dim is true, otherwise it will be [5*2*4, 1, 1, 1]. * and so on. * 3) reduce_dim = [x, y], It will reduce two dimetions x and y. * output's shape: * reduce_dim = [0, 1], for example, the shape will be [1, 1, 10 ,4] or [10*4, 1, 1, 1] and so on. * Notes: * if reduce_dim[i] < 0: * do * reduce_dim[i] += rank. * * @tparam OpDtype * @param inputs * @param outputs * @param param * @return SaberStatus */ //This function is used to implement atioMin based on CAS function. // __device__ float atomicMin(float* address, float val) { // unsigned long long int* address_as_ull = (unsigned long long int*)address; // unsigned long long int old = *address_as_ull, assumed; // do{ // assumed = old; // old = atomicCAS(address_as_ull, assumed, __float_as_longlong( // fminf(val, __longlong_as_float(assumed)))); // }while(assumed != old); // return __longlong_as_float(old); // } __device__ double atomicMin(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do{ assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong( fmin(val, __longlong_as_double(assumed)))); }while(assumed != old); return __longlong_as_double(old); } __device__ double atomicMin(float* address, float val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do{ assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int( fminf(val, __int_as_float(assumed)))); }while(assumed != old); return __longlong_as_double(old); } //thread num: CHW template <typename dtype> __global__ void kernel_reduce_n(const dtype* src, dtype* dst, const int num_in, const int channel_in, const int height_in, const int width_in, const int count) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_num = blockDim.x * gridDim.x; int feature_map = height_in * width_in; //HW int size = channel_in * feature_map;// CHW int c_id = tid / feature_map; int feature_map_inner_index = tid % feature_map; dtype min = src[tid]; for (int n = 1; n < num_in; ++n) { dtype tmp = src[n * size + c_id * feature_map + feature_map_inner_index]; min = tmp < min ? tmp : min; } dst[tid] = min; } //thread num:NHW template <typename dtype> __global__ void kernel_reduce_c(const dtype* src, dtype* dst, const int num_in, const int channel_in, const int height_in, const int width_in, const int count) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_num = blockDim.x * gridDim.x; int feature_map = height_in * width_in; int size = channel_in * feature_map; for (int i = tid; i < count; i += thread_num) { int n_id = i / feature_map; int inner_index = i % feature_map; dtype min = src[n_id * size + inner_index]; for (int c = 1; c < channel_in; ++c) { dtype tmp = src[n_id * size + c * feature_map + inner_index]; min = tmp < min? 
tmp : min; } dst[n_id * feature_map + inner_index] = min; // Is data_index same to tid/i?. } } //thread num: NCW template <typename dtype> __global__ void kernel_reduce_h(const dtype* src, dtype* dst, const int num_in, const int channel_in, const int height_in, const int width_in, const int count) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_num = blockDim.x * gridDim.x; int feature_map = height_in * width_in; //CW int cw_size = channel_in * width_in; //CW int size = channel_in * feature_map; //CHW for (int i = tid; i < count; i += thread_num) { int n_id = i / cw_size; int c_id = (i / width_in) % channel_in; int inner_index = i % width_in; int data_index = n_id * size + c_id * feature_map + inner_index; dtype min = src[data_index]; for (int h = 1; h < height_in; ++h) { dtype tmp = src[data_index + h * width_in]; min = tmp < min? tmp : min; } dst[n_id * cw_size + c_id * width_in + inner_index] = min; // Is data_index same to tid/i?. } } //thread num: NCH template <typename dtype> __global__ void kernel_reduce_w(const dtype* src, dtype* dst, const int num_in, const int channel_in, const int height_in, const int width_in, const int count) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int thread_num = blockDim.x * gridDim.x; int ch_size = channel_in * height_in; //CH int size = ch_size * width_in; //CHW int feature_map = height_in * width_in; //HW for (int i = tid; i < count; i += thread_num) { int n_id = i / ch_size; int c_id = (i / height_in) % channel_in; int inner_index = i % height_in; int data_index = n_id * size + c_id * feature_map + inner_index * width_in; dtype min = src[data_index]; for (int w = 1; w < width_in; ++w) { dtype tmp = src[data_index + w]; min = tmp < min? tmp : min; } dst[n_id * ch_size + c_id * height_in + inner_index] = min; } } //reduce all. template <typename dtype> __global__ void kernel_reduce_nchw(const dtype* src, dtype* dst, const int count) { int n_id = threadIdx.x + blockIdx.x * blockDim.x; int tid = threadIdx.x; int thread_num = blockDim.x * gridDim.x; dst[0] = src[n_id]; extern __shared__ dtype s[]; dtype min = src[n_id]; for (int i = n_id; i < count; i += thread_num) { min = src[i] < min ? src[i] : min; } s[tid] = min; __syncthreads(); int powOf2 = blockDim.x; if (powOf2 & (powOf2 - 1)) { //block threads are not pow of 2. while (powOf2 & (powOf2 - 1)) { powOf2 &= powOf2 - 1; } // it'll end when it find pow of 2. if (tid >= powOf2) { s[tid - powOf2] = s[tid - powOf2] < s[tid]? s[tid - powOf2] : s[tid]; } __syncthreads(); } for (int i = powOf2>>1; i > 0; i>>=1) { if (tid < i) { s[tid] = s[tid] < s[tid + i]? s[tid] : s[tid + i]; } __syncthreads(); } if (threadIdx.x == 0) { //double tmp = s[] atomicMin(&dst[0], s[threadIdx.x]); } } template <DataType OpDtype> SaberStatus SaberReduceMin<NV, OpDtype>::dispatch(const std::vector<Tensor<NV>*>& inputs, std::vector<Tensor<NV>*>& outputs, ReduceMinParam<NV>& param) { cudaStream_t cuda_stream = this->_ctx->get_compute_stream(); const OpDataType* input_ptr = (const OpDataType*)inputs[0]->data(); OpDataType* output_ptr = (OpDataType*)outputs[0]->mutable_data(); int count = outputs[0]->valid_size(); if (_reduce_dim.empty()) { // reduce_all int count_all = inputs[0]->valid_size(); int grid, thread_num; if (count_all < CUDA_NUM_THREADS) { thread_num = count_all; grid = 1; }else { thread_num = CUDA_NUM_THREADS; if (CUDA_GET_BLOCKS(count) >= 128) //This is to avoid share memory blowing up. 
grid = 64; else grid = CUDA_GET_BLOCKS(count); } int sharedSize = thread_num * 4; kernel_reduce_nchw<OpDataType><<<grid, thread_num, sharedSize, cuda_stream>>>( input_ptr, output_ptr, count_all); }else if (_reduce_dim.size() == 1) { if (_reduce_dim[0] == 0) { //reduce n kernel_reduce_n<OpDataType><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>( input_ptr, output_ptr, _num, _channel, _height, _width, count); } if (_reduce_dim[0] == 1) { //reduce c kernel_reduce_c<OpDataType><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>( input_ptr, output_ptr, _num, _channel, _height, _width, count); } if (_reduce_dim[0] == 2) { //reduce h kernel_reduce_h<OpDataType><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>( input_ptr, output_ptr, _num, _channel, _height, _width, count); } if (_reduce_dim[0] == 3) { //reduce h kernel_reduce_w<OpDataType><<<CUDA_GET_BLOCKS(count), CUDA_NUM_THREADS, 0, cuda_stream>>>( input_ptr, output_ptr, _num, _channel, _height, _width, count); } } else if (_reduce_dim.size() == 2) { //only consecutive reduce dim? [0,1] [1, 2], not [0, 2]? if (_reduce_dim[0] == 0 && _reduce_dim[1] == 1) { //reduce n, c. reduce n first. _tensor_tmp.reshape(std::vector<int>({1, _channel, _height, _width})); int count_n = _tensor_tmp.valid_size(); int count_nc = count_n / _tensor_tmp.channel(); OpDataType* tmp_out = (OpDataType*)_tensor_tmp.mutable_data(); kernel_reduce_n<OpDataType><<<CUDA_GET_BLOCKS(count_n), CUDA_NUM_THREADS, 0, cuda_stream>>>( input_ptr, tmp_out, _num, _channel, _height, _width, count_n); kernel_reduce_c<OpDataType><<<CUDA_GET_BLOCKS(count_nc), CUDA_NUM_THREADS, 0, cuda_stream>>>( tmp_out, output_ptr, 1, _channel, _height, _width, count_nc); }else if (_reduce_dim[0] == 1 && _reduce_dim[1] == 2) { //reduce c. h. reduce c first. _tensor_tmp.reshape(std::vector<int>({_num, 1, _height, _width})); int count_c = _tensor_tmp.valid_size(); int count_ch = count_c / _tensor_tmp.height(); OpDataType* tmp_out = (OpDataType*)_tensor_tmp.mutable_data(); kernel_reduce_c<OpDataType><<<CUDA_GET_BLOCKS(count_c), CUDA_NUM_THREADS, 0, cuda_stream>>>( input_ptr, tmp_out, _num, _channel, _height, _width, count_c); kernel_reduce_h<OpDataType><<<CUDA_GET_BLOCKS(count_ch), CUDA_NUM_THREADS, 0, cuda_stream>>>( tmp_out, output_ptr, _num, 1, _height, _width, count_ch); }else if (_reduce_dim[0] == 2 && _reduce_dim[1] == 3) { //reduce h, w. reduce h first. _tensor_tmp.reshape(std::vector<int>({_num, _channel, 1, _width})); int count_h = _tensor_tmp.valid_size(); int count_hw = count_h / _tensor_tmp.width(); OpDataType* tmp_out = (OpDataType*)_tensor_tmp.mutable_data(); kernel_reduce_h<OpDataType><<<CUDA_GET_BLOCKS(count_h), CUDA_NUM_THREADS, 0, cuda_stream>>>( input_ptr, tmp_out, _num, _channel, _height, _width, count_h); kernel_reduce_w<OpDataType><<<CUDA_GET_BLOCKS(count_hw), CUDA_NUM_THREADS, 0, cuda_stream>>>( tmp_out, output_ptr, _num, _channel, 1, _width, count_hw); }else { LOG(FATAL) <<"[reduce_min] invalid reduce_dim!!!"; } }else { LOG(FATAL) << "[reduce_min]Reducing size over than 2 is not support!!"; } CUDA_POST_KERNEL_CHECK; return SaberSuccess; } template class SaberReduceMin<NV, AK_FLOAT>; DEFINE_OP_TEMPLATE(SaberReduceMin, ReduceMinParam, NV, AK_HALF); DEFINE_OP_TEMPLATE(SaberReduceMin, ReduceMinParam, NV, AK_INT8); } // namespace saber. } // namespace anakin.
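// Reference sketch (not part of the original file): the commented-out float overload
// earlier in this file describes the standard CAS-based atomicMin pattern. A minimal
// correct version of that pattern operates on 32-bit words, as below; the name
// atomicMinFloat is illustrative and nothing in this file calls it.
__device__ float atomicMinFloat(float* address, float val) {
    int* address_as_int = reinterpret_cast<int*>(address);
    int old = *address_as_int, assumed;
    do {
        assumed = old;
        // Reinterpret the current bits as float, take the min, and try to swap it in.
        old = atomicCAS(address_as_int, assumed,
                        __float_as_int(fminf(val, __int_as_float(assumed))));
    } while (assumed != old);
    return __int_as_float(old);
}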
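// Reading the test names below (explanatory note, not part of the original suite):
// each name encodes the configuration passed to batched_gemv_kernel_test, e.g.
//   1x7x256x4096_1x8x4x64_1x1x4x64_rcr_fp16_fp32
//   - 1x7x256x4096  -> BatchedGemmCoord problem size (read here as M x N x K x batch)
//   - 1x8x4x64      -> ThreadBlockShape<1, 8, 4> with kBatchTileSize = 64
//   - 1x1x4x64      -> ThreadShape<1, 1, 4> over the same batch tile
//   - rcr           -> layouts of A, B, C (r = RowMajor, c = ColumnMajor)
//   - fp16_fp32     -> element type of A/B, then of C (the middle template type
//                      argument, float here, appears to be the accumulator type)
// Trailing alpha / alpha_beta markers indicate the extra scalar argument(s) passed to
// batched_gemv_kernel_test (e.g. -0.5f, or 4.5f and -0.5f).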
///////////////////////////////////////////////////////////////////////////////////////////////// TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 1; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 2; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 8; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcr_alpha_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size, -0.5f); } TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcr_alpha_beta_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size, 4.5f, -0.5f); } TEST(SM50_batched_gemv, 1x64x24x4096_1x8x4x64_1x1x4x64_rcr_alpha_beta_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 24, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, 
cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size, cutlass::half_t(4.5f), cutlass::half_t(-0.5f)); } /// TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_fp16_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 1; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_fp16_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 2; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_fp16_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 8; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_fp16_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } /// TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 1; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 2; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 8; 
test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } /// TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcr_i8_i32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 1; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, int8_t, int32_t, int32_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcr_i8_i32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 2; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, int8_t, int32_t, int32_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcr_i8_i32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 8; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, int8_t, int32_t, int32_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcr_i8_i32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, int8_t, int32_t, int32_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, kBatchTileSize>(problem_size); } ///////////// TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 1; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const 
kBatchTileSize = 2; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 8; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } /// TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_fp16_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 1; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_fp16_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 2; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_fp16_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 8; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_fp16_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } /// TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; 
static int const kBatchTileSize = 1; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 2; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 8; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } /// TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_crc_i8_i32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 1; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, int8_t, int32_t, int32_t, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_crc_i8_i32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 2; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, int8_t, int32_t, int32_t, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_crc_i8_i32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 8; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, int8_t, int32_t, int32_t, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_crc_i8_i32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; 
using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, int8_t, int32_t, int32_t, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_crc_alpha_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size, -0.5f); } TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_crc_alpha_beta_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size, 4.5f, -0.5f); } TEST(SM50_batched_gemv, 1x64x24x4096_1x8x4x64_1x1x4x64_crc_alpha_beta_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 24, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, cutlass::layout::ColumnMajor, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size, cutlass::half_t(4.5f), cutlass::half_t(-0.5f)); } ///////////// TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 1; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 2; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 8; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_fp32_fp32) { 
cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } /// TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_fp16_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 1; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_fp16_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 2; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_fp16_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 8; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_fp16_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } /// TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 1; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 2; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 
1x256x256x64_1x64x4x8_1x4x4x8_rcc_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 8; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } /// TEST(SM50_batched_gemv, 1x64x64x1_1x64x4x1_1x4x4x1_rcc_i8_i32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 1); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 1; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, int8_t, int32_t, int32_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x64x4_1x64x4x2_1x4x4x2_rcc_i8_i32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 64, 4); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 2; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, int8_t, int32_t, int32_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x256x256x64_1x64x4x8_1x4x4x8_rcc_i8_i32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 256, 256, 64); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 64, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 4, 4>; static int const kBatchTileSize = 8; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, int8_t, int32_t, int32_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x7x256x4096_1x8x4x64_1x1x4x64_rcc_i8_i32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 7, 256, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, int8_t, int32_t, int32_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size); } TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcc_alpha_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, 
kBatchTileSize>(problem_size, -0.5f); } TEST(SM50_batched_gemv, 1x64x27x4096_1x8x1x64_1x1x1x64_rcc_alpha_beta_fp32_fp32) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 27, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 1>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 1>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, float, float, float, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size, 4.5f, -0.5f); } TEST(SM50_batched_gemv, 1x64x24x4096_1x8x4x64_1x1x4x64_rcc_alpha_beta_fp16_fp16) { cutlass::gemm::BatchedGemmCoord problem_size(1, 64, 24, 4096); using ThreadBlockShape = cutlass::gemm::GemmShape<1, 8, 4>; using ThreadShape = cutlass::gemm::GemmShape<1, 1, 4>; static int const kBatchTileSize = 64; test::gemm::kernel::batched_gemv_kernel_test< ThreadBlockShape, ThreadShape, cutlass::half_t, float, cutlass::half_t, cutlass::layout::RowMajor, cutlass::layout::ColumnMajor, cutlass::layout::ColumnMajor, kBatchTileSize>(problem_size, cutlass::half_t(4.5f), cutlass::half_t(-0.5f)); }
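/////////////////////////////////////////////////////////////////////////////
// Note on the tests above: each TEST name encodes, in order, the problem size
// (M x N x K x batch count), the thread-block tile with its batch tile size,
// the per-thread tile with its batch tile size, the A/B/C layouts
// (c = ColumnMajor, r = RowMajor), whether alpha or alpha+beta scaling is
// exercised, and the A/B element type followed by the output type.
//
// For orientation only -- not the harness's actual reference path -- here is a
// minimal host-side sketch of what a batched GEMV with alpha/beta scaling
// computes, y_b = alpha * A_b * x_b + beta * y_b, with layouts and strides
// simplified to contiguous row-major indexing:
#include <vector>

template <typename TA, typename TAcc, typename TC>
void batched_gemv_reference(int n, int k, int batch,
                            const std::vector<TA> &A,  // batch matrices, each n x k
                            const std::vector<TA> &x,  // batch vectors, length k
                            std::vector<TC> &y,        // batch vectors, length n
                            TAcc alpha, TAcc beta) {
  for (int b = 0; b < batch; ++b) {
    for (int row = 0; row < n; ++row) {
      TAcc acc = TAcc(0);
      for (int col = 0; col < k; ++col)
        acc += TAcc(A[(b * n + row) * k + col]) * TAcc(x[b * k + col]);
      y[b * n + row] = TC(alpha * acc + beta * TAcc(y[b * n + row]));
    }
  }
}
/////////////////////////////////////////////////////////////////////////////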
#include "_reg_blocksize_gpu.h" /* ******************************** */ /* ******************************** */ NiftyReg_CudaBlock100 * NiftyReg_CudaBlock::instance = NULL; /* ******************************** */ /* ******************************** */ NiftyReg_CudaBlock100::NiftyReg_CudaBlock100() { Block_target_block = 512; // 15 reg - 32 smem - 24 cmem Block_result_block = 384; // 21 reg - 11048 smem - 24 cmem /* _reg_mutualinformation_gpu */ Block_reg_smoothJointHistogramX = 384; // 07 reg - 24 smem - 20 cmem Block_reg_smoothJointHistogramY = 320; // 11 reg - 24 smem - 20 cmem Block_reg_smoothJointHistogramZ = 320; // 11 reg - 24 smem - 20 cmem Block_reg_smoothJointHistogramW = 384; // 08 reg - 24 smem - 20 cmem Block_reg_marginaliseTargetX = 384; // 06 reg - 24 smem Block_reg_marginaliseTargetXY = 384; // 07 reg - 24 smem Block_reg_marginaliseResultX = 384; // 06 reg - 24 smem Block_reg_marginaliseResultXY = 384; // 07 reg - 24 smem Block_reg_getVoxelBasedNMIGradientUsingPW2D = 384; // 21 reg - 24 smem - 32 cmem Block_reg_getVoxelBasedNMIGradientUsingPW3D = 320; // 25 reg - 24 smem - 32 cmem Block_reg_getVoxelBasedNMIGradientUsingPW2x2 = 192; // 42 reg - 24 smem - 36 cmem /* _reg_globalTransformation_gpu */ Block_reg_affine_deformationField = 512; // 16 reg - 24 smem /* _reg_localTransformation_gpu */ Block_reg_spline_getDeformationField2D = 384; // 20 reg - 6168 smem - 28 cmem Block_reg_spline_getDeformationField3D = 192; // 37 reg - 6168 smem - 28 cmem Block_reg_spline_getApproxSecondDerivatives2D = 512; // 15 reg - 132 smem - 32 cmem Block_reg_spline_getApproxSecondDerivatives3D = 192; // 38 reg - 672 smem - 104 cmem Block_reg_spline_getApproxBendingEnergy2D = 384; // 07 reg - 24 smem Block_reg_spline_getApproxBendingEnergy3D = 320; // 12 reg - 24 smem Block_reg_spline_getApproxBendingEnergyGradient2D = 512; // 15 reg - 132 smem - 36 cmem Block_reg_spline_getApproxBendingEnergyGradient3D = 256; // 27 reg - 672 smem - 108 cmem Block_reg_spline_getApproxJacobianValues2D = 384; // 17 reg - 104 smem - 36 cmem Block_reg_spline_getApproxJacobianValues3D = 256; // 27 reg - 356 smem - 108 cmem Block_reg_spline_getJacobianValues2D = 256; // 29 reg - 32 smem - 16 cmem - 32 lmem Block_reg_spline_getJacobianValues3D = 192; // 41 reg - 6176 smem - 20 cmem - 32 lmem Block_reg_spline_logSquaredValues = 384; // 07 reg - 24 smem - 36 cmem Block_reg_spline_computeApproxJacGradient2D = 320; // 23 reg - 96 smem - 72 cmem Block_reg_spline_computeApproxJacGradient3D = 256; // 32 reg - 384 smem - 144 cmem Block_reg_spline_computeJacGradient2D = 384; // 21 reg - 24 smem - 64 cmem Block_reg_spline_computeJacGradient3D = 256; // 32 reg - 24 smem - 64 cmem Block_reg_spline_approxCorrectFolding3D = 256; // 32 reg - 24 smem - 24 cmem Block_reg_spline_correctFolding3D = 256; // 31 reg - 24 smem - 32 cmem Block_reg_getDeformationFromDisplacement = 384; // 09 reg - 24 smem Block_reg_getDisplacementFromDeformation = 384; // 09 reg - 24 smem Block_reg_defField_compose2D = 512; // 15 reg - 24 smem - 08 cmem - 16 lmem Block_reg_defField_compose3D = 384; // 21 reg - 24 smem - 08 cmem - 24 lmem Block_reg_defField_getJacobianMatrix = 512; // 16 reg - 24 smem - 04 cmem /* _reg_optimiser_gpu */ Block_reg_initialiseConjugateGradient = 384; // 09 reg - 24 smem Block_reg_GetConjugateGradient1 = 320; // 12 reg - 24 smem Block_reg_GetConjugateGradient2 = 384; // 10 reg - 40 smem Block_reg_getEuclideanDistance = 384; // 04 reg - 24 smem Block_reg_updateControlPointPosition = 384; // 08 reg - 24 smem /* _reg_ssd_gpu */ 
Block_reg_getSquaredDifference = 320; // 12 reg - 24 smem - 08 cmem Block_reg_getSSDGradient = 320; // 12 reg - 24 smem - 08 cmem /* _reg_tools_gpu */ Block_reg_voxelCentric2NodeCentric = 320; // 11 reg - 24 smem - 16 cmem Block_reg_convertNMIGradientFromVoxelToRealSpace = 512; // 16 reg - 24 smem Block_reg_ApplyConvolutionWindowAlongX = 512; // 14 reg - 28 smem - 08 cmem Block_reg_ApplyConvolutionWindowAlongY = 512; // 14 reg - 28 smem - 08 cmem Block_reg_ApplyConvolutionWindowAlongZ = 512; // 15 reg - 28 smem - 08 cmem Block_reg_arithmetic = 384; // 5 reg - 24 smem /* _reg_resampling_gpu */ Block_reg_resampleImage2D = 320; // 10 reg - 24 smem - 12 cmem Block_reg_resampleImage3D = 512; // 16 reg - 24 smem - 12 cmem Block_reg_getImageGradient2D = 512; // 16 reg - 24 smem - 20 cmem - 24 lmem Block_reg_getImageGradient3D = 320; // 24 reg - 24 smem - 16 cmem - 32 lmem #ifndef NDEBUG printf("[NiftyReg DEBUG] NiftyReg_CudaBlock100 constructor called\n"); #endif } /* ******************************** */ NiftyReg_CudaBlock200::NiftyReg_CudaBlock200() { // Block_target_block = ; // // Block_result_block = ; // // /* _reg_mutualinformation_gpu */ // Block_reg_smoothJointHistogramX = ; // // Block_reg_smoothJointHistogramY = ; // // Block_reg_smoothJointHistogramZ = ; // // Block_reg_smoothJointHistogramW = ; // // Block_reg_marginaliseTargetX = ; // // Block_reg_marginaliseTargetXY = ; // // Block_reg_marginaliseResultX = ; // // Block_reg_marginaliseResultXY = ; // // Block_reg_getVoxelBasedNMIGradientUsingPW2D = ; // // Block_reg_getVoxelBasedNMIGradientUsingPW3D = ; // // Block_reg_getVoxelBasedNMIGradientUsingPW2x2 = ; // // /* _reg_globalTransformation_gpu */ // Block_reg_affine_deformationField = ; // // /* _reg_localTransformation_gpu */ // Block_reg_spline_getDeformationField2D = ; // // Block_reg_spline_getDeformationField3D = ; // // Block_reg_spline_getApproxSecondDerivatives2D = ; // // Block_reg_spline_getApproxSecondDerivatives3D = ; // // Block_reg_spline_getApproxBendingEnergy2D = ; // // Block_reg_spline_getApproxBendingEnergy3D = ; // // Block_reg_spline_getApproxBendingEnergyGradient2D = ; // // Block_reg_spline_getApproxBendingEnergyGradient3D = ; // // Block_reg_spline_getApproxJacobianValues2D = ; // // Block_reg_spline_getApproxJacobianValues3D = ; // // Block_reg_spline_getJacobianValues2D = ; // // Block_reg_spline_getJacobianValues3D = ; // // Block_reg_spline_logSquaredValues = ; // // Block_reg_spline_computeApproxJacGradient2D = ; // // Block_reg_spline_computeApproxJacGradient3D = ; // // Block_reg_spline_computeJacGradient2D = ; // // Block_reg_spline_computeJacGradient3D = ; // // Block_reg_spline_approxCorrectFolding3D = ; // // Block_reg_spline_correctFolding3D = ; // // Block_reg_getDeformationFromDisplacement = ; // // Block_reg_getDisplacementFromDeformation = ; // // Block_reg_defField_compose2D = ; // // Block_reg_defField_compose3D = ; // // Block_reg_defField_getJacobianMatrix = ; // // /* _reg_optimiser_gpu */ // Block_reg_initialiseConjugateGradient = ; // // Block_reg_GetConjugateGradient1 = ; // // Block_reg_GetConjugateGradient2 = ; // // Block_reg_getEuclideanDistance = ; // // Block_reg_updateControlPointPosition = ; // // /* _reg_ssd_gpu */ // Block_reg_getSquaredDifference = ; // // Block_reg_getSSDGradient = ; // // /* _reg_tools_gpu */ // Block_reg_voxelCentric2NodeCentric = ; // // Block_reg_convertNMIGradientFromVoxelToRealSpace = ; // // Block_reg_ApplyConvolutionWindowAlongX = ; // // Block_reg_ApplyConvolutionWindowAlongY = ; // // 
Block_reg_ApplyConvolutionWindowAlongZ = ; // // Block_reg_arithmetic = ; // // /* _reg_resampling_gpu */ // Block_reg_resampleImage2D = ; // // Block_reg_resampleImage3D = ; // // Block_reg_getImageGradient2D = ; // // Block_reg_getImageGradient3D = ; // #ifndef NDEBUG printf("[NiftyReg DEBUG] NiftyReg_CudaBlock200 constructor called\n"); #endif } /* ******************************** */ NiftyReg_CudaBlock300::NiftyReg_CudaBlock300() { Block_target_block = 640; // 45 reg Block_result_block = 640; // 47 reg - ????? smem /* _reg_mutualinformation_gpu */ Block_reg_smoothJointHistogramX = 768; // 34 reg Block_reg_smoothJointHistogramY = 768; // 34 reg Block_reg_smoothJointHistogramZ = 768; // 34 reg Block_reg_smoothJointHistogramW = 768; // 34 reg Block_reg_marginaliseTargetX = 1024; // 24 reg Block_reg_marginaliseTargetXY = 1024; // 24 reg Block_reg_marginaliseResultX = 1024; // 24 reg Block_reg_marginaliseResultXY = 1024; // 24 reg Block_reg_getVoxelBasedNMIGradientUsingPW2D = 768; // 38 reg Block_reg_getVoxelBasedNMIGradientUsingPW3D = 640; // 45 reg Block_reg_getVoxelBasedNMIGradientUsingPW2x2 = 576; // 55 reg /* _reg_globalTransformation_gpu */ Block_reg_affine_deformationField = 1024; // 23 reg /* _reg_localTransformation_gpu */ Block_reg_spline_getDeformationField2D = 768; // 34 reg Block_reg_spline_getDeformationField3D = 768; // 34 reg Block_reg_spline_getApproxSecondDerivatives2D = 1024; // 25 reg Block_reg_spline_getApproxSecondDerivatives3D = 768; // 34 reg Block_reg_spline_getApproxBendingEnergy2D = 1024; // 23 reg Block_reg_spline_getApproxBendingEnergy3D = 1024; // 23 reg Block_reg_spline_getApproxBendingEnergyGradient2D = 1024; // 28 reg Block_reg_spline_getApproxBendingEnergyGradient3D = 768; // 33 reg Block_reg_spline_getApproxJacobianValues2D = 768; // 34 reg Block_reg_spline_getApproxJacobianValues3D = 640; // 46 reg Block_reg_spline_getJacobianValues2D = 768; // 34 reg Block_reg_spline_getJacobianValues3D = 768; // 34 reg Block_reg_spline_logSquaredValues = 1024; // 23 reg Block_reg_spline_computeApproxJacGradient2D = 768; // 34 reg Block_reg_spline_computeApproxJacGradient3D = 768; // 38 reg Block_reg_spline_computeJacGradient2D = 768; // 34 reg Block_reg_spline_computeJacGradient3D = 768; // 37 reg Block_reg_spline_approxCorrectFolding3D = 768; // 34 reg Block_reg_spline_correctFolding3D = 768; // 34 reg Block_reg_getDeformationFromDisplacement = 1024; // 18 reg Block_reg_getDisplacementFromDeformation = 1024; // 18 reg Block_reg_defField_compose2D = 1024; // 23 reg Block_reg_defField_compose3D = 1024; // 24 reg Block_reg_defField_getJacobianMatrix = 768; // 34 reg /* _reg_optimiser_gpu */ Block_reg_initialiseConjugateGradient = 1024; // 20 reg Block_reg_GetConjugateGradient1 = 1024; // 22 reg Block_reg_GetConjugateGradient2 = 1024; // 25 reg Block_reg_getEuclideanDistance = 1024; // 20 reg Block_reg_updateControlPointPosition = 1024; // 22 reg /* _reg_ssd_gpu */ Block_reg_getSquaredDifference = 768; // 34 reg Block_reg_getSSDGradient = 768; // 34 reg /* _reg_tools_gpu */ Block_reg_voxelCentric2NodeCentric = 1024; // 23 reg Block_reg_convertNMIGradientFromVoxelToRealSpace = 1024; // 23 reg Block_reg_ApplyConvolutionWindowAlongX = 1024; // 25 reg Block_reg_ApplyConvolutionWindowAlongY = 1024; // 25 reg Block_reg_ApplyConvolutionWindowAlongZ = 1024; // 25 reg Block_reg_arithmetic = 1024; // /* _reg_resampling_gpu */ Block_reg_resampleImage2D = 1024; // 23 reg Block_reg_resampleImage3D = 1024; // 24 reg Block_reg_getImageGradient2D = 768; // 34 reg 
Block_reg_getImageGradient3D = 768; // 34 reg #ifndef NDEBUG printf("[NiftyReg DEBUG] NiftyReg_CudaBlock300 constructor called\n"); #endif } #endif
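/* ******************************** */
// The constructors above tabulate a threads-per-block value for every kernel,
// tuned per GPU generation (NiftyReg_CudaBlock100/200/300); the trailing
// comments record the register / shared-memory / constant-memory footprint
// that motivated each choice. A minimal sketch -- using a hypothetical
// element-wise kernel, not one of NiftyReg's actual kernels -- of how such a
// block size is typically turned into a launch configuration:
#include <cuda_runtime.h>

__global__ void scale_kernel(float *data, unsigned int voxelNumber) {
  const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < voxelNumber) data[tid] *= 2.f;   // one thread per voxel, guarded
}

// blockSize would be one of the tabulated values, e.g. Block_reg_arithmetic.
void launch_with_block_size(float *d_data, unsigned int voxelNumber,
                            unsigned int blockSize) {
  const unsigned int gridSize = (voxelNumber + blockSize - 1) / blockSize; // ceil
  scale_kernel<<<gridSize, blockSize>>>(d_data, voxelNumber);
}
/* ******************************** */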
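/////////////////////////////////////////////////////////////////////////////
// The file below (PyCA's GMemOpers) implements element-wise GPU array
// operations. Every host wrapper follows the same pattern: REG_BLOCK_SIZE
// threads per block, a grid obtained by ceil-dividing the element count, and
// an `if (id < n)` guard in the kernel; masked variants additionally test
// d_mask[id], and scalar operands either come by value from the host or, when
// onDev is true, are bound with cache_bind() and read in-kernel via fetch().
// Below is a minimal standalone sketch of the basic and masked launch pattern
// in plain CUDA (kBlockSize, ceilDiv and addC are stand-ins for the project's
// REG_BLOCK_SIZE, iDivUp/make_grid and generated operators, which are defined
// elsewhere):
#include <cuda_runtime.h>
#include <cstddef>

static const unsigned int kBlockSize = 256;

static unsigned int ceilDiv(std::size_t a, unsigned int b) {
  return static_cast<unsigned int>((a + b - 1) / b);
}

template <typename T>
__global__ void addC_kernel(T *d_o, const T *d_i, T c, unsigned int n) {
  const unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < n) d_o[id] = d_i[id] + c;                  // guarded element-wise op
}

template <typename T>
__global__ void addC_masked_kernel(T *d_o, const T *d_i, T c,
                                   const T *d_mask, unsigned int n) {
  const unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < n && d_mask[id]) d_o[id] = d_i[id] + c;    // only where mask != 0
}

template <typename T>
void addC(T *d_o, const T *d_i, const T &c, std::size_t n, cudaStream_t stream) {
  dim3 threads(kBlockSize);
  dim3 grid(ceilDiv(n, threads.x));
  addC_kernel<<<grid, threads, 0, stream>>>(d_o, d_i, c,
                                            static_cast<unsigned int>(n));
}
/////////////////////////////////////////////////////////////////////////////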
#include <cuda_runtime.h> #include <pycaUtils.h> #include <gcache.h> #include "PyCAException.h" #include "mem.h" #include "MemOperDefs.h" // TEST make sure boost isn't included in nvcc code #if defined(BOOST_COMPILER) int bla[-1]; #endif #define GMEM_UNARY_OPERS(OP) MEM_UNARY_OPERS(GMemOpers, OP) #define GMEM_BINARY_OPERS(OP) MEM_BINARY_OPERS(GMemOpers, OP) #define GMEM_BINARY_OPERC(OP) MEM_BINARY_OPERC(GMemOpers, OP) #define GMEM_BINARY_OPERC_I(OP) MEM_BINARY_OPERC_I(GMemOpers, OP) #define GMEM_BINARY_OPER_NOC(OP) MEM_BINARY_OPER_NOC(GMemOpers, OP) #define GMEM_BINARY_OPER_NOC_I(OP) MEM_BINARY_OPER_NOC_I(GMemOpers, OP) #define GMEM_BINARY_OPER(OP) MEM_BINARY_OPER(GMemOpers, OP) #define GMEM_BINARY_OPER_I(OP) MEM_BINARY_OPER_I(GMemOpers, OP) namespace PyCA { template<typename T> void GMemOpers<T>::Copy(T* d_o, const T* d_i, size_t n, StreamT stream){ acpyArrayD2D(d_o, d_i, n, stream); } // masked version requires kernel template<typename T> __global__ void Copy_masked_kernel(T* d_o, const T* d_i, const T* d_mask, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) if(d_mask[id]) d_o[id] = d_i[id]; } template<typename T> void GMemOpers<T>::Copy(T* d_o, const T* d_i, const T* d_mask, size_t n, StreamT stream) { dim3 threads(REG_BLOCK_SIZE); dim3 grids = make_grid(iDivUp(n, threads.x)); Copy_masked_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_mask, n); } ///////////////////////////////////////////////////////////////////////////////// // the function provided by CUDA is slow and is not flexible enough to set // value with different type ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void SetMem_kernel(T* d_o, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = c; } template<class T> __global__ void SetMem_kernel(T* d_o, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = fetch(0, (T*)NULL); } template<typename T> void GMemOpers<T>::SetMem(T* d_o, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids = make_grid(iDivUp(n, threads.x)); if (onDev) { cache_bind(&c); SetMem_kernel<<<grids, threads, 0, stream>>>(d_o, n); } else { SetMem_kernel<<<grids, threads, 0, stream>>>(d_o, c, n); } } template<typename T> __global__ void SetMem_masked_kernel(T* d_o, T c, const T* d_mask, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) if(d_mask[id]) d_o[id] = c; } template<class T> __global__ void SetMem_masked_kernel(T* d_o, const T* d_mask, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) if(d_mask[id]) d_o[id] = fetch(0, (T*)NULL); } template<typename T> void GMemOpers<T>::SetMem(T* d_o, const T& c, const T* d_mask, size_t n, StreamT stream, bool onDev) { dim3 threads(REG_BLOCK_SIZE); dim3 grids = make_grid(iDivUp(n, threads.x)); if (onDev) { cache_bind(&c); SetMem_masked_kernel<<<grids, threads, 0, stream>>>(d_o, d_mask, n); } else { SetMem_masked_kernel<<<grids, threads, 0, stream>>>(d_o, c, d_mask, n); } } // ///////////////////////////////////////////////////////////////////////////////// // // SetLinear // // Initiate the value of an unsigned array with a linear ram // ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void SetLinear_kernel(T* d_o, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = 
(T)id; } template<typename T> void GMemOpers<T>::SetLinear(T* d_o, size_t n, StreamT stream) { dim3 threads(REG_BLOCK_SIZE); dim3 grids = make_grid(iDivUp(n,threads.x)); SetLinear_kernel<<<grids, threads, 0, stream>>>(d_o, n); } ///////////////////////////////////////////////////////////////////////////////// // SetLinearDown // Initiate the value of an unsigned array with a linear ram down ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void SetLinearDown_kernel(T* d_o, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (T)(n - id - 1); } template<typename T> void GMemOpers<T>::SetLinearDown(T* d_o, size_t n, StreamT stream) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); SetLinearDown_kernel<<<grids, threads, 0, stream>>>(d_o, n); } ///////////////////////////////////////////////////////////////////////////////// // Comp_unary // Return the result on a single operation on the input : abs, negative, sqrt ... ////////////////////////////////////////////////////////////////////////////////// template<typename T, class trait> __global__ void Comp_unary_kernel(T* d_o, const T* d_i, int n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = trait::op(d_i[id]); } } template<typename T> template<MATH_OPS op> void GMemOpers<T>::Comp_unary(T* d_o, const T* d_i, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if(!MOpers<T, op>::valid()){ throw PyCAException(__FILE__, __LINE__, "Unimplemented math operation for specified type"); } Comp_unary_kernel<T, MOpers<T, op> ><<<grids, threads, 0, stream>>>(d_o, d_i, n); } // masked version template<typename T, class trait> __global__ void Comp_unary_masked_kernel(T* d_o, const T* d_i, const T* d_mask, int n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ if(d_mask[id]) d_o[id] = trait::op(d_i[id]); } } template<typename T> template<MATH_OPS op> void GMemOpers<T>:: Comp_unary(T* d_o, const T* d_i, const T* d_mask, size_t n, StreamT stream) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if(!MOpers<T, op>::valid()){ throw PyCAException(__FILE__, __LINE__, "Unimplemented math operation for specified type"); } Comp_unary_masked_kernel<T, MOpers<T, op> > <<<grids, threads, 0, stream>>> (d_o, d_i, d_mask, n); } ///////////////////////////////////////////////////////////////////////////////// // Comp_unary Inplace version ////////////////////////////////////////////////////////////////////////////////// template<typename T, class trait> __global__ void Comp_unary_kernel_I(T* d_o, int n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = trait::op(d_o[id]); } } template<typename T> template<MATH_OPS op> void GMemOpers<T>::Comp_unary_I(T* d_o, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); checkConfig(grids); if(!MOpers<T, op>::valid()){ throw PyCAException(__FILE__, __LINE__, "Unimplemented math operation for specified type"); } Comp_unary_kernel_I<T, MOpers<T, op> ><<<grids, threads, 0, stream>>>(d_o, n); } // masked version template<typename T, class trait> __global__ void Comp_unary_masked_kernel_I(T* d_o, const T* d_mask, int n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ if(d_mask[id]) d_o[id] = trait::op(d_o[id]); } } template<typename T> template<MATH_OPS op> 
void GMemOpers<T>:: Comp_unary_I(T* d_o, const T* d_mask, size_t n, StreamT stream) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); checkConfig(grids); if(!MOpers<T, op>::valid()){ throw PyCAException(__FILE__, __LINE__, "Unimplemented math operation for specified type"); } Comp_unary_masked_kernel_I<T, MOpers<T, op> > <<<grids, threads, 0, stream>>> (d_o, d_mask, n); } /////////////////////////////////////////////////////////////////////////////// // Generated Unary Function Definitions /////////////////////////////////////////////////////////////////////////////// GMEM_UNARY_OPERS(Abs) GMEM_UNARY_OPERS(Cube) GMEM_UNARY_OPERS(Exp) GMEM_UNARY_OPERS(Log) GMEM_UNARY_OPERS(Sqr) GMEM_UNARY_OPERS(Neg) GMEM_UNARY_OPERS(Ramp) GMEM_UNARY_OPERS(Sgn) GMEM_UNARY_OPERS(Step) GMEM_UNARY_OPERS(Sqrt) GMEM_UNARY_OPERS(Inv) GMEM_UNARY_OPERS(Sin) GMEM_UNARY_OPERS(Asin) GMEM_UNARY_OPERS(Cos) GMEM_UNARY_OPERS(Acos) GMEM_UNARY_OPERS(Tan) GMEM_UNARY_OPERS(Atan) GMEM_UNARY_OPERS(Csc) GMEM_UNARY_OPERS(Sec) GMEM_UNARY_OPERS(Cot) GMEM_UNARY_OPERS(Ceil) GMEM_UNARY_OPERS(Floor) GMEM_UNARY_OPERS(Round) ///////////////////////////////////////////////////////////////////////////////// // Binary function with constant ////////////////////////////////////////////////////////////////////////////////// template<typename T, class op> __global__ void binaryC_kernel(T* d_o, const T* d_i, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = op::op(d_i[id], c); } } template<class T, class op> __global__ void binaryC_kernel(T* d_o, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = op::op(d_i[id], fetch(0,(T*)NULL)); } } template<typename T> template<MATH_OPS op> void GMemOpers<T>::binaryC(T* d_o, const T* d_i, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if(!MOpers<T, op>::valid()){ throw PyCAException(__FILE__, __LINE__, "Unimplemented math operation for specified type"); } if (onDev) { cache_bind(&c); binaryC_kernel<T, MOpers<T, op> ><<<grids, threads, 0, stream>>>(d_o, d_i, n); } else { binaryC_kernel<T, MOpers<T, op> ><<<grids, threads, 0, stream>>>(d_o, d_i, c, n); } } // masked versions template<typename T, class op> __global__ void binaryC_masked_kernel(T* d_o, const T* d_i, T c, const T* d_mask, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ if(d_mask[id]) d_o[id] = op::op(d_i[id], c); } } template<class T, class op> __global__ void binaryC_masked_kernel(T* d_o, const T* d_i, const T* d_mask, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ if(d_mask[id]) d_o[id] = op::op(d_i[id], fetch(0,(T*)NULL)); } } template<typename T> template<MATH_OPS op> void GMemOpers<T>:: binaryC(T* d_o, const T* d_i, const T& c, const T* d_mask, size_t n, StreamT stream, bool onDev) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if(!MOpers<T, op>::valid()){ throw PyCAException(__FILE__, __LINE__, "Unimplemented math operation for specified type"); } if (onDev) { cache_bind(&c); binaryC_masked_kernel<T, MOpers<T, op> > <<<grids, threads, 0, stream>>>(d_o, d_i, d_mask, n); } else { binaryC_masked_kernel<T, MOpers<T, op> > <<<grids, threads, 0, stream>>>(d_o, d_i, c, d_mask, n); } } ///////////////////////////////////////////////////////////////////////////////// // Binary in-place function with constant 
////////////////////////////////////////////////////////////////////////////////// template<typename T, class op> __global__ void binaryC_I_kernel(T* d_o, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ op::iop(d_o[id], c); } } template<class T, class op> __global__ void binaryC_I_kernel(T* d_o, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ op::iop(d_o[id], fetch(0,(T*)NULL)); } } template<typename T> template<MATH_OPS op> void GMemOpers<T>::binaryC_I(T* d_o, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if(!MOpers<T, op>::valid()){ throw PyCAException(__FILE__, __LINE__, "Unimplemented math operation for specified type"); } if (onDev) { cache_bind(&c); binaryC_I_kernel<T, MOpers<T, op> ><<<grids, threads, 0, stream>>>(d_o, n); } else { binaryC_I_kernel<T, MOpers<T, op> ><<<grids, threads, 0, stream>>>(d_o, c, n); } } // masked versions template<typename T, class op> __global__ void binaryC_I_masked_kernel(T* d_o, T c, const T* d_mask, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ if(d_mask[id]) op::iop(d_o[id], c); } } template<class T, class op> __global__ void binaryC_I_masked_kernel(T* d_o, const T* d_mask, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ if(d_mask[id]) op::iop(d_o[id], fetch(0,(T*)NULL)); } } template<typename T> template<MATH_OPS op> void GMemOpers<T>:: binaryC_I(T* d_o, const T& c, const T* d_mask, size_t n, StreamT stream, bool onDev) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if(!MOpers<T, op>::valid()){ throw PyCAException(__FILE__, __LINE__, "Unimplemented math operation for specified type"); } if (onDev) { cache_bind(&c); binaryC_I_masked_kernel<T, MOpers<T, op> > <<<grids, threads, 0, stream>>>(d_o, d_mask, n); } else { binaryC_I_masked_kernel<T, MOpers<T, op> > <<<grids, threads, 0, stream>>>(d_o, c, d_mask, n); } } ///////////////////////////////////////////////////////////////////////////////// // Binary function with image ////////////////////////////////////////////////////////////////////////////////// template<typename T, class op> __global__ void binary_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = op::op(d_i[id], d_i1[id]); } } template<typename T> template<MATH_OPS op> void GMemOpers<T>::binary(T* d_o, const T* d_i, const T* d_i1, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if(!MOpers<T, op>::valid()){ throw PyCAException(__FILE__, __LINE__, "Unimplemented math operation for specified type"); } binary_kernel<T, MOpers<T, op> ><<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } // masked version template<typename T, class op> __global__ void binary_masked_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_mask, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ if(d_mask[id]) d_o[id] = op::op(d_i[id], d_i1[id]); } } template<typename T> template<MATH_OPS op> void GMemOpers<T>:: binary(T* d_o, const T* d_i, const T* d_i1, const T* d_mask, size_t n, StreamT stream) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if(!MOpers<T, op>::valid()){ throw PyCAException(__FILE__, __LINE__, "Unimplemented math operation for specified type"); } binary_masked_kernel<T, MOpers<T, op> > 
<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_mask, n); } ///////////////////////////////////////////////////////////////////////////////// // Binary in-place function with image ////////////////////////////////////////////////////////////////////////////////// template<typename T, class op> __global__ void binary_I_kernel(T* d_o, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ op::iop(d_o[id], d_i[id]); } } template<typename T> template<MATH_OPS op> void GMemOpers<T>::binary_I(T* d_o, const T* d_i, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if(!MOpers<T, op>::valid()){ throw PyCAException(__FILE__, __LINE__, "Unimplemented math operation for specified type"); } binary_I_kernel<T, MOpers<T, op> ><<<grids, threads, 0, stream>>>(d_o, d_i, n); } // masked versions template<typename T, class op> __global__ void binary_I_masked_kernel(T* d_o, const T* d_i, const T* d_mask, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ if(d_mask[id]) op::iop(d_o[id], d_i[id]); } } template<typename T> template<MATH_OPS op> void GMemOpers<T>::binary_I(T* d_o, const T* d_i, const T* d_mask, size_t n, StreamT stream) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if(!MOpers<T, op>::valid()){ throw PyCAException(__FILE__, __LINE__, "Unimplemented math operation for specified type"); } binary_I_masked_kernel<T, MOpers<T, op> > <<<grids, threads, 0, stream>>>(d_o, d_i, d_mask, n); } /////////////////////////////////////////////////////////////////////////////// // Generated Binary Function Definitions /////////////////////////////////////////////////////////////////////////////// GMEM_BINARY_OPERS(Add) GMEM_BINARY_OPERS(Sub) GMEM_BINARY_OPERS(Mul) GMEM_BINARY_OPERS(Div) GMEM_BINARY_OPERS(Max) GMEM_BINARY_OPERS(Min) GMEM_BINARY_OPERS(LT) GMEM_BINARY_OPERS(LTE) GMEM_BINARY_OPERS(EQ) GMEM_BINARY_OPERS(NEQ) GMEM_BINARY_OPERS(GT) GMEM_BINARY_OPERS(GTE) GMEM_BINARY_OPERS(Atan2) // Pow doesn't have image version GMEM_BINARY_OPERC(Pow) GMEM_BINARY_OPERC_I(Pow) GMEM_BINARY_OPER_NOC(Pow) GMEM_BINARY_OPER_NOC_I(Pow) ///////////////////////////////////////////////////////////////////////////////// // Absolute value of the difference ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void AbsDiff_kernel(T* d_o , const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = (d_i[id] >= d_i1[id]) ? (d_i[id] - d_i1[id]) : (d_i1[id] - d_i[id]); } } template<typename T> void GMemOpers<T>::AbsDiff(T* d_o, const T* d_i, const T* d_i1, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); AbsDiff_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } ///////////////////////////////////////////////////////////////////////////////// // Absolute value of the difference ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void AbsDiff_kernel_I(T* d_o, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = (d_o[id] >= d_i[id]) ? 
d_o[id] - d_i[id] : d_o[id] = d_i[id] - d_o[id]; } } template<typename T> void GMemOpers<T>::AbsDiff_I(T* d_o, const T* d_i, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); AbsDiff_kernel_I<<<grids, threads, 0, stream>>>(d_o, d_i, n); } ///////////////////////////////////////////////////////////////////////////////// // Square value of the difference ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void SqrDiff_kernel(T* d_o , const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = (d_i[id] - d_i1[id]) * (d_i[id] - d_i1[id]); } } template<typename T> void GMemOpers<T>::SqrDiff(T* d_o, const T* d_i, const T* d_i1, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); SqrDiff_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } ///////////////////////////////////////////////////////////////////////////////// // Square value of the difference ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void SqrDiff_kernel_I(T* d_o, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = (d_o[id] - d_i[id]) * (d_o[id] - d_i[id]); } } template<typename T> void GMemOpers<T>::SqrDiff_I(T* d_o, const T* d_i, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); SqrDiff_kernel_I<<<grids, threads, 0, stream>>>(d_o, d_i, n); } ///////////////////////////////////////////////////////////////////////////////// // d_o = d_i * (d_i1 * c) ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void MulMulC_kernel(T* d_o, const T* d_i, const T* d_i1, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id <n){ d_o[id] = d_i[id] * (d_i1[id] * c); } } template<class T> __global__ void MulMulC_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id <n){ d_o[id] = d_i[id] * (d_i1[id] * fetch(0,(T*)NULL)); } } template<typename T> void GMemOpers<T>::MulMulC(T* d_o, const T* d_i, const T* d_i1, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev) { cache_bind(&c); MulMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } else MulMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, c, n); } template<class T> __global__ void MulMulC_I_kernel(T* d_o, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id <n){ d_o[id] *= (d_i[id] * fetch(0,(T*)NULL)); } } template<typename T> __global__ void MulMulC_I_kernel(T* d_o, const T* d_i, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id <n){ d_o[id] *= (d_i[id] * c); } } template<typename T> void GMemOpers<T>::MulMulC_I(T* d_o, const T* d_i, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev) { cache_bind(&c); MulMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, n); }else MulMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, c, n); } ///////////////////////////////////////////////////////////////////////////////// // Multiply three arrays together 
////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void MulMul_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id <n){ d_o[id] = d_i[id] * (d_i1[id] * d_i2[id]); } } template<typename T> void GMemOpers<T>::MulMul(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); MulMul_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); } ///////////////////////////////////////////////////////////////////////////////// // Multiply three arrays together inplace version ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void MulMul_I_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id <n){ d_o[id] *= (d_i[id] * d_i1[id]); } } template<typename T> void GMemOpers<T>::MulMul_I(T* d_o, const T* d_i, const T* d_i1, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); MulMul_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } ///////////////////////////////////////////////////////////////////////////////// // Add two array together and divide by the third array ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void AddDiv_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_i[id] + d_i1[id]) / d_i2[id]; } template<typename T> void GMemOpers<T>::AddDiv(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); AddDiv_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); } ///////////////////////////////////////////////////////////////////////////////// // Add two array together and divide by the third array inplace ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void AddDiv_I_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_o[id] + d_i[id]) / d_i1[id]; } template<typename T> void GMemOpers<T>::AddDiv_I(T* d_o, const T* d_i, const T* d_i1, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); AddDiv_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } ///////////////////////////////////////////////////////////////////////////////// // Sub two array together and divide by the third array ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void SubDiv_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_i[id] - d_i1[id]) / d_i2[id]; } template<typename T> void GMemOpers<T>::SubDiv(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); SubDiv_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); } ///////////////////////////////////////////////////////////////////////////////// // Sub two array together and divide by the third 
array inplace ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void SubDiv_I_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_o[id] - d_i[id]) / d_i1[id]; } template<typename T> void GMemOpers<T>::SubDiv_I(T* d_o, const T* d_i, const T* d_i1, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); SubDiv_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } ///////////////////////////////////////////////////////////////////////////////// // Multiply two array together and add the third one ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void MulAdd_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_i[id] * d_i1[id] + d_i2[id]; } template<typename T> void GMemOpers<T>::MulAdd(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); MulAdd_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); } ///////////////////////////////////////////////////////////////////////////////// // Multiply two array together and add the third one inplace version ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void MulAdd_I_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_o[id] * d_i[id] + d_i1[id]; } template<typename T> void GMemOpers<T>::MulAdd_I(T* d_o, const T* d_i, const T* d_i1, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); MulAdd_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } ///////////////////////////////////////////////////////////////////////////////// // Multiply two array together and sub the third one ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void MulSub_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_i[id] * d_i1[id] - d_i2[id]; } template<typename T> void GMemOpers<T>::MulSub(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); MulSub_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); } ///////////////////////////////////////////////////////////////////////////////// // Multiply two array together and sub the third one inplace version ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void MulSub_I_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_o[id] * d_i[id] - d_i1[id]; } template<typename T> void GMemOpers<T>::MulSub_I(T* d_o, const T* d_i, const T* d_i1, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); MulSub_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } ///////////////////////////////////////////////////////////////////////////////// // Add two arrays and multiply by the third one 
////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void AddMul_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_i[id] + d_i1[id]) * d_i2[id]; } template<typename T> void GMemOpers<T>::AddMul(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); AddMul_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); } ///////////////////////////////////////////////////////////////////////////////// // Add two array together and multiply by the third one inplace version ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void AddMul_I_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_o[id] + d_i[id]) * d_i1[id]; } template<typename T> void GMemOpers<T>::AddMul_I(T* d_o, const T* d_i, const T* d_i1, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); AddMul_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } ///////////////////////////////////////////////////////////////////////////////// // Sub two arrays and multiply by the third one ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void SubMul_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_i[id] - d_i1[id]) * d_i2[id]; } template<typename T> void GMemOpers<T>::SubMul(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); SubMul_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); } ///////////////////////////////////////////////////////////////////////////////// // Sub two array together and multiply by the third one inplace version ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void SubMul_I_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_o[id] - d_i[id]) * d_i1[id]; } template<typename T> void GMemOpers<T>::SubMul_I(T* d_o, const T* d_i, const T* d_i1, size_t n, StreamT stream){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); SubMul_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } ///////////////////////////////////////////////////////////////////////////////// // Add an array by a constant then multiply by other constant // Used with normalized function ////////////////////////////////////////////////////////////////////////////////// template<class T> __global__ void AddCMulC_kernel(T* d_o, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_i[id] + fetch_y(0, (T*)NULL)) * fetch_z(0, (T*)NULL); } template<typename T> __global__ void AddCMulC_kernel(T* d_o, const T* d_i, T a, T b, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_i[id] + a) * b; } template<typename T> void GMemOpers<T>::AddCMulC(T* d_o, const T* d_i, const T& a, const T& b, size_t n, StreamT stream, bool onDev){ dim3 
threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if (onDev) { cache_bind_y(&a); cache_bind_z(&b); AddCMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, n); } else { AddCMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, a, b, n); } } template<typename T> __global__ void AddCMulC_I_kernel(T* d_o, T a, T b, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_o[id] + a) * b; } template<class T> __global__ void AddCMulC_I_kernel(T* d_o, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_o[id] + fetch_y(0, (T*)NULL)) * fetch_z(0, (T*)NULL); } template<typename T> void GMemOpers<T>::AddCMulC_I(T* d_o, const T& a, const T& b, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if (onDev) { cache_bind_y(&a); cache_bind_z(&b); AddCMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, n); } else AddCMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, a, b, n); } ///////////////////////////////////////////////////////////////////////////////// // Multiply an array by a constant and add the second one ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void MulCAdd_kernel(T* d_o, const T* d_i, T c, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_i[id] * c + d_i1[id]; } template<class T> __global__ void MulCAdd_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_i[id] * fetch(0, (T*)NULL) + d_i1[id]; } template<typename T> void GMemOpers<T>::MulCAdd(T* d_o, const T* d_i, const T& c, const T* d_i1, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if (onDev) { cache_bind(&c); MulCAdd_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } else MulCAdd_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, c, d_i1, n); } ///////////////////////////////////////////////////////////////////////////////// // Multiply an array by a constant and add the second one inplace version ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void MulCAdd_I_kernel(T* d_o, T c, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_o[id] * c + d_i[id]; } template<class T> __global__ void MulCAdd_I_kernel(T* d_o, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_o[id] * fetch(0, (T*)NULL) + d_i[id]; } template<typename T> void GMemOpers<T>::MulCAdd_I(T* d_o, const T& c, const T* d_i, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if (onDev) { cache_bind(&c); MulCAdd_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, n); } else MulCAdd_I_kernel<<<grids, threads, 0, stream>>>(d_o, c, d_i, n); } ///////////////////////////////////////////////////////////////////////////////// // Multiply an array by a constant and sub the second one ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void MulCSub_kernel(T* d_o, const T* d_i, T c, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_i[id] * c - d_i1[id]; } template<class T> __global__ 
void MulCSub_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_i[id] * fetch(0, (T*)NULL) - d_i1[id]; } template<typename T> void GMemOpers<T>::MulCSub(T* d_o, const T* d_i, const T& c, const T* d_i1, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if (onDev) { cache_bind(&c); MulCSub_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } else MulCSub_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, c, d_i1, n); } ///////////////////////////////////////////////////////////////////////////////// // Multiply an array by a constant and sub the second one inplace version ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void MulCSub_I_kernel(T* d_o, T c, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_o[id] * c - d_i[id]; } template<class T> __global__ void MulCSub_I_kernel(T* d_o, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_o[id] * fetch(0, (T*)NULL) - d_i[id]; } template<typename T> void GMemOpers<T>::MulCSub_I(T* d_o, const T& c, const T* d_i, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if (onDev) { cache_bind(&c); MulCSub_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, n); } else MulCSub_I_kernel<<<grids, threads, 0, stream>>>(d_o, c, d_i, n); } ///////////////////////////////////////////////////////////////////////////////// // Multiply an array by a constant and add the second constant ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void MulCAddC_kernel(T* d_o, const T* d_i, T a, T b, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_i[id] * a + b; } template<class T> __global__ void MulCAddC_kernel(T* d_o, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_i[id] * fetch_y(0, (T*)NULL) + fetch_z(0, (T*)NULL); } template<typename T> void GMemOpers<T>::MulCAddC(T* d_o, const T* d_i, const T& a, const T& b, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if (onDev){ cache_bind_y(&a); cache_bind_z(&b); MulCAddC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, n); } else MulCAddC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, a, b, n); } ///////////////////////////////////////////////////////////////////////////////// // Multiply an array by a constant and add the second constant inplace version ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void MulCAddC_I_kernel(T* d_o, T a, T b, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_o[id] * a + b; } template<class T> __global__ void MulCAddC_I_kernel(T* d_o, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_o[id] * fetch_y(0, (T*)NULL) + fetch_z(0, (T*)NULL); } template<typename T> void GMemOpers<T>::MulCAddC_I(T* d_o, const T& a, const T& b, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if (onDev) { cache_bind_y(&a); cache_bind_z(&b); MulCAddC_I_kernel<<<grids, threads, 0, 
stream>>>(d_o, n); }else MulCAddC_I_kernel<<<grids, threads, 0, stream>>>(d_o, a, b, n); } ///////////////////////////////////////////////////////////////////////////////// // Add two array then multiply by a constant ////////////////////////////////////////////////////////////////////////////////// template<class T> __global__ void AddMulC_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_i[id] + d_i1[id]) * fetch(0,(T*)NULL); } template<typename T> __global__ void AddMulC_kernel(T* d_o, const T* d_i, const T* d_i1, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_i[id] + d_i1[id]) * c; } template<typename T> void GMemOpers<T>::AddMulC(T* d_o, const T* d_i, const T* d_i1, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if (onDev) { cache_bind(&c); AddMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } else AddMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, c, n); } ///////////////////////////////////////////////////////////////////////////////// // Add two array then multiply by a constant inplace version ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void AddMulC_I_kernel(T* d_o, const T* d_i, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_o[id] + d_i[id]) * c; } template<class T> __global__ void AddMulC_I_kernel(T* d_o, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_o[id] + d_i[id]) * fetch(0,(T*)NULL); } template<typename T> void GMemOpers<T>::AddMulC_I(T* d_o, const T* d_i, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if (onDev) { cache_bind(&c); AddMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, n); } else AddMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, c, n); } ///////////////////////////////////////////////////////////////////////////////// // Sub two array then multiply by a constant ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void SubMulC_kernel(T* d_o, const T* d_i, const T* d_i1, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_i[id] - d_i1[id]) * c; } template<class T> __global__ void SubMulC_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_i[id] - d_i1[id]) * fetch(0,(T*)NULL); } template<typename T> void GMemOpers<T>::SubMulC(T* d_o, const T* d_i, const T* d_i1, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if (onDev) { cache_bind(&c); SubMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); }else SubMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, c, n); } ///////////////////////////////////////////////////////////////////////////////// // Sub two array then multiply by a constant inplace version ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void SubMulC_I_kernel(T* d_o, const T* d_i, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = 
(d_o[id] - d_i[id]) * c; } template<class T> __global__ void SubMulC_I_kernel(T* d_o, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_o[id] - d_i[id]) * fetch(0,(T*)NULL); } template<typename T> void GMemOpers<T>::SubMulC_I(T* d_o, const T* d_i, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if (onDev) { cache_bind(&c); SubMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, n); } else SubMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, c, n); } ///////////////////////////////////////////////////////////////////////////////// // Add an arrays with another array that multiply by a constant // d_o = d_i + d_i1 * c ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void Add_MulC_kernel(T* d_o, const T* d_i, const T* d_i1, T c, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_i[id] + d_i1[id] * c; } template<class T> __global__ void Add_MulC_kernel(T* d_o, const T* d_i, const T* d_i1, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_i[id] + d_i1[id] * fetch(0,(T*)NULL); } template<typename T> void GMemOpers<T>::Add_MulC(T* d_o, const T* d_i, const T* d_i1, const T& c, size_t n, StreamT stream, bool onDev) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev) { cache_bind(&c); Add_MulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } else Add_MulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, c, n); } ///////////////////////////////////////////////////////////////////////////////// // Add an arrays with another array that multiply by a constant // d_o = d_i + d_i1 * c ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void Add_MulC_kernel_I(T* d_o, const T* d_i, T c, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] += d_i[id] * c; } template<class T> __global__ void Add_MulC_kernel_I(T* d_o, const T* d_i, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] += d_i[id] * fetch(0,(T*)NULL); } template<typename T> void GMemOpers<T>::Add_MulC_I(T* d_o, const T* d_i, const T& c, size_t n, StreamT stream, bool onDev) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev) { cache_bind(&c); Add_MulC_kernel_I<<<grids, threads, 0, stream>>>(d_o, d_i, n); } else Add_MulC_kernel_I<<<grids, threads, 0, stream>>>(d_o, d_i, c, n); } ///////////////////////////////////////////////////////////////////////////////// // Add an arrays with another array that multiply by an array // d_o = d_i + d_i1 * d_i2 ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void Add_Mul_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_i[id] + d_i1[id] * d_i2[id]; } template<typename T> void GMemOpers<T>::Add_Mul(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, size_t n, StreamT stream) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); Add_Mul_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); } ///////////////////////////////////////////////////////////////////////////////// // Add an arrays 
with another array that multiply by an array // d_o = d_o + d_i * d_i1 ///////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void Add_Mul_kernel_I(T* d_o, const T* d_i, const T* d_i1, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] += d_i[id] * d_i1[id]; } template<typename T> void GMemOpers<T>::Add_Mul_I(T* d_o, const T* d_i, const T* d_i1, size_t n, StreamT stream) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); Add_Mul_kernel_I<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } ///////////////////////////////////////////////////////////////////////////////// // Sub an arrays with another array that multiply by an array // d_o = d_i + d_i1 * d_i2 ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void Sub_Mul_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_i[id] - d_i1[id] * d_i2[id]; } template<typename T> void GMemOpers<T>::Sub_Mul(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, size_t n, StreamT stream) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); Sub_Mul_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); } ///////////////////////////////////////////////////////////////////////////////// // Sub an arrays with another array that multiply by an array // d_o = d_o + d_i * d_i1 ///////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void Sub_Mul_kernel_I(T* d_o, const T* d_i, const T* d_i1, uint n) { uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] -= d_i[id] * d_i1[id]; } template<typename T> void GMemOpers<T>::Sub_Mul_I(T* d_o, const T* d_i, const T* d_i1, size_t n, StreamT stream) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); Sub_Mul_kernel_I<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } ///////////////////////////////////////////////////////////////////////////////// // Quadary function ////////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void MulC_Add_MulC_kernel(T* d_o, const T* d_i, T a, const T* d_i1, T b, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_i[id] * a + d_i1[id] * b; } } template<class T> __global__ void MulC_Add_MulC_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_i[id] * fetch_y(0, (T*)NULL) + d_i1[id] * fetch_z(0, (T*)NULL); } } template<typename T> void GMemOpers<T>:: MulC_Add_MulC(T* d_o, const T* d_i, const T& a, const T* d_i1, const T& b, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n,threads.x)); if (onDev) { cache_bind_y(&a); cache_bind_z(&b); MulC_Add_MulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } else { MulC_Add_MulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, a, d_i1, b, n); } } template<typename T> __global__ void MulC_Add_MulC_kernel_I(T* d_o, T a, const T* d_i, T b, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_o[id] * a + d_i[id] * b; } } template<class T> __global__ void MulC_Add_MulC_kernel_I(T* d_o, const T* d_i, uint n){ uint blockId = get_blockID(); 
uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_o[id] * fetch_y(0, (T*)NULL) + d_i[id] * fetch_z(0, (T*)NULL); } } template<typename T> void GMemOpers<T>:: MulC_Add_MulC_I(T* d_o, const T& a, const T* d_i, const T& b, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev) { cache_bind_y(&a); cache_bind_z(&b); MulC_Add_MulC_kernel_I<<<grids, threads, 0, stream>>>(d_o, d_i, n); } else MulC_Add_MulC_kernel_I<<<grids, threads, 0, stream>>>(d_o, a, d_i, b, n); } //////////////////////////////////////////////////////////////////////////////// // //////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void Add_AddMulC_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_i[id] + (d_i1[id] + d_i2[id]) * c; } } template<class T> __global__ void Add_AddMulC_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_i[id] + (d_i1[id] + d_i2[id]) * fetch(0,(T*)NULL); } } template<typename T> void GMemOpers<T>::Add_AddMulC(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev) { cache_bind(&c); Add_AddMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); } else Add_AddMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, c, n); } //////////////////////////////////////////////////////////////////////////////// // //////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void Add_AddMulC_I_kernel(T* d_o, const T* d_i, const T* d_i1, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] += (d_i[id] + d_i1[id]) * c; } } template<class T> __global__ void Add_AddMulC_I_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] += (d_i[id] + d_i1[id]) * fetch(0,(T*)NULL); } } template<typename T> void GMemOpers<T>::Add_AddMulC_I(T* d_o, const T* d_i, const T* d_i1, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev) { cache_bind(&c); Add_AddMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } else Add_AddMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, c, n); } //////////////////////////////////////////////////////////////////////////////// // //////////////////////////////////////////////////////////////////////////////// template<class T> __global__ void Add_SubMulC_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_i[id] + (d_i1[id] - d_i2[id]) * fetch(0,(T*)NULL); } } template<typename T> __global__ void Add_SubMulC_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_i[id] + (d_i1[id] - d_i2[id]) * c; } } template<typename T> void GMemOpers<T>::Add_SubMulC(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, 
threads.x)); if (onDev){ cache_bind(&c); Add_SubMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); } else Add_SubMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, c, n); } //////////////////////////////////////////////////////////////////////////////// // //////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void Add_SubMulC_I_kernel(T* d_o, const T* d_i, const T* d_i1, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] += (d_i[id] - d_i1[id]) * c; } } template<class T> __global__ void Add_SubMulC_I_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] += (d_i[id] - d_i1[id]) * fetch(0,(T*)NULL); } } template<typename T> void GMemOpers<T>::Add_SubMulC_I(T* d_o, const T* d_i, const T* d_i1, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev) { cache_bind(&c); Add_SubMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } else Add_SubMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, c, n); } //////////////////////////////////////////////////////////////////////////////// //// d_o = d_i + (d_i1 * d_i2 * c) //////////////////////////////////////////////////////////////////////////////// template<class T> __global__ void Add_MulMulC_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_i[id] + (d_i1[id] * d_i2[id]) * fetch(0,(T*)NULL); } } template<typename T> __global__ void Add_MulMulC_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_i[id] + (d_i1[id] * d_i2[id]) * c; } } template<typename T> void GMemOpers<T>::Add_MulMulC(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev) { cache_bind(&c); Add_MulMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); }else Add_MulMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, c, n); } //////////////////////////////////////////////////////////////////////////////// // d_o = d_o + (d_i * d_i1 * d) //////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void Add_MulMulC_I_kernel(T* d_o, const T* d_i, const T* d_i1, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] += (d_i[id] * d_i1[id]) * c; } } template<class T> __global__ void Add_MulMulC_I_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] += (d_i[id] * d_i1[id]) * fetch(0,(T*)NULL); } } template<typename T> void GMemOpers<T>::Add_MulMulC_I(T* d_o, const T* d_i, const T* d_i1, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev) { cache_bind(&c); Add_MulMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } else Add_MulMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, c, n); } //////////////////////////////////////////////////////////////////////////////// // d_o = d_i - (d_i1 + d_i2) * c 
//////////////////////////////////////////////////////////////////////////////// template<class T> __global__ void Sub_AddMulC_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_i[id] - (d_i1[id] + d_i2[id]) * fetch(0,(T*)NULL); } } template<typename T> __global__ void Sub_AddMulC_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_i[id] - (d_i1[id] + d_i2[id]) * c; } } template<typename T> void GMemOpers<T>::Sub_AddMulC(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev){ cache_bind(&c); Sub_AddMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); } else Sub_AddMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, c, n); } //////////////////////////////////////////////////////////////////////////////// // d_o -= (d_i + d_i1) * c //////////////////////////////////////////////////////////////////////////////// template<class T> __global__ void Sub_AddMulC_I_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] -= (d_i[id] + d_i1[id]) * fetch(0,(T*)NULL); } } template<typename T> __global__ void Sub_AddMulC_I_kernel(T* d_o, const T* d_i, const T* d_i1, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] -= (d_i[id] + d_i1[id]) * c; } } template<typename T> void GMemOpers<T>::Sub_AddMulC_I(T* d_o, const T* d_i, const T* d_i1, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if( onDev){ cache_bind(&c); Sub_AddMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } else Sub_AddMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, c, n); } //////////////////////////////////////////////////////////////////////////////// // d_o = d_i - (d_i1 - d_i2) * c //////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void Sub_SubMulC_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_i[id] - (d_i1[id] - d_i2[id]) * c; } } template<class T> __global__ void Sub_SubMulC_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_i[id] - (d_i1[id] - d_i2[id]) * fetch(0,(T*)NULL); } } template<typename T> void GMemOpers<T>::Sub_SubMulC(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev){ cache_bind(&c); Sub_SubMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); } else Sub_SubMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, c, n); } //////////////////////////////////////////////////////////////////////////////// // d_o -= (d_i - d_i1) * c //////////////////////////////////////////////////////////////////////////////// template<class T> __global__ void Sub_SubMulC_I_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] 
-= (d_i[id] - d_i1[id]) * fetch(0,(T*)NULL); } } template<typename T> __global__ void Sub_SubMulC_I_kernel(T* d_o, const T* d_i, const T* d_i1, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] -= (d_i[id] - d_i1[id]) * c; } } template<typename T> void GMemOpers<T>::Sub_SubMulC_I(T* d_o, const T* d_i, const T* d_i1, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev){ cache_bind(&c); Sub_SubMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } else Sub_SubMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, c, n); } //////////////////////////////////////////////////////////////////////////////// //// d_o = d_i + (d_i1 * d_i2 * c) //////////////////////////////////////////////////////////////////////////////// template<class T> __global__ void Sub_MulMulC_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_i[id] - (d_i1[id] * d_i2[id]) * fetch(0,(T*)NULL); } } template<typename T> __global__ void Sub_MulMulC_kernel(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] = d_i[id] - (d_i1[id] * d_i2[id]) * c; } } template<typename T> void GMemOpers<T>::Sub_MulMulC(T* d_o, const T* d_i, const T* d_i1, const T* d_i2, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev){ cache_bind(&c); Sub_MulMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, n); } else Sub_MulMulC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, d_i2, c, n); } //////////////////////////////////////////////////////////////////////////////// // d_o = d_o + (d_i * d_i1 * d) //////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void Sub_MulMulC_I_kernel(T* d_o, const T* d_i, const T* d_i1, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] -= (d_i[id] * d_i1[id]) * c; } } template<class T> __global__ void Sub_MulMulC_I_kernel(T* d_o, const T* d_i, const T* d_i1, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n){ d_o[id] -= (d_i[id] * d_i1[id]) * fetch(0,(T*)NULL); } } template<typename T> void GMemOpers<T>::Sub_MulMulC_I(T* d_o, const T* d_i, const T* d_i1, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev){ cache_bind(&c); Sub_MulMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, n); } else Sub_MulMulC_I_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, d_i1, c, n); } //////////////////////////////////////////////////////////////////////////////// // d_o = (d_i + a) * b + c //////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void AddCMulCAddC_kernel(T* d_o, const T* d_i, T a, T b, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_i[id] + a) * b + c; } template<class T> __global__ void AddCMulCAddC_kernel(T* d_o, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_i[id] + fetch_y(0,(T*)NULL) * fetch_z(0,(T*)NULL) + fetch(0,(T*)NULL); } template<typename T> void GMemOpers<T>:: 
AddCMulCAddC(T* d_o, const T* d_i, const T& a, const T& b, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev) { cache_bind(&c); cache_bind_y(&a); cache_bind_z(&b); AddCMulCAddC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, n); } else AddCMulCAddC_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, a, b, c, n); } //////////////////////////////////////////////////////////////////////////////// // d_o = (d_o + a) * b + c //////////////////////////////////////////////////////////////////////////////// template<typename T> __global__ void AddCMulCAddC_I_kernel(T* d_o, T a, T b, T c, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_o[id] + a) * b + c; } template<class T> __global__ void AddCMulCAddC_I_kernel(T* d_o, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = d_o[id] + fetch_y(0,(T*)NULL) * fetch_z(0,(T*)NULL) + fetch(0,(T*)NULL); } template<typename T> void GMemOpers<T>::AddCMulCAddC_I(T* d_o, const T& a, const T& b, const T& c, size_t n, StreamT stream, bool onDev){ dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); if (onDev){ cache_bind(&c); cache_bind_y(&a); cache_bind_z(&b); AddCMulCAddC_I_kernel<<<grids, threads, 0, stream>>>(d_o, n); } else AddCMulCAddC_I_kernel<<<grids, threads, 0, stream>>>(d_o, a, b, c, n); } template<typename T> __global__ void ReverseOrder_kernel(T* d_o, const T* d_i, uint n){ uint blockId = get_blockID(); uint id = get_threadID(blockId); if (id < n) d_o[id] = (d_i[n -1 - id]); } template<typename T> void GMemOpers<T>::ReverseOrder(T* d_o, const T* d_i, size_t n, StreamT stream) { dim3 threads(REG_BLOCK_SIZE); dim3 grids=make_grid(iDivUp(n, threads.x)); ReverseOrder_kernel<<<grids, threads, 0, stream>>>(d_o, d_i, n); } #define TILE_DIM 8 template<typename T> __global__ void ShiftRightCoordinate_shared_kernel(T* d_o, const T* d_i, uint sizeX, uint sizeY, uint sizeZ) { __shared__ T sdata[TILE_DIM][TILE_DIM+1][TILE_DIM+1]; const uint iPlaneSize = sizeX * sizeY; const uint oPlaneSize = sizeX * sizeZ; uint bx = blockIdx.x * TILE_DIM; uint by = blockIdx.y * TILE_DIM; uint tx = threadIdx.x; uint ty = threadIdx.y; uint iid = bx + tx + (by + ty) * sizeX; for (uint bz=0; bz < sizeZ; bz+= TILE_DIM){ if ((bx + tx < sizeX) && (by + ty < sizeY)) for (size_t tz=0; tz < TILE_DIM && (bz + tz < sizeZ); ++tz, iid+=iPlaneSize) sdata[tz][ty][tx] = d_i[iid]; __syncthreads(); uint oid = bz + ty + ((bx + tx) + by * sizeX) * sizeZ; if ((bz + ty < sizeZ) && (bx + tx < sizeX)) for (uint tz = 0; tz < TILE_DIM && (by + tz < sizeY); ++tz, oid+=oPlaneSize) d_o[oid] = sdata[ty][tz][tx]; } } template<typename T> __global__ void ShiftLeftCoordinate_shared_kernel(T* d_o, const T* d_i, uint sizeX, uint sizeY, uint sizeZ) { __shared__ T sdata[TILE_DIM][TILE_DIM+1][TILE_DIM+1]; const uint iPlaneSize = sizeX * sizeY; const uint oPlaneSize = sizeY * sizeZ; uint bx = blockIdx.x * TILE_DIM; uint by = blockIdx.y * TILE_DIM; uint tx = threadIdx.x; uint ty = threadIdx.y; uint bz = 0; uint iid = bx + tx + (by + ty) * sizeX; while (bz < sizeZ){ if ((bx + tx < sizeX) && (by + ty < sizeY)) for (uint tz = 0; tz < TILE_DIM && (bz + tz < sizeZ); ++tz, iid +=iPlaneSize) sdata[tz][ty][tx] = d_i[iid]; __syncthreads(); uint oid = by + bz * sizeY + bx * sizeY * sizeZ + (tx + ty * sizeY); if ((by + tx < sizeY) && (bz + ty < sizeZ)) for (uint tz = 0; tz < TILE_DIM && (bx + tz < sizeX); ++tz, oid += oPlaneSize) d_o[oid] = 
sdata[ty][tx][tz]; bz += TILE_DIM; } } template<typename T> void GMemOpers<T>:: ShiftCoordinate(T* d_o, const T* d_i, size_t sizeX, size_t sizeY, size_t sizeZ, bool dir, StreamT stream){ dim3 threads(8, 8); dim3 grids(iDivUp(sizeX, threads.x),iDivUp(sizeY, threads.y)); if (dir) { ShiftRightCoordinate_shared_kernel<<<grids, threads, 0, stream>>> (d_o, d_i, sizeX, sizeY, sizeZ); } else ShiftLeftCoordinate_shared_kernel<<<grids, threads, 0, stream>>> (d_o, d_i, sizeX, sizeY, sizeZ); } template class GMemOpers<float>; template class GMemOpers<int>; template class GMemOpers<uint>; } // end namespace PyCA
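// ---------------------------------------------------------------------------
// Illustrative sketch (not part of PyCA): the kernels above all follow one
// pattern -- one thread per element, with the scalar either passed by value
// (host constant) or read through a cached fetch when it already lives in
// device memory.  The standalone example below reproduces only the
// host-constant path of MulC_Add_MulC (d_o = d_i*a + d_i1*b) without the
// PyCA headers; every name here is hypothetical and chosen for the demo.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void MulC_Add_MulC_demo(float* d_o, const float* d_i, float a,
                                   const float* d_i1, float b, unsigned int n) {
    unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) d_o[id] = d_i[id] * a + d_i1[id] * b;
}

int main() {
    const unsigned int n = 1u << 20;
    const size_t bytes = n * sizeof(float);
    std::vector<float> h_x(n, 1.0f), h_y(n, 2.0f), h_o(n);

    float *d_x, *d_y, *d_o;
    cudaMalloc(&d_x, bytes);
    cudaMalloc(&d_y, bytes);
    cudaMalloc(&d_o, bytes);
    cudaMemcpy(d_x, h_x.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y.data(), bytes, cudaMemcpyHostToDevice);

    const unsigned int threads = 256;                        // analogous to REG_BLOCK_SIZE
    const unsigned int blocks  = (n + threads - 1) / threads; // analogous to iDivUp/make_grid
    MulC_Add_MulC_demo<<<blocks, threads>>>(d_o, d_x, 3.0f, d_y, 0.5f, n);

    cudaMemcpy(h_o.data(), d_o, bytes, cudaMemcpyDeviceToHost);
    printf("o[0] = %f (expected 4.0)\n", h_o[0]);            // 1*3 + 2*0.5

    cudaFree(d_x); cudaFree(d_y); cudaFree(d_o);
    return 0;
}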
#include "../config.cuh" #include "../util_type.cuh" #include "../util_namespace.cuh" #include "../block/block_load.cuh" #include "../block/block_store.cuh" #include "../block/block_merge_sort.cuh" #include <thrust/system/cuda/detail/core/util.h> CUB_NAMESPACE_BEGIN template < int _BLOCK_THREADS, int _ITEMS_PER_THREAD = 1, cub::BlockLoadAlgorithm _LOAD_ALGORITHM = cub::BLOCK_LOAD_DIRECT, cub::CacheLoadModifier _LOAD_MODIFIER = cub::LOAD_LDG, cub::BlockStoreAlgorithm _STORE_ALGORITHM = cub::BLOCK_STORE_DIRECT> struct AgentMergeSortPolicy { static constexpr int BLOCK_THREADS = _BLOCK_THREADS; static constexpr int ITEMS_PER_THREAD = _ITEMS_PER_THREAD; static constexpr int ITEMS_PER_TILE = BLOCK_THREADS * ITEMS_PER_THREAD; static constexpr cub::BlockLoadAlgorithm LOAD_ALGORITHM = _LOAD_ALGORITHM; static constexpr cub::CacheLoadModifier LOAD_MODIFIER = _LOAD_MODIFIER; static constexpr cub::BlockStoreAlgorithm STORE_ALGORITHM = _STORE_ALGORITHM; }; /// \brief This agent is responsible for the initial in-tile sorting. template <typename Policy, typename KeyInputIteratorT, typename ValueInputIteratorT, typename KeyIteratorT, typename ValueIteratorT, typename OffsetT, typename CompareOpT, typename KeyT, typename ValueT> struct AgentBlockSort { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- static constexpr bool KEYS_ONLY = Equals<ValueT, NullType>::VALUE; using BlockMergeSortT = BlockMergeSort<KeyT, Policy::BLOCK_THREADS, Policy::ITEMS_PER_THREAD, ValueT>; using KeysLoadIt = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator<Policy, KeyInputIteratorT>::type; using ItemsLoadIt = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator<Policy, ValueInputIteratorT>::type; using BlockLoadKeys = typename cub::BlockLoadType<Policy, KeysLoadIt>::type; using BlockLoadItems = typename cub::BlockLoadType<Policy, ItemsLoadIt>::type; using BlockStoreKeysIt = typename cub::BlockStoreType<Policy, KeyIteratorT>::type; using BlockStoreItemsIt = typename cub::BlockStoreType<Policy, ValueIteratorT>::type; using BlockStoreKeysRaw = typename cub::BlockStoreType<Policy, KeyT *>::type; using BlockStoreItemsRaw = typename cub::BlockStoreType<Policy, ValueT *>::type; union _TempStorage { typename BlockLoadKeys::TempStorage load_keys; typename BlockLoadItems::TempStorage load_items; typename BlockStoreKeysIt::TempStorage store_keys_it; typename BlockStoreItemsIt::TempStorage store_items_it; typename BlockStoreKeysRaw::TempStorage store_keys_raw; typename BlockStoreItemsRaw::TempStorage store_items_raw; typename BlockMergeSortT::TempStorage block_merge; }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; static constexpr int BLOCK_THREADS = Policy::BLOCK_THREADS; static constexpr int ITEMS_PER_THREAD = Policy::ITEMS_PER_THREAD; static constexpr int ITEMS_PER_TILE = Policy::ITEMS_PER_TILE; static constexpr int SHARED_MEMORY_SIZE = static_cast<int>(sizeof(TempStorage)); //--------------------------------------------------------------------- // Per thread data //--------------------------------------------------------------------- bool ping; _TempStorage &storage; KeysLoadIt keys_in; ItemsLoadIt items_in; OffsetT keys_count; KeyIteratorT keys_out_it; ValueIteratorT items_out_it; KeyT *keys_out_raw; ValueT *items_out_raw; CompareOpT compare_op; __device__ __forceinline__ AgentBlockSort(bool ping_, TempStorage &storage_, KeysLoadIt keys_in_, 
ItemsLoadIt items_in_, OffsetT keys_count_, KeyIteratorT keys_out_it_, ValueIteratorT items_out_it_, KeyT *keys_out_raw_, ValueT *items_out_raw_, CompareOpT compare_op_) : ping(ping_) , storage(storage_.Alias()) , keys_in(keys_in_) , items_in(items_in_) , keys_count(keys_count_) , keys_out_it(keys_out_it_) , items_out_it(items_out_it_) , keys_out_raw(keys_out_raw_) , items_out_raw(items_out_raw_) , compare_op(compare_op_) { } __device__ __forceinline__ void Process() { auto tile_idx = static_cast<OffsetT>(blockIdx.x); auto num_tiles = static_cast<OffsetT>(gridDim.x); auto tile_base = tile_idx * ITEMS_PER_TILE; int items_in_tile = (cub::min)(keys_count - tile_base, int{ITEMS_PER_TILE}); if (tile_idx < num_tiles - 1) { consume_tile<false>(tile_base, ITEMS_PER_TILE); } else { consume_tile<true>(tile_base, items_in_tile); } } template <bool IS_LAST_TILE> __device__ __forceinline__ void consume_tile(OffsetT tile_base, int num_remaining) { ValueT items_local[ITEMS_PER_THREAD]; if (!KEYS_ONLY) { if (IS_LAST_TILE) { BlockLoadItems(storage.load_items) .Load(items_in + tile_base, items_local, num_remaining, *(items_in + tile_base)); } else { BlockLoadItems(storage.load_items).Load(items_in + tile_base, items_local); } CTA_SYNC(); } KeyT keys_local[ITEMS_PER_THREAD]; if (IS_LAST_TILE) { BlockLoadKeys(storage.load_keys) .Load(keys_in + tile_base, keys_local, num_remaining, *(keys_in + tile_base)); } else { BlockLoadKeys(storage.load_keys) .Load(keys_in + tile_base, keys_local); } CTA_SYNC(); if (IS_LAST_TILE) { BlockMergeSortT(storage.block_merge) .Sort(keys_local, items_local, compare_op, num_remaining, keys_local[0]); } else { BlockMergeSortT(storage.block_merge).Sort(keys_local, items_local, compare_op); } CTA_SYNC(); if (ping) { if (IS_LAST_TILE) { BlockStoreKeysIt(storage.store_keys_it) .Store(keys_out_it + tile_base, keys_local, num_remaining); } else { BlockStoreKeysIt(storage.store_keys_it) .Store(keys_out_it + tile_base, keys_local); } if (!KEYS_ONLY) { CTA_SYNC(); if (IS_LAST_TILE) { BlockStoreItemsIt(storage.store_items_it) .Store(items_out_it + tile_base, items_local, num_remaining); } else { BlockStoreItemsIt(storage.store_items_it) .Store(items_out_it + tile_base, items_local); } } } else { if (IS_LAST_TILE) { BlockStoreKeysRaw(storage.store_keys_raw) .Store(keys_out_raw + tile_base, keys_local, num_remaining); } else { BlockStoreKeysRaw(storage.store_keys_raw) .Store(keys_out_raw + tile_base, keys_local); } if (!KEYS_ONLY) { CTA_SYNC(); if (IS_LAST_TILE) { BlockStoreItemsRaw(storage.store_items_raw) .Store(items_out_raw + tile_base, items_local, num_remaining); } else { BlockStoreItemsRaw(storage.store_items_raw) .Store(items_out_raw + tile_base, items_local); } } } } }; /** * \brief This agent is responsible for partitioning a merge path into equal segments * * There are two sorted arrays to be merged into one array. If the first array * is partitioned between parallel workers by slicing it into ranges of equal * size, there could be a significant workload imbalance. The imbalance is * caused by the fact that the distribution of elements from the second array * is unknown beforehand. Instead, the MergePath is partitioned between workers. * This approach guarantees an equal amount of work being assigned to each worker. 
* * This approach is outlined in the paper: * Odeh et al, "Merge Path - Parallel Merging Made Simple" * doi:10.1109/IPDPSW.2012.202 */ template < typename KeyIteratorT, typename OffsetT, typename CompareOpT, typename KeyT> struct AgentPartition { bool ping; KeyIteratorT keys_ping; KeyT *keys_pong; OffsetT keys_count; OffsetT partition_idx; OffsetT *merge_partitions; CompareOpT compare_op; OffsetT target_merged_tiles_number; int items_per_tile; __device__ __forceinline__ AgentPartition(bool ping, KeyIteratorT keys_ping, KeyT *keys_pong, OffsetT keys_count, OffsetT partition_idx, OffsetT *merge_partitions, CompareOpT compare_op, OffsetT target_merged_tiles_number, int items_per_tile) : ping(ping) , keys_ping(keys_ping) , keys_pong(keys_pong) , keys_count(keys_count) , partition_idx(partition_idx) , merge_partitions(merge_partitions) , compare_op(compare_op) , target_merged_tiles_number(target_merged_tiles_number) , items_per_tile(items_per_tile) {} __device__ __forceinline__ void Process() { OffsetT merged_tiles_number = target_merged_tiles_number / 2; // target_merged_tiles_number is a power of two. OffsetT mask = target_merged_tiles_number - 1; // The first tile number in the tiles group being merged, equal to: // target_merged_tiles_number * (partition_idx / target_merged_tiles_number) OffsetT list = ~mask & partition_idx; OffsetT start = items_per_tile * list; OffsetT size = items_per_tile * merged_tiles_number; // Tile number within the tile group being merged, equal to: // partition_idx / target_merged_tiles_number OffsetT local_tile_idx = mask & partition_idx; OffsetT keys1_beg = (cub::min)(keys_count, start); OffsetT keys1_end = (cub::min)(keys_count, start + size); OffsetT keys2_beg = keys1_end; OffsetT keys2_end = (cub::min)(keys_count, keys2_beg + size); OffsetT partition_at = (cub::min)(keys2_end - keys1_beg, items_per_tile * local_tile_idx); OffsetT partition_diag = ping ? MergePath<KeyT>(keys_ping + keys1_beg, keys_ping + keys2_beg, keys1_end - keys1_beg, keys2_end - keys2_beg, partition_at, compare_op) : MergePath<KeyT>(keys_pong + keys1_beg, keys_pong + keys2_beg, keys1_end - keys1_beg, keys2_end - keys2_beg, partition_at, compare_op); merge_partitions[partition_idx] = keys1_beg + partition_diag; } }; /// \brief The agent is responsible for merging N consecutive sorted arrays into N/2 sorted arrays. 
template < typename Policy, typename KeyIteratorT, typename ValueIteratorT, typename OffsetT, typename CompareOpT, typename KeyT, typename ValueT> struct AgentMerge { //--------------------------------------------------------------------- // Types and constants //--------------------------------------------------------------------- using KeysLoadPingIt = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator<Policy, KeyIteratorT>::type; using ItemsLoadPingIt = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator<Policy, ValueIteratorT>::type; using KeysLoadPongIt = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator<Policy, KeyT *>::type; using ItemsLoadPongIt = typename THRUST_NS_QUALIFIER::cuda_cub::core::LoadIterator<Policy, ValueT *>::type; using KeysOutputPongIt = KeyIteratorT; using ItemsOutputPongIt = ValueIteratorT; using KeysOutputPingIt = KeyT*; using ItemsOutputPingIt = ValueT*; using BlockStoreKeysPong = typename BlockStoreType<Policy, KeysOutputPongIt>::type; using BlockStoreItemsPong = typename BlockStoreType<Policy, ItemsOutputPongIt>::type; using BlockStoreKeysPing = typename BlockStoreType<Policy, KeysOutputPingIt>::type; using BlockStoreItemsPing = typename BlockStoreType<Policy, ItemsOutputPingIt>::type; /// Parameterized BlockReduce primitive union _TempStorage { typename BlockStoreKeysPing::TempStorage store_keys_ping; typename BlockStoreItemsPing::TempStorage store_items_ping; typename BlockStoreKeysPong::TempStorage store_keys_pong; typename BlockStoreItemsPong::TempStorage store_items_pong; KeyT keys_shared[Policy::ITEMS_PER_TILE + 1]; ValueT items_shared[Policy::ITEMS_PER_TILE + 1]; }; /// Alias wrapper allowing storage to be unioned struct TempStorage : Uninitialized<_TempStorage> {}; static constexpr bool KEYS_ONLY = Equals<ValueT, NullType>::VALUE; static constexpr int BLOCK_THREADS = Policy::BLOCK_THREADS; static constexpr int ITEMS_PER_THREAD = Policy::ITEMS_PER_THREAD; static constexpr int ITEMS_PER_TILE = Policy::ITEMS_PER_TILE; static constexpr int SHARED_MEMORY_SIZE = static_cast<int>(sizeof(TempStorage)); //--------------------------------------------------------------------- // Per thread data //--------------------------------------------------------------------- bool ping; _TempStorage& storage; KeysLoadPingIt keys_in_ping; ItemsLoadPingIt items_in_ping; KeysLoadPongIt keys_in_pong; ItemsLoadPongIt items_in_pong; OffsetT keys_count; KeysOutputPongIt keys_out_pong; ItemsOutputPongIt items_out_pong; KeysOutputPingIt keys_out_ping; ItemsOutputPingIt items_out_ping; CompareOpT compare_op; OffsetT *merge_partitions; OffsetT target_merged_tiles_number; //--------------------------------------------------------------------- // Utility functions //--------------------------------------------------------------------- /** * \brief Concatenates up to ITEMS_PER_THREAD elements from input{1,2} into output array * * Reads data in a coalesced fashion [BLOCK_THREADS * item + tid] and * stores the result in output[item]. */ template <bool IS_FULL_TILE, class T, class It1, class It2> __device__ __forceinline__ void gmem_to_reg(T (&output)[ITEMS_PER_THREAD], It1 input1, It2 input2, int count1, int count2) { if (IS_FULL_TILE) { #pragma unroll for (int item = 0; item < ITEMS_PER_THREAD; ++item) { int idx = BLOCK_THREADS * item + threadIdx.x; output[item] = (idx < count1) ? 
input1[idx] : input2[idx - count1]; } } else { #pragma unroll for (int item = 0; item < ITEMS_PER_THREAD; ++item) { int idx = BLOCK_THREADS * item + threadIdx.x; if (idx < count1 + count2) { output[item] = (idx < count1) ? input1[idx] : input2[idx - count1]; } } } } /// \brief Stores data in a coalesced fashion in[item] -> out[BLOCK_THREADS * item + tid] template <class T, class It> __device__ __forceinline__ void reg_to_shared(It output, T (&input)[ITEMS_PER_THREAD]) { #pragma unroll for (int item = 0; item < ITEMS_PER_THREAD; ++item) { int idx = BLOCK_THREADS * item + threadIdx.x; output[idx] = input[item]; } } template <bool IS_FULL_TILE> __device__ __forceinline__ void consume_tile(int tid, OffsetT tile_idx, OffsetT tile_base, int count) { OffsetT partition_beg = merge_partitions[tile_idx + 0]; OffsetT partition_end = merge_partitions[tile_idx + 1]; // target_merged_tiles_number is a power of two. OffsetT merged_tiles_number = target_merged_tiles_number / 2; OffsetT mask = target_merged_tiles_number - 1; // The first tile number in the tiles group being merged, equal to: // target_merged_tiles_number * (tile_idx / target_merged_tiles_number) OffsetT list = ~mask & tile_idx; OffsetT start = ITEMS_PER_TILE * list; OffsetT size = ITEMS_PER_TILE * merged_tiles_number; OffsetT diag = ITEMS_PER_TILE * tile_idx - start; OffsetT keys1_beg = partition_beg; OffsetT keys1_end = partition_end; OffsetT keys2_beg = (cub::min)(keys_count, 2 * start + size + diag - partition_beg); OffsetT keys2_end = (cub::min)(keys_count, 2 * start + size + diag + ITEMS_PER_TILE - partition_end); // Check if it's the last tile in the tile group being merged if (mask == (mask & tile_idx)) { keys1_end = (cub::min)(keys_count, start + size); keys2_end = (cub::min)(keys_count, start + size * 2); } // number of keys per tile // int num_keys1 = static_cast<int>(keys1_end - keys1_beg); int num_keys2 = static_cast<int>(keys2_end - keys2_beg); // load keys1 & keys2 KeyT keys_local[ITEMS_PER_THREAD]; if (ping) { gmem_to_reg<IS_FULL_TILE>(keys_local, keys_in_ping + keys1_beg, keys_in_ping + keys2_beg, num_keys1, num_keys2); } else { gmem_to_reg<IS_FULL_TILE>(keys_local, keys_in_pong + keys1_beg, keys_in_pong + keys2_beg, num_keys1, num_keys2); } reg_to_shared(&storage.keys_shared[0], keys_local); // preload items into registers already // ValueT items_local[ITEMS_PER_THREAD]; if (!KEYS_ONLY) { if (ping) { gmem_to_reg<IS_FULL_TILE>(items_local, items_in_ping + keys1_beg, items_in_ping + keys2_beg, num_keys1, num_keys2); } else { gmem_to_reg<IS_FULL_TILE>(items_local, items_in_pong + keys1_beg, items_in_pong + keys2_beg, num_keys1, num_keys2); } } CTA_SYNC(); // use binary search in shared memory // to find merge path for each of thread // we can use int type here, because the number of // items in shared memory is limited // int diag0_local = (cub::min)(num_keys1 + num_keys2, ITEMS_PER_THREAD * tid); int keys1_beg_local = MergePath<KeyT>(&storage.keys_shared[0], &storage.keys_shared[num_keys1], num_keys1, num_keys2, diag0_local, compare_op); int keys1_end_local = num_keys1; int keys2_beg_local = diag0_local - keys1_beg_local; int keys2_end_local = num_keys2; int num_keys1_local = keys1_end_local - keys1_beg_local; int num_keys2_local = keys2_end_local - keys2_beg_local; // perform serial merge // int indices[ITEMS_PER_THREAD]; SerialMerge(&storage.keys_shared[0], keys1_beg_local, keys2_beg_local + num_keys1, num_keys1_local, num_keys2_local, keys_local, indices, compare_op); CTA_SYNC(); // write keys // if (ping) { if 
(IS_FULL_TILE) { BlockStoreKeysPing(storage.store_keys_ping) .Store(keys_out_ping + tile_base, keys_local); } else { BlockStoreKeysPing(storage.store_keys_ping) .Store(keys_out_ping + tile_base, keys_local, num_keys1 + num_keys2); } } else { if (IS_FULL_TILE) { BlockStoreKeysPong(storage.store_keys_pong) .Store(keys_out_pong + tile_base, keys_local); } else { BlockStoreKeysPong(storage.store_keys_pong) .Store(keys_out_pong + tile_base, keys_local, num_keys1 + num_keys2); } } // if items are provided, merge them if (!KEYS_ONLY) { CTA_SYNC(); reg_to_shared(&storage.items_shared[0], items_local); CTA_SYNC(); // gather items from shared mem // #pragma unroll for (int item = 0; item < ITEMS_PER_THREAD; ++item) { items_local[item] = storage.items_shared[indices[item]]; } CTA_SYNC(); // write from reg to gmem // if (ping) { if (IS_FULL_TILE) { BlockStoreItemsPing(storage.store_items_ping) .Store(items_out_ping + tile_base, items_local); } else { BlockStoreItemsPing(storage.store_items_ping) .Store(items_out_ping + tile_base, items_local, count); } } else { if (IS_FULL_TILE) { BlockStoreItemsPong(storage.store_items_pong) .Store(items_out_pong + tile_base, items_local); } else { BlockStoreItemsPong(storage.store_items_pong) .Store(items_out_pong + tile_base, items_local, count); } } } } __device__ __forceinline__ AgentMerge(bool ping_, TempStorage &storage_, KeysLoadPingIt keys_in_ping_, ItemsLoadPingIt items_in_ping_, KeysLoadPongIt keys_in_pong_, ItemsLoadPongIt items_in_pong_, OffsetT keys_count_, KeysOutputPingIt keys_out_ping_, ItemsOutputPingIt items_out_ping_, KeysOutputPongIt keys_out_pong_, ItemsOutputPongIt items_out_pong_, CompareOpT compare_op_, OffsetT *merge_partitions_, OffsetT target_merged_tiles_number_) : ping(ping_) , storage(storage_.Alias()) , keys_in_ping(keys_in_ping_) , items_in_ping(items_in_ping_) , keys_in_pong(keys_in_pong_) , items_in_pong(items_in_pong_) , keys_count(keys_count_) , keys_out_pong(keys_out_pong_) , items_out_pong(items_out_pong_) , keys_out_ping(keys_out_ping_) , items_out_ping(items_out_ping_) , compare_op(compare_op_) , merge_partitions(merge_partitions_) , target_merged_tiles_number(target_merged_tiles_number_) {} __device__ __forceinline__ void Process() { int tile_idx = static_cast<int>(blockIdx.x); int num_tiles = static_cast<int>(gridDim.x); OffsetT tile_base = OffsetT(tile_idx) * ITEMS_PER_TILE; int tid = static_cast<int>(threadIdx.x); int items_in_tile = static_cast<int>( (cub::min)(static_cast<OffsetT>(ITEMS_PER_TILE), keys_count - tile_base)); if (tile_idx < num_tiles - 1) { consume_tile<true>(tid, tile_idx, tile_base, ITEMS_PER_TILE); } else { consume_tile<false>(tid, tile_idx, tile_base, items_in_tile); } } }; CUB_NAMESPACE_END
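// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not CUB code): AgentPartition above places
// cut points along "merge path" diagonals so that every tile merges exactly
// ITEMS_PER_TILE output elements regardless of how the two input ranges
// interleave.  The function below performs that diagonal binary search on the
// host; cub::MergePath does the equivalent search on the device.  The names
// and the driver loop in main() are hypothetical and only for illustration.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cstdio>
#include <vector>

// Returns how many elements of `a` land on or before output diagonal `diag`
// when merging sorted ranges `a` and `b` with operator<.
static int merge_path(const std::vector<int>& a, const std::vector<int>& b, int diag) {
    int lo = std::max(0, diag - static_cast<int>(b.size()));
    int hi = std::min(diag, static_cast<int>(a.size()));
    while (lo < hi) {
        int mid = (lo + hi) / 2;
        // a[mid] and b[diag - 1 - mid] lie on the same cross-diagonal.
        if (a[mid] < b[diag - 1 - mid])
            lo = mid + 1;   // the cut point is further along `a`
        else
            hi = mid;       // the cut point is at or before `mid`
    }
    return lo;
}

int main() {
    std::vector<int> a = {1, 3, 5, 7, 9, 11};
    std::vector<int> b = {2, 4, 6, 8, 10, 12};
    const int items_per_tile = 4;  // analogous to Policy::ITEMS_PER_TILE

    // Each tile t produces output elements [t*items_per_tile, (t+1)*items_per_tile);
    // the diagonal search tells it where to start reading in `a` (and, by
    // subtraction, in `b`), which is exactly the balance property the agent
    // comment above describes.
    for (int t = 0; t * items_per_tile < static_cast<int>(a.size() + b.size()); ++t) {
        int diag = t * items_per_tile;
        int a_begin = merge_path(a, b, diag);
        int b_begin = diag - a_begin;
        printf("tile %d starts at a[%d], b[%d]\n", t, a_begin, b_begin);
    }
    return 0;
}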
#include <cstdint> template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename Iterator5> __global__ void reduce_by_key_kernel(ExecutionPolicy exec, Iterator1 keys_first, Iterator1 keys_last, Iterator2 values_first, Iterator3 keys_result, Iterator4 values_result, Iterator5 result) { *result = thrust::reduce_by_key(exec, keys_first, keys_last, values_first, keys_result, values_result); } template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename BinaryPredicate, typename Iterator5> __global__ void reduce_by_key_kernel(ExecutionPolicy exec, Iterator1 keys_first, Iterator1 keys_last, Iterator2 values_first, Iterator3 keys_result, Iterator4 values_result, BinaryPredicate pred, Iterator5 result) { *result = thrust::reduce_by_key(exec, keys_first, keys_last, values_first, keys_result, values_result, pred); } template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4, typename BinaryPredicate, typename BinaryFunction, typename Iterator5> __global__ void reduce_by_key_kernel(ExecutionPolicy exec, Iterator1 keys_first, Iterator1 keys_last, Iterator2 values_first, Iterator3 keys_result, Iterator4 values_result, BinaryPredicate pred, BinaryFunction binary_op, Iterator5 result) { *result = thrust::reduce_by_key(exec, keys_first, keys_last, values_first, keys_result, values_result, pred, binary_op); } template<typename T> struct is_equal_div_10_reduce { __host__ __device__ bool operator()(const T x, const T& y) const { return ((int) x / 10) == ((int) y / 10); } }; template<typename Vector> void initialize_keys(Vector& keys) { keys.resize(9); keys[0] = 11; keys[1] = 11; keys[2] = 21; keys[3] = 20; keys[4] = 21; keys[5] = 21; keys[6] = 21; keys[7] = 37; keys[8] = 37; } template<typename Vector> void initialize_values(Vector& values) { values.resize(9); values[0] = 0; values[1] = 1; values[2] = 2; values[3] = 3; values[4] = 4; values[5] = 5; values[6] = 6; values[7] = 7; values[8] = 8; } template<typename ExecutionPolicy> void TestReduceByKeyDevice(ExecutionPolicy exec) { typedef int T; thrust::device_vector<T> keys; thrust::device_vector<T> values; typedef typename thrust::pair< typename thrust::device_vector<T>::iterator, typename thrust::device_vector<T>::iterator > iterator_pair; thrust::device_vector<iterator_pair> new_last_vec(1); iterator_pair new_last; // basic test initialize_keys(keys); initialize_values(values); thrust::device_vector<T> output_keys(keys.size()); thrust::device_vector<T> output_values(values.size()); reduce_by_key_kernel<<<1,1>>>(exec, keys.begin(), keys.end(), values.begin(), output_keys.begin(), output_values.begin(), new_last_vec.begin()); { cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); } new_last = new_last_vec[0]; ASSERT_EQUAL(new_last.first - output_keys.begin(), 5); ASSERT_EQUAL(new_last.second - output_values.begin(), 5); ASSERT_EQUAL(output_keys[0], 11); ASSERT_EQUAL(output_keys[1], 21); ASSERT_EQUAL(output_keys[2], 20); ASSERT_EQUAL(output_keys[3], 21); ASSERT_EQUAL(output_keys[4], 37); ASSERT_EQUAL(output_values[0], 1); ASSERT_EQUAL(output_values[1], 2); ASSERT_EQUAL(output_values[2], 3); ASSERT_EQUAL(output_values[3], 15); ASSERT_EQUAL(output_values[4], 15); // test BinaryPredicate initialize_keys(keys); initialize_values(values); reduce_by_key_kernel<<<1,1>>>(exec, keys.begin(), keys.end(), values.begin(), output_keys.begin(), output_values.begin(), 
is_equal_div_10_reduce<T>(), new_last_vec.begin()); { cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); } new_last = new_last_vec[0]; ASSERT_EQUAL(new_last.first - output_keys.begin(), 3); ASSERT_EQUAL(new_last.second - output_values.begin(), 3); ASSERT_EQUAL(output_keys[0], 11); ASSERT_EQUAL(output_keys[1], 21); ASSERT_EQUAL(output_keys[2], 37); ASSERT_EQUAL(output_values[0], 1); ASSERT_EQUAL(output_values[1], 20); ASSERT_EQUAL(output_values[2], 15); // test BinaryFunction initialize_keys(keys); initialize_values(values); reduce_by_key_kernel<<<1,1>>>(exec, keys.begin(), keys.end(), values.begin(), output_keys.begin(), output_values.begin(), thrust::equal_to<T>(), thrust::plus<T>(), new_last_vec.begin()); { cudaError_t const err = cudaDeviceSynchronize(); ASSERT_EQUAL(cudaSuccess, err); } new_last = new_last_vec[0]; ASSERT_EQUAL(new_last.first - output_keys.begin(), 5); ASSERT_EQUAL(new_last.second - output_values.begin(), 5); ASSERT_EQUAL(output_keys[0], 11); ASSERT_EQUAL(output_keys[1], 21); ASSERT_EQUAL(output_keys[2], 20); ASSERT_EQUAL(output_keys[3], 21); ASSERT_EQUAL(output_keys[4], 37); ASSERT_EQUAL(output_values[0], 1); ASSERT_EQUAL(output_values[1], 2); ASSERT_EQUAL(output_values[2], 3); ASSERT_EQUAL(output_values[3], 15); ASSERT_EQUAL(output_values[4], 15); } void TestReduceByKeyDeviceSeq() { TestReduceByKeyDevice(thrust::seq); } DECLARE_UNITTEST(TestReduceByKeyDeviceSeq); void TestReduceByKeyDeviceDevice() { TestReduceByKeyDevice(thrust::device); } DECLARE_UNITTEST(TestReduceByKeyDeviceDevice); void TestReduceByKeyDeviceNoSync() { TestReduceByKeyDevice(thrust::cuda::par_nosync); } DECLARE_UNITTEST(TestReduceByKeyDeviceNoSync); template<typename ExecutionPolicy> void TestReduceByKeyCudaStreams(ExecutionPolicy policy) { typedef thrust::device_vector<int> Vector; typedef Vector::value_type T; Vector keys; Vector values; thrust::pair<Vector::iterator, Vector::iterator> new_last; // basic test initialize_keys(keys); initialize_values(values); Vector output_keys(keys.size()); Vector output_values(values.size()); cudaStream_t s; cudaStreamCreate(&s); auto streampolicy = policy.on(s); new_last = thrust::reduce_by_key(streampolicy, keys.begin(), keys.end(), values.begin(), output_keys.begin(), output_values.begin()); ASSERT_EQUAL(new_last.first - output_keys.begin(), 5); ASSERT_EQUAL(new_last.second - output_values.begin(), 5); ASSERT_EQUAL(output_keys[0], 11); ASSERT_EQUAL(output_keys[1], 21); ASSERT_EQUAL(output_keys[2], 20); ASSERT_EQUAL(output_keys[3], 21); ASSERT_EQUAL(output_keys[4], 37); ASSERT_EQUAL(output_values[0], 1); ASSERT_EQUAL(output_values[1], 2); ASSERT_EQUAL(output_values[2], 3); ASSERT_EQUAL(output_values[3], 15); ASSERT_EQUAL(output_values[4], 15); // test BinaryPredicate initialize_keys(keys); initialize_values(values); new_last = thrust::reduce_by_key(streampolicy, keys.begin(), keys.end(), values.begin(), output_keys.begin(), output_values.begin(), is_equal_div_10_reduce<T>()); ASSERT_EQUAL(new_last.first - output_keys.begin(), 3); ASSERT_EQUAL(new_last.second - output_values.begin(), 3); ASSERT_EQUAL(output_keys[0], 11); ASSERT_EQUAL(output_keys[1], 21); ASSERT_EQUAL(output_keys[2], 37); ASSERT_EQUAL(output_values[0], 1); ASSERT_EQUAL(output_values[1], 20); ASSERT_EQUAL(output_values[2], 15); // test BinaryFunction initialize_keys(keys); initialize_values(values); new_last = thrust::reduce_by_key(streampolicy, keys.begin(), keys.end(), values.begin(), output_keys.begin(), output_values.begin(), thrust::equal_to<T>(), 
thrust::plus<T>()); ASSERT_EQUAL(new_last.first - output_keys.begin(), 5); ASSERT_EQUAL(new_last.second - output_values.begin(), 5); ASSERT_EQUAL(output_keys[0], 11); ASSERT_EQUAL(output_keys[1], 21); ASSERT_EQUAL(output_keys[2], 20); ASSERT_EQUAL(output_keys[3], 21); ASSERT_EQUAL(output_keys[4], 37); ASSERT_EQUAL(output_values[0], 1); ASSERT_EQUAL(output_values[1], 2); ASSERT_EQUAL(output_values[2], 3); ASSERT_EQUAL(output_values[3], 15); ASSERT_EQUAL(output_values[4], 15); cudaStreamDestroy(s); } void TestReduceByKeyCudaStreamsSync() { TestReduceByKeyCudaStreams(thrust::cuda::par); } DECLARE_UNITTEST(TestReduceByKeyCudaStreamsSync); void TestReduceByKeyCudaStreamsNoSync() { TestReduceByKeyCudaStreams(thrust::cuda::par_nosync); } DECLARE_UNITTEST(TestReduceByKeyCudaStreamsNoSync); // Maps indices to key ids class div_op : public thrust::unary_function<std::int64_t, std::int64_t> { std::int64_t m_divisor; public: __host__ div_op(std::int64_t divisor) : m_divisor(divisor) {} __host__ __device__ std::int64_t operator()(std::int64_t x) const { return x / m_divisor; } }; // Produces unique sequence for key class mod_op : public thrust::unary_function<std::int64_t, std::int64_t> { std::int64_t m_divisor; public: __host__ mod_op(std::int64_t divisor) : m_divisor(divisor) {} __host__ __device__ std::int64_t operator()(std::int64_t x) const { // div: 2 // idx: 0 1 2 3 4 5 // key: 0 0 | 1 1 | 2 2 // mod: 0 1 | 0 1 | 0 1 // ret: 0 1 1 2 2 3 return (x % m_divisor) + (x / m_divisor); } }; void TestReduceByKeyWithBigIndexesHelper(int magnitude) { const std::int64_t key_size_magnitude = 8; ASSERT_EQUAL(true, key_size_magnitude < magnitude); const std::int64_t num_items = 1ll << magnitude; const std::int64_t num_unique_keys = 1ll << key_size_magnitude; // Size of each key group const std::int64_t key_size = num_items / num_unique_keys; using counting_it = thrust::counting_iterator<std::int64_t>; using transform_key_it = thrust::transform_iterator<div_op, counting_it>; using transform_val_it = thrust::transform_iterator<mod_op, counting_it>; counting_it count_begin(0ll); counting_it count_end = count_begin + num_items; ASSERT_EQUAL(static_cast<std::int64_t>(thrust::distance(count_begin, count_end)), num_items); transform_key_it keys_begin(count_begin, div_op{key_size}); transform_key_it keys_end(count_end, div_op{key_size}); transform_val_it values_begin(count_begin, mod_op{key_size}); thrust::device_vector<std::int64_t> output_keys(num_unique_keys); thrust::device_vector<std::int64_t> output_values(num_unique_keys); // example: // items: 6 // unique_keys: 2 // key_size: 3 // keys: 0 0 0 | 1 1 1 // values: 0 1 2 | 1 2 3 // result: 3 6 = sum(range(key_size)) + key_size * key_id thrust::reduce_by_key(keys_begin, keys_end, values_begin, output_keys.begin(), output_values.begin()); ASSERT_EQUAL( true, thrust::equal(output_keys.begin(), output_keys.end(), count_begin)); thrust::host_vector<std::int64_t> result = output_values; const std::int64_t sum = (key_size - 1) * key_size / 2; for (std::int64_t key_id = 0; key_id < num_unique_keys; key_id++) { ASSERT_EQUAL(result[key_id], sum + key_id * key_size); } } void TestReduceByKeyWithBigIndexes() { TestReduceByKeyWithBigIndexesHelper(30); TestReduceByKeyWithBigIndexesHelper(31); TestReduceByKeyWithBigIndexesHelper(32); TestReduceByKeyWithBigIndexesHelper(33); } DECLARE_UNITTEST(TestReduceByKeyWithBigIndexes);
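// ---------------------------------------------------------------------------
// Minimal standalone sketch (not part of the test suite above): the same
// thrust::reduce_by_key call the tests exercise, run directly from host code
// with the tests' key/value data.  Runs of equal consecutive keys collapse to
// one output key and their values are summed with the default plus reduction,
// which is where the expected outputs (11, 21, 20, 21, 37) and
// (1, 2, 3, 15, 15) in the assertions come from.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>

int main() {
    const int h_keys[]   = {11, 11, 21, 20, 21, 21, 21, 37, 37};
    const int h_values[] = { 0,  1,  2,  3,  4,  5,  6,  7,  8};
    thrust::device_vector<int> keys(h_keys, h_keys + 9);
    thrust::device_vector<int> values(h_values, h_values + 9);

    thrust::device_vector<int> out_keys(keys.size());
    thrust::device_vector<int> out_values(values.size());

    // Default predicate is equal_to, default reduction is plus.
    auto ends = thrust::reduce_by_key(keys.begin(), keys.end(), values.begin(),
                                      out_keys.begin(), out_values.begin());
    int num_segments = static_cast<int>(ends.first - out_keys.begin());

    for (int i = 0; i < num_segments; ++i)
        printf("key %d -> sum %d\n",
               static_cast<int>(out_keys[i]), static_cast<int>(out_values[i]));
    // Prints: 11->1, 21->2, 20->3, 21->15, 37->15
    return 0;
}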
#include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 1024; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } __device__ float dmcn_im2col_bilinear(const float *bottom_data, const int data_width, const int height, const int width, float h, float w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; float lh = h - h_low; float lw = w - w_low; float hh = 1 - lh, hw = 1 - lw; float v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; float v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; float v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; float v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } __device__ float dmcn_get_gradient_weight(float argmax_h, float argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; float weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } __device__ float dmcn_get_coordinate_weight(float argmax_h, float argmax_w, const int height, const int width, const float *im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { //empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; float weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + 
argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } __global__ void modulated_deformable_im2col_gpu_kernel(const int n, const float *data_im, const float *data_offset, const float *data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, float *data_col) { // launch channels * batch_size * height_col * width_col cores CUDA_KERNEL_LOOP(index, n) { // NOTE(CharlesShang): different from Dai Jifeng's MXNet implementation, col_buffer is of shape (c*kw*kh, N, oh, ow) // here columns is of shape (N, c*kw*kh, oh * ow), need to adapt axis // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; // const int b_col = (index / width_col / height_col) % batch_size; const int b_col = (index / width_col / height_col / num_channels) % batch_size; // const int c_im = (index / width_col / height_col) / batch_size; const int c_im = (index / width_col / height_col) % num_channels; // const int c_col = c_im * kernel_h * kernel_w; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; // float *data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; float *data_col_ptr = data_col + ((b_col * num_channels * kernel_w * kernel_h + c_col) * height_col + h_col) * width_col + w_col; //const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * height + h_in) * width + w_in; const float *data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const float *data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const float *data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const float offset_h = data_offset_ptr[data_offset_h_ptr]; const float offset_w = data_offset_ptr[data_offset_w_ptr]; const float mask = data_mask_ptr[data_mask_hw_ptr]; float val = static_cast<float>(0); const float h_im = h_in + i * dilation_h + offset_h; const float w_im = w_in + j * dilation_w + offset_w; //if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { //const float map_h = i * dilation_h + offset_h; //const float map_w = j * dilation_w + offset_w; //const int cur_height = height - h_in; //const int cur_width = width - w_in; //val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, cur_width, map_h, map_w); val = dmcn_im2col_bilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * 
mask; // data_col_ptr += batch_size * height_col * width_col; data_col_ptr += height_col * width_col; } } } } __global__ void modulated_deformable_col2im_gpu_kernel(const int n, const float *data_col, const float *data_offset, const float *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, float *grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const float *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const float *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; const float offset_h = data_offset_ptr[data_offset_h_ptr]; const float offset_w = data_offset_ptr[data_offset_w_ptr]; const float mask = data_mask_ptr[data_mask_hw_ptr]; const float cur_inv_h_data = h_in + i * dilation_h + offset_h; const float cur_inv_w_data = w_in + j * dilation_w + offset_w; const float cur_top_grad = data_col[index] * mask; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; float weight = dmcn_get_gradient_weight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } __global__ void modulated_deformable_col2im_coord_gpu_kernel(const int n, const float *data_col, const float *data_im, const float *data_offset, const float *data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, float *grad_offset, float *grad_mask) { CUDA_KERNEL_LOOP(index, n) { float val = 0, mval = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute 
the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const float *data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const float *data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const float *data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const float *data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); const float offset_h = data_offset_ptr[data_offset_h_ptr]; const float offset_w = data_offset_ptr[data_offset_w_ptr]; const float mask = data_mask_ptr[data_mask_hw_ptr]; float inv_h = h_in + i * dilation_h + offset_h; float inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const float weight = dmcn_get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos] * mask; cnt += 1; } // KERNEL_ASSIGN(grad_offset[index], offset_req, val); grad_offset[index] = val; if (offset_c % 2 == 0) // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w], mask_req, mval); grad_mask[(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; } } void modulated_deformable_im2col_cuda(cudaStream_t stream, const float* data_im, const float* data_offset, const float* data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; modulated_deformable_im2col_gpu_kernel <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_im, data_offset, data_mask, height_im, width_im, kernel_h, kernel_w, pad_h, 
pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_col2im_cuda(cudaStream_t stream, const float* data_col, const float* data_offset, const float* data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* grad_im){ const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; modulated_deformable_col2im_gpu_kernel <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col, data_offset, data_mask, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_col2im_coord_cuda(cudaStream_t stream, const float* data_col, const float* data_im, const float* data_offset, const float* data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, float* grad_offset, float* grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; modulated_deformable_col2im_coord_gpu_kernel <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col, data_im, data_offset, data_mask, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset, grad_mask); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); } }
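// ---------------------------------------------------------------------------
// Editor's note: the function below is an illustrative usage sketch and is not
// part of the original extension. It shows how the host wrappers above expect
// their spatial arguments to be derived from the convolution parameters; the
// function name and the raw device pointers are assumptions made for the
// example (in the full extension these would come from ATen tensors instead).
// ---------------------------------------------------------------------------
static void example_launch_im2col(cudaStream_t stream,
                                  const float *d_input,   // (N, C, H, W) on device
                                  const float *d_offset,  // (N, 2*kh*kw*dg, Ho, Wo)
                                  const float *d_mask,    // (N, kh*kw*dg, Ho, Wo)
                                  float *d_columns,       // (N, C*kh*kw, Ho*Wo)
                                  int batch, int channels, int height, int width,
                                  int kernel_h, int kernel_w,
                                  int pad_h, int pad_w,
                                  int stride_h, int stride_w,
                                  int dilation_h, int dilation_w,
                                  int deformable_group)
{
  // Standard convolution output-size formula; height_col/width_col must match
  // the spatial size of the offset and mask tensors.
  const int height_col =
      (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
  const int width_col =
      (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;

  modulated_deformable_im2col_cuda(stream,
                                   d_input, d_offset, d_mask,
                                   batch, channels, height, width,
                                   height_col, width_col,
                                   kernel_h, kernel_w, pad_h, pad_w,
                                   stride_h, stride_w, dilation_h, dilation_w,
                                   deformable_group, d_columns);
}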
the_stack
* \file * The cub::BlockDiscontinuity class provides [<em>collective</em>](index.html#sec0) methods for flagging discontinuities within an ordered set of items partitioned across a CUDA thread block. */ #pragma once #include "../util_type.cuh" #include "../util_ptx.cuh" #include "../util_namespace.cuh" /// Optional outer namespace(s) CUB_NS_PREFIX /// CUB namespace namespace cub { template < typename T, int BLOCK_DIM_X, int BLOCK_DIM_Y = 1, int BLOCK_DIM_Z = 1, int PTX_ARCH = CUB_PTX_ARCH> class BlockAdjacentDifference { private: /****************************************************************************** * Constants and type definitions ******************************************************************************/ /// Constants enum { /// The thread block size in threads BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z, }; /// Shared memory storage layout type (last element from each thread's input) struct _TempStorage { T first_items[BLOCK_THREADS]; T last_items[BLOCK_THREADS]; }; /****************************************************************************** * Utility methods ******************************************************************************/ /// Internal storage allocator __device__ __forceinline__ _TempStorage& PrivateStorage() { __shared__ _TempStorage private_storage; return private_storage; } /// Specialization for when FlagOp has third index param template <typename FlagOp, bool HAS_PARAM = BinaryOpHasIdxParam<T, FlagOp>::HAS_PARAM> struct ApplyOp { // Apply flag operator static __device__ __forceinline__ T FlagT(FlagOp flag_op, const T &a, const T &b, int idx) { return flag_op(b, a, idx); } }; /// Specialization for when FlagOp does not have a third index param template <typename FlagOp> struct ApplyOp<FlagOp, false> { // Apply flag operator static __device__ __forceinline__ T FlagT(FlagOp flag_op, const T &a, const T &b, int /*idx*/) { return flag_op(b, a); } }; /// Templated unrolling of item comparison (inductive case) template <int ITERATION, int MAX_ITERATIONS> struct Iterate { // Head flags template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> static __device__ __forceinline__ void FlagHeads( int linear_tid, FlagT (&flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&preds)[ITEMS_PER_THREAD], ///< [out] Calling thread's predecessor items FlagOp flag_op) ///< [in] Binary boolean flag predicate { preds[ITERATION] = input[ITERATION - 1]; flags[ITERATION] = ApplyOp<FlagOp>::FlagT( flag_op, preds[ITERATION], input[ITERATION], (linear_tid * ITEMS_PER_THREAD) + ITERATION); Iterate<ITERATION + 1, MAX_ITERATIONS>::FlagHeads(linear_tid, flags, input, preds, flag_op); } // Tail flags template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> static __device__ __forceinline__ void FlagTails( int linear_tid, FlagT (&flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op) ///< [in] Binary boolean flag predicate { flags[ITERATION] = ApplyOp<FlagOp>::FlagT( flag_op, input[ITERATION], input[ITERATION + 1], (linear_tid * ITEMS_PER_THREAD) + ITERATION + 1); Iterate<ITERATION + 1, MAX_ITERATIONS>::FlagTails(linear_tid, flags, input, flag_op); } }; /// Templated unrolling of item comparison (termination case) template <int MAX_ITERATIONS> struct Iterate<MAX_ITERATIONS, MAX_ITERATIONS> { // Head flags template < int ITEMS_PER_THREAD, typename FlagT, 
typename FlagOp> static __device__ __forceinline__ void FlagHeads( int /*linear_tid*/, FlagT (&/*flags*/)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&/*input*/)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&/*preds*/)[ITEMS_PER_THREAD], ///< [out] Calling thread's predecessor items FlagOp /*flag_op*/) ///< [in] Binary boolean flag predicate {} // Tail flags template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> static __device__ __forceinline__ void FlagTails( int /*linear_tid*/, FlagT (&/*flags*/)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&/*input*/)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp /*flag_op*/) ///< [in] Binary boolean flag predicate {} }; /****************************************************************************** * Thread fields ******************************************************************************/ /// Shared storage reference _TempStorage &temp_storage; /// Linear thread-id unsigned int linear_tid; public: /// \smemstorage{BlockDiscontinuity} struct TempStorage : Uninitialized<_TempStorage> {}; /******************************************************************//** * \name Collective constructors *********************************************************************/ //@{ /** * \brief Collective constructor using a private static allocation of shared memory as temporary storage. */ __device__ __forceinline__ BlockAdjacentDifference() : temp_storage(PrivateStorage()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} /** * \brief Collective constructor using the specified memory allocation as temporary storage. */ __device__ __forceinline__ BlockAdjacentDifference( TempStorage &temp_storage) ///< [in] Reference to memory allocation having layout type TempStorage : temp_storage(temp_storage.Alias()), linear_tid(RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z)) {} //@} end member group /******************************************************************//** * \name Head flag operations *********************************************************************/ //@{ #ifndef DOXYGEN_SHOULD_SKIP_THIS // Do not document template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeads( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&preds)[ITEMS_PER_THREAD], ///< [out] Calling thread's predecessor items FlagOp flag_op) ///< [in] Binary boolean flag predicate { // Share last item temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; CTA_SYNC(); if (linear_tid == 0) { // Set flag for first thread-item (preds[0] is undefined) head_flags[0] = 1; } else { preds[0] = temp_storage.last_items[linear_tid - 1]; head_flags[0] = ApplyOp<FlagOp>::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); } // Set head_flags for remaining items Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); } template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeads( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items T (&preds)[ITEMS_PER_THREAD], ///< [out] Calling thread's predecessor items FlagOp flag_op, ///< [in] Binary boolean flag predicate T tile_predecessor_item) ///< [in] 
<b>[<em>thread</em><sub>0</sub> only]</b> Item with which to compare the first tile item (<tt>input<sub>0</sub></tt> from <em>thread</em><sub>0</sub>). { // Share last item temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; CTA_SYNC(); // Set flag for first thread-item preds[0] = (linear_tid == 0) ? tile_predecessor_item : // First thread temp_storage.last_items[linear_tid - 1]; head_flags[0] = ApplyOp<FlagOp>::FlagT(flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); // Set head_flags for remaining items Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); } #endif // DOXYGEN_SHOULD_SKIP_THIS template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeads( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op) ///< [in] Binary boolean flag predicate { T preds[ITEMS_PER_THREAD]; FlagHeads(head_flags, input, preds, flag_op); } template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeads( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op, ///< [in] Binary boolean flag predicate T tile_predecessor_item) ///< [in] <b>[<em>thread</em><sub>0</sub> only]</b> Item with which to compare the first tile item (<tt>input<sub>0</sub></tt> from <em>thread</em><sub>0</sub>). { T preds[ITEMS_PER_THREAD]; FlagHeads(head_flags, input, preds, flag_op, tile_predecessor_item); } template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagTails( FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op) ///< [in] Binary boolean flag predicate { // Share first item temp_storage.first_items[linear_tid] = input[0]; CTA_SYNC(); // Set flag for last thread-item tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ? 1 : // Last thread ApplyOp<FlagOp>::FlagT( flag_op, input[ITEMS_PER_THREAD - 1], temp_storage.first_items[linear_tid + 1], (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); // Set tail_flags for remaining items Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); } template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagTails( FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op, ///< [in] Binary boolean flag predicate T tile_successor_item) ///< [in] <b>[<em>thread</em><sub><tt>BLOCK_THREADS</tt>-1</sub> only]</b> Item with which to compare the last tile item (<tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> from <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>). { // Share first item temp_storage.first_items[linear_tid] = input[0]; CTA_SYNC(); // Set flag for last thread-item T successor_item = (linear_tid == BLOCK_THREADS - 1) ? 
tile_successor_item : // Last thread temp_storage.first_items[linear_tid + 1]; tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp<FlagOp>::FlagT( flag_op, input[ITEMS_PER_THREAD - 1], successor_item, (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); // Set tail_flags for remaining items Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); } template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeadsAndTails( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op) ///< [in] Binary boolean flag predicate { // Share first and last items temp_storage.first_items[linear_tid] = input[0]; temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; CTA_SYNC(); T preds[ITEMS_PER_THREAD]; // Set flag for first thread-item preds[0] = temp_storage.last_items[linear_tid - 1]; if (linear_tid == 0) { head_flags[0] = 1; } else { head_flags[0] = ApplyOp<FlagOp>::FlagT( flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); } // Set flag for last thread-item tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ? 1 : // Last thread ApplyOp<FlagOp>::FlagT( flag_op, input[ITEMS_PER_THREAD - 1], temp_storage.first_items[linear_tid + 1], (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); // Set head_flags for remaining items Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); // Set tail_flags for remaining items Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); } template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeadsAndTails( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags T tile_successor_item, ///< [in] <b>[<em>thread</em><sub><tt>BLOCK_THREADS</tt>-1</sub> only]</b> Item with which to compare the last tile item (<tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> from <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>). T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op) ///< [in] Binary boolean flag predicate { // Share first and last items temp_storage.first_items[linear_tid] = input[0]; temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; CTA_SYNC(); T preds[ITEMS_PER_THREAD]; // Set flag for first thread-item if (linear_tid == 0) { head_flags[0] = 1; } else { preds[0] = temp_storage.last_items[linear_tid - 1]; head_flags[0] = ApplyOp<FlagOp>::FlagT( flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); } // Set flag for last thread-item T successor_item = (linear_tid == BLOCK_THREADS - 1) ? 
tile_successor_item : // Last thread temp_storage.first_items[linear_tid + 1]; tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp<FlagOp>::FlagT( flag_op, input[ITEMS_PER_THREAD - 1], successor_item, (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); // Set head_flags for remaining items Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); // Set tail_flags for remaining items Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); } template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeadsAndTails( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T tile_predecessor_item, ///< [in] <b>[<em>thread</em><sub>0</sub> only]</b> Item with which to compare the first tile item (<tt>input<sub>0</sub></tt> from <em>thread</em><sub>0</sub>). FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op) ///< [in] Binary boolean flag predicate { // Share first and last items temp_storage.first_items[linear_tid] = input[0]; temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; CTA_SYNC(); T preds[ITEMS_PER_THREAD]; // Set flag for first thread-item preds[0] = (linear_tid == 0) ? tile_predecessor_item : // First thread temp_storage.last_items[linear_tid - 1]; head_flags[0] = ApplyOp<FlagOp>::FlagT( flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); // Set flag for last thread-item tail_flags[ITEMS_PER_THREAD - 1] = (linear_tid == BLOCK_THREADS - 1) ? 1 : // Last thread ApplyOp<FlagOp>::FlagT( flag_op, input[ITEMS_PER_THREAD - 1], temp_storage.first_items[linear_tid + 1], (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); // Set head_flags for remaining items Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); // Set tail_flags for remaining items Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); } template < int ITEMS_PER_THREAD, typename FlagT, typename FlagOp> __device__ __forceinline__ void FlagHeadsAndTails( FlagT (&head_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity head_flags T tile_predecessor_item, ///< [in] <b>[<em>thread</em><sub>0</sub> only]</b> Item with which to compare the first tile item (<tt>input<sub>0</sub></tt> from <em>thread</em><sub>0</sub>). FlagT (&tail_flags)[ITEMS_PER_THREAD], ///< [out] Calling thread's discontinuity tail_flags T tile_successor_item, ///< [in] <b>[<em>thread</em><sub><tt>BLOCK_THREADS</tt>-1</sub> only]</b> Item with which to compare the last tile item (<tt>input</tt><sub><em>ITEMS_PER_THREAD</em>-1</sub> from <em>thread</em><sub><em>BLOCK_THREADS</em>-1</sub>). T (&input)[ITEMS_PER_THREAD], ///< [in] Calling thread's input items FlagOp flag_op) ///< [in] Binary boolean flag predicate { // Share first and last items temp_storage.first_items[linear_tid] = input[0]; temp_storage.last_items[linear_tid] = input[ITEMS_PER_THREAD - 1]; CTA_SYNC(); T preds[ITEMS_PER_THREAD]; // Set flag for first thread-item preds[0] = (linear_tid == 0) ? tile_predecessor_item : // First thread temp_storage.last_items[linear_tid - 1]; head_flags[0] = ApplyOp<FlagOp>::FlagT( flag_op, preds[0], input[0], linear_tid * ITEMS_PER_THREAD); // Set flag for last thread-item T successor_item = (linear_tid == BLOCK_THREADS - 1) ? 
tile_successor_item : // Last thread temp_storage.first_items[linear_tid + 1]; tail_flags[ITEMS_PER_THREAD - 1] = ApplyOp<FlagOp>::FlagT( flag_op, input[ITEMS_PER_THREAD - 1], successor_item, (linear_tid * ITEMS_PER_THREAD) + ITEMS_PER_THREAD); // Set head_flags for remaining items Iterate<1, ITEMS_PER_THREAD>::FlagHeads(linear_tid, head_flags, input, preds, flag_op); // Set tail_flags for remaining items Iterate<0, ITEMS_PER_THREAD - 1>::FlagTails(linear_tid, tail_flags, input, flag_op); } }; } // CUB namespace CUB_NS_POSTFIX // Optional outer namespace(s)
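// ---------------------------------------------------------------------------
// Editor's note: illustrative usage sketch, not part of the original header.
// The kernel and functor names below are hypothetical; only the
// BlockAdjacentDifference calls mirror the public methods defined above.
// Assumes a 1-D thread block of 128 threads with 4 items per thread.
// ---------------------------------------------------------------------------
struct AdjacentNotEqual
{
    template <typename T>
    __device__ __forceinline__ bool operator()(const T &a, const T &b) const
    {
        return a != b;   // flag positions where the value changes
    }
};

__global__ void ExampleFlagHeads(const int *d_in, int *d_head_flags)
{
    typedef cub::BlockAdjacentDifference<int, 128> BlockAdjDiff;
    __shared__ typename BlockAdjDiff::TempStorage temp_storage;

    // Blocked arrangement: thread t owns items [4*t, 4*t + 3].
    int items[4];
    int flags[4];
    for (int k = 0; k < 4; ++k)
        items[k] = d_in[threadIdx.x * 4 + k];

    // head_flags[k] == 1 where an item differs from its predecessor
    // (the very first item of thread 0 is always flagged).
    BlockAdjDiff(temp_storage).FlagHeads(flags, items, AdjacentNotEqual());

    for (int k = 0; k < 4; ++k)
        d_head_flags[threadIdx.x * 4 + k] = flags[k];
}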
the_stack
using namespace std::chrono; typedef high_resolution_clock myclock; typedef duration<float> myduration; #define MAX_WG_SIZE 256 template <typename T> T* mem_alloc (const int align, const size_t size) { return (T*) aligned_alloc(align, size * sizeof(T)); } template <typename T> void mem_free (T* p) { free(p); } __device__ float gammafunction(unsigned int n) { if(n == 0) return 0.0f; float x = ((float)n + 0.5f) * logf((float) n) - ((float)n - 1.0f) * logf(expf(1.0f)); return x; } __global__ void epi(const unsigned int* dev_data_zeros, const unsigned int* dev_data_ones, float* dev_scores, const int num_snp, const int PP_zeros, const int PP_ones, const int mask_zeros, const int mask_ones) { int i, j, tid, p, k; float score = FLT_MAX; j = blockDim.x * blockIdx.x + threadIdx.x; i = blockDim.y * blockIdx.y + threadIdx.y; tid = i * num_snp + j; if (j > i && i < num_snp && j < num_snp) { unsigned int ft[2 * 9]; for(k = 0; k < 2 * 9; k++) ft[k] = 0; unsigned int t00, t01, t02, t10, t11, t12, t20, t21, t22; unsigned int di2, dj2; unsigned int* SNPi; unsigned int* SNPj; // Phenotype 0 SNPi = (unsigned int*) &dev_data_zeros[i * 2]; SNPj = (unsigned int*) &dev_data_zeros[j * 2]; for (p = 0; p < 2 * PP_zeros * num_snp - 2 * num_snp; p += 2 * num_snp) { di2 = ~(SNPi[p] | SNPi[p + 1]); dj2 = ~(SNPj[p] | SNPj[p + 1]); t00 = SNPi[p] & SNPj[p]; t01 = SNPi[p] & SNPj[p + 1]; t02 = SNPi[p] & dj2; t10 = SNPi[p + 1] & SNPj[p]; t11 = SNPi[p + 1] & SNPj[p + 1]; t12 = SNPi[p + 1] & dj2; t20 = di2 & SNPj[p]; t21 = di2 & SNPj[p + 1]; t22 = di2 & dj2; ft[0] += __popc(t00); ft[1] += __popc(t01); ft[2] += __popc(t02); ft[3] += __popc(t10); ft[4] += __popc(t11); ft[5] += __popc(t12); ft[6] += __popc(t20); ft[7] += __popc(t21); ft[8] += __popc(t22); } // remainder p = 2 * PP_zeros * num_snp - 2 * num_snp; di2 = ~(SNPi[p] | SNPi[p + 1]); dj2 = ~(SNPj[p] | SNPj[p + 1]); di2 = di2 & mask_zeros; dj2 = dj2 & mask_zeros; t00 = SNPi[p] & SNPj[p]; t01 = SNPi[p] & SNPj[p + 1]; t02 = SNPi[p] & dj2; t10 = SNPi[p + 1] & SNPj[p]; t11 = SNPi[p + 1] & SNPj[p + 1]; t12 = SNPi[p + 1] & dj2; t20 = di2 & SNPj[p]; t21 = di2 & SNPj[p + 1]; t22 = di2 & dj2; ft[0] += __popc(t00); ft[1] += __popc(t01); ft[2] += __popc(t02); ft[3] += __popc(t10); ft[4] += __popc(t11); ft[5] += __popc(t12); ft[6] += __popc(t20); ft[7] += __popc(t21); ft[8] += __popc(t22); // Phenotype 1 SNPi = (unsigned int*) &dev_data_ones[i * 2]; SNPj = (unsigned int*) &dev_data_ones[j * 2]; for(p = 0; p < 2 * PP_ones * num_snp - 2 * num_snp; p += 2 * num_snp) { di2 = ~(SNPi[p] | SNPi[p + 1]); dj2 = ~(SNPj[p] | SNPj[p + 1]); t00 = SNPi[p] & SNPj[p]; t01 = SNPi[p] & SNPj[p + 1]; t02 = SNPi[p] & dj2; t10 = SNPi[p + 1] & SNPj[p]; t11 = SNPi[p + 1] & SNPj[p + 1]; t12 = SNPi[p + 1] & dj2; t20 = di2 & SNPj[p]; t21 = di2 & SNPj[p + 1]; t22 = di2 & dj2; ft[9] += __popc(t00); ft[10] += __popc(t01); ft[11] += __popc(t02); ft[12] += __popc(t10); ft[13] += __popc(t11); ft[14] += __popc(t12); ft[15] += __popc(t20); ft[16] += __popc(t21); ft[17] += __popc(t22); } p = 2 * PP_ones * num_snp - 2 * num_snp; di2 = ~(SNPi[p] | SNPi[p + 1]); dj2 = ~(SNPj[p] | SNPj[p + 1]); di2 = di2 & mask_ones; dj2 = dj2 & mask_ones; t00 = SNPi[p] & SNPj[p]; t01 = SNPi[p] & SNPj[p + 1]; t02 = SNPi[p] & dj2; t10 = SNPi[p + 1] & SNPj[p]; t11 = SNPi[p + 1] & SNPj[p + 1]; t12 = SNPi[p + 1] & dj2; t20 = di2 & SNPj[p]; t21 = di2 & SNPj[p + 1]; t22 = di2 & dj2; ft[9] += __popc(t00); ft[10] += __popc(t01); ft[11] += __popc(t02); ft[12] += __popc(t10); ft[13] += __popc(t11); ft[14] += __popc(t12); ft[15] += __popc(t20); 
ft[16] += __popc(t21); ft[17] += __popc(t22); // compute score score = 0.0f; for(k = 0; k < 9; k++) score += gammafunction(ft[k] + ft[9 + k] + 1) - gammafunction(ft[k]) - gammafunction(ft[9 + k]); score = fabs((float) score); if(score == 0.0f) score = FLT_MAX; dev_scores[tid] = score; } } int main(int argc, char **argv) { int i, j, x; int num_pac = atoi(argv[1]); // #samples int num_snp = atoi(argv[2]); // #SNPs int iteration = atoi(argv[3]);// #kernel run int block_snp = 64; srand(100); unsigned char *SNP_Data = mem_alloc<unsigned char>(64, num_pac * num_snp); unsigned char *Ph_Data = mem_alloc<unsigned char>(64, num_pac); // generate SNPs between 0 and 2 for (i = 0; i < num_pac; i++) for(j = 0; j < num_snp; j++) SNP_Data[i * num_snp + j] = rand() % 3; // generate phenotype between 0 and 1 for(int i = 0; i < num_pac; i++) Ph_Data[i] = rand() % 2; // transpose the SNP data unsigned char *SNP_Data_trans = mem_alloc<unsigned char>(64, num_pac * num_snp); for (i = 0; i < num_pac; i++) for(j = 0; j < num_snp; j++) SNP_Data_trans[j * num_pac + i] = SNP_Data[i * num_snp + j]; int phen_ones = 0; for(i = 0; i < num_pac; i++) if(Ph_Data[i] == 1) phen_ones++; // transform SNP data to a binary format int PP_zeros = ceil((1.0*(num_pac - phen_ones))/32.0); int PP_ones = ceil((1.0*phen_ones)/32.0); unsigned int *bin_data_zeros = mem_alloc<unsigned int>(64, num_snp * PP_zeros * 2); unsigned int *bin_data_ones = mem_alloc<unsigned int>(64, num_snp * PP_ones * 2); memset(bin_data_zeros, 0, num_snp*PP_zeros*2*sizeof(unsigned int)); memset(bin_data_ones, 0, num_snp*PP_ones*2*sizeof(unsigned int)); for(i = 0; i < num_snp; i++) { int x_zeros = -1; int x_ones = -1; int n_zeros = 0; int n_ones = 0; for(j = 0; j < num_pac; j++){ unsigned int temp = (unsigned int) SNP_Data_trans[i * num_pac + j]; if(Ph_Data[j] == 1){ if(n_ones%32 == 0){ x_ones ++; } // apply 1 shift left to 2 components bin_data_ones[i * PP_ones * 2 + x_ones*2 + 0] <<= 1; bin_data_ones[i * PP_ones * 2 + x_ones*2 + 1] <<= 1; // insert '1' in correct component if(temp == 0 || temp == 1){ bin_data_ones[i * PP_ones * 2 + x_ones*2 + temp ] |= 1; } n_ones ++; } else { if(n_zeros%32 == 0){ x_zeros ++; } // apply 1 shift left to 2 components bin_data_zeros[i * PP_zeros * 2 + x_zeros*2 + 0] <<= 1; bin_data_zeros[i * PP_zeros * 2 + x_zeros*2 + 1] <<= 1; // insert '1' in correct component if(temp == 0 || temp == 1){ bin_data_zeros[i * PP_zeros * 2 + x_zeros*2 + temp] |= 1; } n_zeros ++; } } } unsigned int mask_zeros = 0xFFFFFFFF; for(int x = num_pac - phen_ones; x < PP_zeros * 32; x++) mask_zeros = mask_zeros >> 1; unsigned int mask_ones = 0xFFFFFFFF; for(x = phen_ones; x < PP_ones * 32; x++) mask_ones = mask_ones >> 1; // transpose the binary data structures unsigned int* bin_data_ones_trans = mem_alloc<unsigned int>(64, num_snp * PP_ones * 2); for(i = 0; i < num_snp; i++) for(j = 0; j < PP_ones; j++) { bin_data_ones_trans[(j * num_snp + i) * 2 + 0] = bin_data_ones[(i * PP_ones + j) * 2 + 0]; bin_data_ones_trans[(j * num_snp + i) * 2 + 1] = bin_data_ones[(i * PP_ones + j) * 2 + 1]; } unsigned int* bin_data_zeros_trans = mem_alloc<unsigned int>(64, num_snp * PP_zeros * 2); for(i = 0; i < num_snp; i++) for(j = 0; j < PP_zeros; j++) { bin_data_zeros_trans[(j * num_snp + i) * 2 + 0] = bin_data_zeros[(i * PP_zeros + j) * 2 + 0]; bin_data_zeros_trans[(j * num_snp + i) * 2 + 1] = bin_data_zeros[(i * PP_zeros + j) * 2 + 1]; } float *scores = mem_alloc<float>(64, num_snp * num_snp); for(x = 0; x < num_snp * num_snp; x++) scores[x] = FLT_MAX; auto start = 
myclock::now(); unsigned int* d_data_zeros; hipMalloc((void**)&d_data_zeros, num_snp*PP_zeros*2*sizeof(unsigned int)); hipMemcpy(d_data_zeros, bin_data_zeros_trans, num_snp*PP_zeros*2*sizeof(unsigned int), hipMemcpyHostToDevice); unsigned int* d_data_ones; hipMalloc((void**)&d_data_ones, num_snp*PP_ones*2*sizeof(unsigned int)); hipMemcpy(d_data_ones, bin_data_ones_trans, num_snp*PP_ones*2*sizeof(unsigned int), hipMemcpyHostToDevice); float* d_scores; hipMalloc((void**)&d_scores, num_snp*num_snp*sizeof(float)); // setup kernel ND-range int num_snp_m = num_snp; while(num_snp_m % block_snp != 0) num_snp_m++; dim3 grid(num_snp_m / block_snp, num_snp_m); dim3 block(block_snp, 1); // epistasis detection kernel for (int i = 0; i < iteration; i++) { hipMemcpy(d_scores, scores, sizeof(float) * num_snp * num_snp, hipMemcpyHostToDevice); hipLaunchKernelGGL(epi, dim3(grid), dim3(block), 0, 0, d_data_zeros, d_data_ones, d_scores, num_snp, PP_zeros, PP_ones, mask_zeros, mask_ones); } hipMemcpy(scores, d_scores, sizeof(float) * num_snp * num_snp, hipMemcpyDeviceToHost); auto end = myclock::now(); myduration elapsed = end - start; std::cout << "Total offloading time: " << elapsed.count() << " sec" << std::endl; // compute the minimum score on a host float score = scores[0]; int solution = 0; for (int i = 1; i < num_snp * num_snp; i++) { if (score > scores[i]) { score = scores[i]; solution = i; } } std::cout << "Score: " << score << std::endl; std::cout << "Solution: " << solution / num_snp << ", " << solution % num_snp << std::endl; if ( (fabsf(score - 83.844f) > 1e-3f) || (solution / num_snp != 1253) || (solution % num_snp != 25752) ) std::cout << "FAIL\n"; else std::cout << "PASS\n"; hipFree(d_data_zeros); hipFree(d_data_ones); hipFree(d_scores); mem_free(bin_data_zeros); mem_free(bin_data_ones); mem_free(bin_data_zeros_trans); mem_free(bin_data_ones_trans); mem_free(scores); mem_free(SNP_Data); mem_free(SNP_Data_trans); mem_free(Ph_Data); return 0; }
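// ---------------------------------------------------------------------------
// Editor's note: illustrative host-side sketch, not part of the original
// benchmark. gammafunction() in the kernel above appears to be a
// Stirling-style approximation of log(n!) (i.e. lgammaf(n + 1)); the helper
// below (name is hypothetical, and it assumes <cmath> and <cstdio> are
// available) evaluates the same formula on the host and prints it next to
// lgammaf for comparison, which can help when validating score changes.
// ---------------------------------------------------------------------------
static void compare_gamma_approximation(unsigned int n)
{
  float approx = (n == 0) ? 0.0f
               : ((float)n + 0.5f) * logf((float)n)
                 - ((float)n - 1.0f) * logf(expf(1.0f));
  float reference = lgammaf((float)n + 1.0f);   // exact log(n!)
  printf("n=%u  stirling-style=%f  lgammaf(n+1)=%f\n", n, approx, reference);
}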
the_stack
#include "cutil_math.h" #include "radixsort.cu" // Build in RadixSort __constant__ FluidParams simData; __constant__ uint gridActive; __global__ void insertParticles ( bufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; register float3 gridMin = simData.gridMin; register float3 gridDelta = simData.gridDelta; register int3 gridRes = simData.gridRes; register int3 gridScan = simData.gridScanMax; register float poff = simData.psmoothradius / simData.psimscale; register int gs; register float3 gcf; register int3 gc; gcf = (buf.mpos[i] - gridMin) * gridDelta; gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) ); gs = (gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; if ( gc.x >= 1 && gc.x <= gridScan.x && gc.y >= 1 && gc.y <= gridScan.y && gc.z >= 1 && gc.z <= gridScan.z ) { buf.mgcell[i] = gs; // Grid cell insert. buf.mgndx[i] = atomicAdd ( &buf.mgridcnt[ gs ], 1 ); // Grid counts. gcf = (-make_float3(poff,poff,poff) + buf.mpos[i] - gridMin) * gridDelta; gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) ); gs = ( gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; } else { buf.mgcell[i] = GRID_UNDEF; } } // the mutex variable __device__ int g_mutex = 0; // GPU simple synchronization function __device__ void __gpu_sync(int goalVal) { __threadfence (); // only thread 0 is used for synchronization if (threadIdx.x == 0) atomicAdd(&g_mutex, 1); // only when all blocks add 1 to g_mutex will // g_mutex equal to goalVal while(g_mutex < goalVal) { // infinite loop until g_mutx = goalVal } if ( blockIdx.x == 0 && threadIdx.x == 0 ) g_mutex = 0; __syncthreads(); } // countingSortInPlace -- GPU_SYNC DOES NOT WORK /*uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) { __gpu_sync ( 2 ); return; } register float3 ipos, ivel, iveleval, iforce; register float ipress, idens; register int icell, indx, iclr; icell = buf.mgcell [ i ]; indx = buf.mgndx [ i ]; int sort_ndx = buf.mgridoff[ icell ] + indx; // global_ndx = grid_cell_offet + particle_offset if ( icell == GRID_UNDEF ) { __gpu_sync ( 2 ); return; } ipos = buf.mpos [ i ]; ivel = buf.mvel [ i ]; iveleval = buf.mveleval [ i ]; iforce = buf.mforce [ i ]; ipress = buf.mpress [ i ]; idens = buf.mdensity [ i ]; iclr = buf.mclr [ i ]; __gpu_sync ( 2 ) ; //threadfence(); // make sure every thread in all blocks has their data buf.mpos [ sort_ndx ] = ipos; buf.mvel [ sort_ndx ] = ivel; buf.mveleval [ sort_ndx ] = iveleval; buf.mforce [ sort_ndx ] = iforce; buf.mpress [ sort_ndx ] = ipress; buf.mdensity [ sort_ndx ] = idens; buf.mclr [ sort_ndx ] = iclr;*/ // Counting Sort - Index __global__ void countingSortIndex ( bufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; uint icell = buf.mgcell[i]; uint indx = buf.mgndx[i]; int sort_ndx = buf.mgridoff[ icell ] + indx; // global_ndx = grid_cell_offet + particle_offset if ( icell != GRID_UNDEF ) { buf.mgrid[ sort_ndx ] = i; // index sort, grid refers to original particle order } } // Counting Sort - Full (deep copy) __global__ void countingSortFull ( bufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Copy particle from original, unsorted buffer (msortbuf), // into sorted memory location on device (mpos/mvel) uint icell = *(uint*) (buf.msortbuf + pnum*BUF_GCELL + i*sizeof(uint) ); uint indx = *(uint*) (buf.msortbuf + pnum*BUF_GNDX + i*sizeof(uint) ); if ( icell != GRID_UNDEF ) { 
// Determine the sort_ndx, location of the particle after sort int sort_ndx = buf.mgridoff[ icell ] + indx; // global_ndx = grid_cell_offet + particle_offset // Find the original particle data, offset into unsorted buffer (msortbuf) char* bpos = buf.msortbuf + i*sizeof(float3); // Transfer data to sort location buf.mgrid[ sort_ndx ] = sort_ndx; // full sort, grid indexing becomes identity buf.mpos[ sort_ndx ] = *(float3*) (bpos); buf.mvel[ sort_ndx ] = *(float3*) (bpos + pnum*BUF_VEL ); buf.mveleval[ sort_ndx ] = *(float3*) (bpos + pnum*BUF_VELEVAL ); buf.mforce[ sort_ndx ] = *(float3*) (bpos + pnum*BUF_FORCE ); buf.mpress[ sort_ndx ] = *(float*) (buf.msortbuf + pnum*BUF_PRESS + i*sizeof(float) ); buf.mdensity[ sort_ndx ] = *(float*) (buf.msortbuf + pnum*BUF_DENS + i*sizeof(float) ); buf.mclr[ sort_ndx ] = *(uint*) (buf.msortbuf + pnum*BUF_CLR+ i*sizeof(uint) ); // ((uint) 255)<<24; -- dark matter buf.mgcell[ sort_ndx ] = icell; buf.mgndx[ sort_ndx ] = indx; } } // ***** UNUSED CODE (not working) ****** __global__ void countActiveCells ( bufList buf, int pnum ) { if ( threadIdx.x == 0 ) { // use only one processor //gridActive = -1; int last_ndx = buf.mgridoff [ simData.gridTotal-1 ] + buf.mgridcnt[ simData.gridTotal-1 ] - 1; int last_p = buf.mgrid[ last_ndx ]; int last_cell = buf.mgcell[ last_p ]; int first_p = buf.mgrid[ 0 ]; int first_cell = buf.mgcell[ first_p ] ; int cell, cnt = 0, curr = 0; cell = first_cell; while ( cell < last_cell ) { buf.mgridactive[ cnt ] = cell; // add cell to active list cnt++; curr += buf.mgridcnt[cell]; // advance to next active cell // id = buf.mgrid[curr]; // get particle id -- when unsorted only cell = buf.mgcell [ curr ]; // get cell we are in -- use id when unsorted } // gridActive = cnt; } __syncthreads(); } __device__ float contributePressure ( int i, float3 p, int cell, bufList buf ) { float3 dist; float dsq, c, sum; register float d2 = simData.psimscale * simData.psimscale; register float r2 = simData.r2 / d2; sum = 0.0; if ( buf.mgridcnt[cell] == 0 ) return 0.0; int cfirst = buf.mgridoff[ cell ]; int clast = cfirst + buf.mgridcnt[ cell ]; for ( int cndx = cfirst; cndx < clast; cndx++ ) { dist = p - buf.mpos[ buf.mgrid[cndx] ]; dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( dsq < r2 ) { c = (r2 - dsq)*d2; sum += c * c * c; } } return sum; } __global__ void computePressure ( bufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1; uint gc = buf.mgcell[ i ]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= nadj; // Sum Pressures float3 pos = buf.mpos[ i ]; float sum = 0.0; for (int c=0; c < simData.gridAdjCnt; c++) { sum += contributePressure ( i, pos, gc + simData.gridAdj[c], buf ); } __syncthreads(); // Compute Density & Pressure sum = sum * simData.pmass * simData.poly6kern; if ( sum == 0.0 ) sum = 1.0; buf.mpress[ i ] = ( sum - simData.prest_dens ) * simData.pintstiff; buf.mdensity[ i ] = 1.0f / sum; } __global__ void computeQuery ( bufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1; uint gc = buf.mgcell[ i ]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= nadj; // Sum Pressures float3 pos = buf.mpos[ i ]; float sum = 0.0; for (int c=0; c < simData.gridAdjCnt; c++) { sum += 1.0; } __syncthreads(); } 
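// ---------------------------------------------------------------------------
// Editor's note: illustrative host-side sketch, not part of the original
// source. computePressure()/computeQuery() above first subtract
// nadj = (1*gridRes.z + 1)*gridRes.x + 1 from a particle's cell id (moving to
// the (-1,-1,-1) corner of its 3x3x3 neighborhood) and then add the entries
// of simData.gridAdj. The helper below (name is hypothetical, and it assumes
// FluidParams::gridAdj can hold 27 ints) shows how such a table could be
// generated to match that indexing scheme.
// ---------------------------------------------------------------------------
void buildGridAdjacencyExample(FluidParams &fp)
{
    fp.gridAdjCnt = 0;
    for (int y = 0; y < 3; y++)
        for (int z = 0; z < 3; z++)
            for (int x = 0; x < 3; x++)
                // Same linearization as insertParticles: ((y*Rz) + z)*Rx + x
                fp.gridAdj[fp.gridAdjCnt++] =
                    (y * fp.gridRes.z + z) * fp.gridRes.x + x;
}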
/*FindNeighbors int cid = blockIdx.x * blockSize.x + blockIdx.y; // cluster id int pid = threadIdx.x; // 0 to 85 (max particles per cell) __shared__ Particle clist[ 85 ]; __shared__ Particle plist[ 85*8 ]; if ( pid < clusterCnt[cid] ) clist [ pid ] = particles [ clusterNdx[cid] + pid ]; for ( gid = 0; gid < 8; gid++ ) { if ( pid < gridCnt[ cid + group[gid] ] ) plist [ cid*CELL_CNT + pid ] = particles [ sortNdx[ cid + group[gid] ] + pid ]; } __syncthreads(); for ( int j = 0; j < cellcnt; j++ ) { dst = plist[ pid ] - plist[ j ]; if ( dst < R2 ) { ... } }*/ /*grid block <gx, gy, gz> <1, 32, 64> 256, 256, 256 total: */ #define LOCAL_PMAX 896 #define NUM_CELL 27 #define LAST_CELL 26 #define CENTER_CELL 13 __global__ void computePressureGroup ( bufList buf, int pnum ) { __shared__ float3 cpos[ LOCAL_PMAX ]; __shared__ int ncnt[ NUM_CELL ]; __shared__ int ngridoff[ NUM_CELL ]; __shared__ int noff[ NUM_CELL ]; int bid = __mul24( blockIdx.y, gridDim.x ) + blockIdx.x; if ( bid > gridActive ) return; // block must be in a valid grid uint cell = buf.mgridactive [ bid ]; // get grid cell (from blockID 1:1) register int i = -1; register float3 ipos; uint ndx = threadIdx.x; if ( ndx < buf.mgridcnt[cell] ) { i = buf.mgridoff[cell] + ndx; // particle id to process ipos = buf.mpos[ i ]; } int gid = threadIdx.x; register float d2 = simData.psimscale * simData.psimscale; register float r2 = simData.r2 / d2; register float3 dist; register float c, dsq, sum; int neighbor; // copy neighbor cell counts to shared mem if ( gid < NUM_CELL ) { int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1; neighbor = cell - nadj + simData.gridAdj[gid]; // neighbor cell id ncnt[gid] = buf.mgridcnt [ neighbor ]; ngridoff[gid] = buf.mgridoff [ neighbor ]; } __syncthreads (); if ( gid == 0 ) { // compute neighbor local ndx (as prefix sum) int nsum = 0; for (int z=0; z < NUM_CELL; z++) { // 27-step prefix sum noff[z] = nsum; nsum += ncnt[z]; } } __syncthreads (); // copy particles into shared memory if ( gid < NUM_CELL ) { for (int j=0; j < ncnt[gid]; j++ ) { neighbor = buf.mgrid [ ngridoff[gid] + j ]; // neighbor particle id ndx = noff[ gid ] + j; cpos[ ndx ] = buf.mpos [ neighbor ]; } } __syncthreads (); // compute pressure for current particle if ( i == -1 ) return; int jnum = noff[LAST_CELL] + ncnt[LAST_CELL]; sum = 0.0; for (int j = 0; j < jnum; j++) { dist = ipos - cpos[ j ]; dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( dsq > 0.0 && dsq < r2 ) { c = (r2 - dsq)*d2; sum += c * c * c; } } __syncthreads (); // put result into global mem sum = sum * simData.pmass * simData.poly6kern; if ( sum == 0.0 ) sum = 1.0; buf.mpress[ i ] = ( sum - simData.prest_dens ) * simData.pintstiff; buf.mdensity[ i ] = 1.0f / sum; } __device__ float3 contributeForce ( int i, float3 ipos, float3 iveleval, float ipress, float idens, int cell, bufList buf ) { float dsq, c; float pterm; float3 dist, force; int j; if ( buf.mgridcnt[cell] == 0 ) return make_float3(0,0,0); force = make_float3(0,0,0); for ( int cndx = buf.mgridoff[ cell ]; cndx < buf.mgridoff[ cell ] + buf.mgridcnt[ cell ]; cndx++ ) { j = buf.mgrid[ cndx ]; dist = ( ipos - buf.mpos[ j ] ); // dist in cm dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( dsq < simData.rd2 && dsq > 0) { dsq = sqrt(dsq * simData.d2); c = ( simData.psmoothradius - dsq ); pterm = simData.psimscale * -0.5f * c * simData.spikykern * ( ipress + buf.mpress[ j ] ) / dsq; force += ( pterm * dist + simData.vterm * ( buf.mveleval[ j ] - iveleval )) * c * idens * (buf.mdensity[ j ] ); } } 
return force; } __global__ void computeForce ( bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell uint gc = buf.mgcell[ i ]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= (1*simData.gridRes.z + 1)*simData.gridRes.x + 1; // Sum Pressures register float3 force; force = make_float3(0,0,0); for (int c=0; c < simData.gridAdjCnt; c++) { force += contributeForce ( i, buf.mpos[ i ], buf.mveleval[ i ], buf.mpress[ i ], buf.mdensity[ i ], gc + simData.gridAdj[c], buf ); } buf.mforce[ i ] = force; } /*__global__ void computeForceNbr ( char* bufPnts, int* bufGrid, int numPnt ) { uint ndx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( ndx >= numPnt ) return; char* ioffs = bufPnts + __mul24(ndx, simData.stride ); float3 ipos = *(float3*) (ioffs + OFFSET_POS); float3 ivelval = *(float3*) (ioffs + OFFSET_VELEVAL); float press = *(float*) (ioffs + OFFSET_PRESS); float dens = *(float*) (ioffs + OFFSET_DENS); int icnt = *(int*) (ioffs + OFFSET_NBRCNT); char* joffs; float3 jpos, jveleval; float3 dist, force; float c, ndistj, pterm, dterm, vterm; vterm = simData.lapkern * simData.visc; force = make_float3(0,0,0); for (int nbr=0; nbr < icnt; nbr++) { // base 1, n[0] = count ndistj = bufNdist[ndx][nbr]; joffs = bufPnts + __mul24(bufNeighbor[ndx][nbr], simData.stride); jpos = *(float3*) (joffs + OFFSET_POS); jveleval = *(float3*) (joffs + OFFSET_VELEVAL); c = ( simData.smooth_rad - ndistj ); dist.x = ( ipos.x - jpos.x ); // dist in cm dist.y = ( ipos.y - jpos.y ); dist.z = ( ipos.z - jpos.z ); pterm = simData.sim_scale * -0.5f * c * simData.spikykern * ( press + *(float*)(joffs+OFFSET_PRESS) ) / ndistj; dterm = c * dens * *(float*)(joffs+OFFSET_DENS); force.x += ( pterm * dist.x + vterm * ( jveleval.x - ivelval.x )) * dterm; force.y += ( pterm * dist.y + vterm * ( jveleval.y - ivelval.y )) * dterm; force.z += ( pterm * dist.z + vterm * ( jveleval.z - ivelval.z )) * dterm; } *(float3*) ( ioffs + OFFSET_FORCE ) = force; }*/ __global__ void advanceParticles ( float time, float dt, float ss, bufList buf, int numPnts ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= numPnts ) return; if ( buf.mgcell[i] == GRID_UNDEF ) { buf.mpos[i] = make_float3(-1000,-1000,-1000); buf.mvel[i] = make_float3(0,0,0); return; } // Get particle vars register float3 accel, norm; register float diff, adj, speed; register float3 pos = buf.mpos[i]; register float3 veval = buf.mveleval[i]; // Leapfrog integration accel = buf.mforce[i]; accel *= simData.pmass; // Boundaries // Y-axis diff = simData.pradius - (pos.y - (simData.pboundmin.y + (pos.x-simData.pboundmin.x)*simData.pground_slope )) * ss; if ( diff > EPSILON ) { norm = make_float3( -simData.pground_slope, 1.0 - simData.pground_slope, 0); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } diff = simData.pradius - ( simData.pboundmax.y - pos.y )*ss; if ( diff > EPSILON ) { norm = make_float3(0, -1, 0); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } // X-axis diff = simData.pradius - (pos.x - (simData.pboundmin.x + (sin(time*simData.pforce_freq)+1)*0.5 * simData.pforce_min))*ss; if ( diff > EPSILON ) { norm = make_float3( 1, 0, 0); adj = (simData.pforce_min+1) * simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } diff = simData.pradius - ( (simData.pboundmax.x - 
(sin(time*simData.pforce_freq)+1)*0.5*simData.pforce_max) - pos.x)*ss; if ( diff > EPSILON ) { norm = make_float3(-1, 0, 0); adj = (simData.pforce_max+1) * simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } // Z-axis diff = simData.pradius - (pos.z - simData.pboundmin.z ) * ss; if ( diff > EPSILON ) { norm = make_float3( 0, 0, 1 ); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } diff = simData.pradius - ( simData.pboundmax.z - pos.z )*ss; if ( diff > EPSILON ) { norm = make_float3( 0, 0, -1 ); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } // Gravity accel += simData.pgravity; // Accel Limit speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z; if ( speed > simData.AL2 ) { accel *= simData.AL / sqrt(speed); } // Velocity Limit float3 vel = buf.mvel[i]; speed = vel.x*vel.x + vel.y*vel.y + vel.z*vel.z; if ( speed > simData.VL2 ) { speed = simData.VL2; vel *= simData.VL / sqrt(speed); } // Ocean colors if ( speed > simData.VL2*0.2) { adj = simData.VL2*0.2; buf.mclr[i] += (( buf.mclr[i] & 0xFF) < 0xFD ) ? +0x00000002 : 0; // decrement R by one buf.mclr[i] += (( (buf.mclr[i]>>8) & 0xFF) < 0xFD ) ? +0x00000200 : 0; // decrement G by one buf.mclr[i] += (( (buf.mclr[i]>>16) & 0xFF) < 0xFD ) ? +0x00020000 : 0; // decrement G by one } if ( speed < 0.03 ) { int v = int(speed/.01)+1; buf.mclr[i] += (( buf.mclr[i] & 0xFF) > 0x80 ) ? -0x00000001 * v : 0; // decrement R by one buf.mclr[i] += (( (buf.mclr[i]>>8) & 0xFF) > 0x80 ) ? -0x00000100 * v : 0; // decrement G by one } //-- surface particle density //buf.mclr[i] = buf.mclr[i] & 0x00FFFFFF; //if ( buf.mdensity[i] > 0.0014 ) buf.mclr[i] += 0xAA000000; // Leap-frog Integration float3 vnext = accel*dt + vel; // v(t+1/2) = v(t-1/2) + a(t) dt buf.mveleval[i] = (vel + vnext) * 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 buf.mvel[i] = vnext; buf.mpos[i] += vnext * (dt/ss); // p(t+1) = p(t) + v(t+1/2) dt } void updateSimParams ( FluidParams* cpufp ) { cudaError_t status; #ifdef CUDA_42 // Only for CUDA 4.x or earlier. Depricated in CUDA 5.0+ // Original worked even if symbol was declared __device__ status = cudaMemcpyToSymbol ( "simData", cpufp, sizeof(FluidParams) ); #else // CUDA 5.x+. Only works if symbol is declared __constant__ status = cudaMemcpyToSymbol ( simData, cpufp, sizeof(FluidParams) ); #endif /*app_printf ( "SIM PARAMETERS:\n" ); app_printf ( " CPU: %p\n", cpufp ); app_printf ( " GPU: %p\n", &simData ); */ }
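// ---------------------------------------------------------------------------
// Editor's note: illustrative sketch, not part of the original file. The
// cudaError_t returned by cudaMemcpyToSymbol in updateSimParams() above is
// stored in `status` but never inspected; a thin wrapper like the
// hypothetical one below lets the caller surface that failure.
// ---------------------------------------------------------------------------
cudaError_t updateSimParamsChecked ( FluidParams* cpufp )
{
    updateSimParams ( cpufp );
    return cudaGetLastError();   // reports any failure from the symbol copy
}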
the_stack
//////////////////////////////////////////////////////////////////////////////////// // BASELINE FUNCTIONS //////////////////////////////////////////////////////////////////////////////////// // All "baseline" code is at the top of this file. The baseline code is a simple // implementation of the algorithm, with only minor CPU optimizations in place. // Following these functions are a number of optimized variants, // which each deploy a different combination of optimizations strategies. By // default, XSBench will only run the baseline implementation. Optimized variants // are not yet implemented in this CUDA port. //////////////////////////////////////////////////////////////////////////////////// __global__ void lookup ( int *num_nucs, double *concs, int*mats, NuclideGridPoint* nuclide_grid, int* verification, double* unionized_energy_array, int *index_grid, int n_lookups, long n_isotopes, long n_gridpoints, int grid_type, int hash_bins, int max_num_nucs ) { // get the index to operate on, first dimemsion size_t i = threadIdx.x + blockIdx.x * blockDim.x; if (i < n_lookups) { // Set the initial seed value uint64_t seed = STARTING_SEED; // Forward seed to lookup index (we need 2 samples per lookup) seed = fast_forward_LCG(seed, 2*i); // Randomly pick an energy and material for the particle double p_energy = LCG_random_double(&seed); int mat = pick_mat(&seed); // debugging //printf("E = %lf mat = %d\n", p_energy, mat); double macro_xs_vector[5] = {0}; // Perform macroscopic Cross Section Lookup calculate_macro_xs( p_energy, // Sampled neutron energy (in lethargy) mat, // Sampled material type index neutron is in n_isotopes, // Total number of isotopes in simulation n_gridpoints, // Number of gridpoints per isotope in simulation num_nucs, // 1-D array with number of nuclides per material concs, // Flattened 2-D array with concentration of each nuclide in each material unionized_energy_array, // 1-D Unionized energy array index_grid, // Flattened 2-D grid holding indices into nuclide grid for each unionized energy level nuclide_grid, // Flattened 2-D grid holding energy levels and XS_data for all nuclides in simulation mats, // Flattened 2-D array with nuclide indices defining composition of each type of material macro_xs_vector, // 1-D array with result of the macroscopic cross section (5 different reaction channels) grid_type, // Lookup type (nuclide, hash, or unionized) hash_bins, // Number of hash bins used (if using hash lookup type) max_num_nucs // Maximum number of nuclides present in any material ); // For verification, and to prevent the compiler from optimizing // all work out, we interrogate the returned macro_xs_vector array // to find its maximum value index, then increment the verification // value by that index. In this implementation, we store to a global // array that will get tranferred back and reduced on the host. 
    double max = -1.0;
    int max_idx = 0;
    for(int j = 0; j < 5; j++ )
    {
      if( macro_xs_vector[j] > max )
      {
        max = macro_xs_vector[j];
        max_idx = j;
      }
    }
    verification[i] = max_idx+1;
  }
}

unsigned long long run_event_based_simulation(Inputs in, SimulationData SD, int mype, double * kernel_init_time)
{
  ////////////////////////////////////////////////////////////////////////////////
  // SUMMARY: Simulation Data Structure Manifest for "SD" Object
  // Here we list all heap arrays (and lengths) in SD that would need to be
  // offloaded manually if using an accelerator with a separate memory space
  ////////////////////////////////////////////////////////////////////////////////
  // int * num_nucs;                     // Length = length_num_nucs;
  // double * concs;                     // Length = length_concs
  // int * mats;                         // Length = length_mats
  // double * unionized_energy_array;    // Length = length_unionized_energy_array
  // int * index_grid;                   // Length = length_index_grid
  // NuclideGridPoint * nuclide_grid;    // Length = length_nuclide_grid
  //
  // Note: "unionized_energy_array" and "index_grid" can be of zero length
  //       depending on lookup method.
  //
  // Note: "Lengths" are given as the number of objects in the array, not the
  //       number of bytes.
  ////////////////////////////////////////////////////////////////////////////////

  // Let's create an extra verification array to reduce manually later on
  if( mype == 0 ) printf("Allocating an additional %.1lf MB of memory for verification arrays...\n", in.lookups * sizeof(int) /1024.0/1024.0);
  int * verification_host = (int *) malloc(in.lookups * sizeof(int));

  // Timers
  double start = get_time();
  double stop;

  // Scope here is important: when we exit this block we sync with the device
  // to ensure all work is done and that we can read from the verification_host array
  // (in this CUDA port the synchronization is done explicitly with cudaDeviceSynchronize()).
  {
    // Identify the device we will run on (the original SYCL port created a queue here)
    cudaDeviceProp devProp;
    cudaGetDeviceProperties(&devProp, 0);
    if(mype == 0 ) printf("Running on: %s\n", devProp.name);
    if(mype == 0 ) printf("Initializing device buffers and JIT compiling kernel...\n");

    ////////////////////////////////////////////////////////////////////////////////
    // Create Device Buffers
    ////////////////////////////////////////////////////////////////////////////////
    int *verification_d = nullptr;
    int *mats_d = nullptr;
    int *num_nucs_d = nullptr;
    double *concs_d = nullptr;
    NuclideGridPoint *nuclide_grid_d = nullptr;

    //buffer<int, 1> num_nucs_d(SD.num_nucs,SD.length_num_nucs);
    cudaMalloc((void**)&num_nucs_d, sizeof(int) * SD.length_num_nucs);
    cudaMemcpy(num_nucs_d, SD.num_nucs, sizeof(int) * SD.length_num_nucs, cudaMemcpyHostToDevice);

    //buffer<double, 1> concs_d(SD.concs, SD.length_concs);
    cudaMalloc((void**)&concs_d, sizeof(double) * SD.length_concs);
    cudaMemcpy(concs_d, SD.concs, sizeof(double) * SD.length_concs, cudaMemcpyHostToDevice);

    //buffer<int, 1> mats_d(SD.mats, SD.length_mats);
    cudaMalloc((void**)&mats_d, sizeof(int) * SD.length_mats);
    cudaMemcpy(mats_d, SD.mats, sizeof(int) * SD.length_mats, cudaMemcpyHostToDevice);

    //buffer<NuclideGridPoint, 1> nuclide_grid_d(SD.nuclide_grid, SD.length_nuclide_grid);
    cudaMalloc((void**)&nuclide_grid_d, sizeof(NuclideGridPoint) * SD.length_nuclide_grid);
    cudaMemcpy(nuclide_grid_d, SD.nuclide_grid, sizeof(NuclideGridPoint) * SD.length_nuclide_grid, cudaMemcpyHostToDevice);

    //buffer<int, 1> verification_d(verification_host, in.lookups);
    cudaMalloc((void**)&verification_d, sizeof(int) * in.lookups);
    cudaMemcpy(verification_d, verification_host, sizeof(int) * in.lookups, cudaMemcpyHostToDevice);

    // These two are a bit of a hack. Sometimes they are empty buffers (if using hash or nuclide
    // grid methods). OpenCL will throw an exception when we try to create an empty buffer. So, we
    // will just allocate some memory for them and move them as normal. The rest of our code
    // won't actually use them if they aren't needed, so this is safe. Probably a cleaner way
    // of doing this.
    if( SD.length_unionized_energy_array == 0 )
    {
      SD.length_unionized_energy_array = 1;
      SD.unionized_energy_array = (double *) malloc(sizeof(double));
    }

    //buffer<double,1> unionized_energy_array_d(SD.unionized_energy_array, SD.length_unionized_energy_array);
    double *unionized_energy_array_d = nullptr;
    cudaMalloc((void**)&unionized_energy_array_d, sizeof(double) * SD.length_unionized_energy_array);
    cudaMemcpy(unionized_energy_array_d, SD.unionized_energy_array, sizeof(double) * SD.length_unionized_energy_array, cudaMemcpyHostToDevice);

    if( SD.length_index_grid == 0 )
    {
      SD.length_index_grid = 1;
      SD.index_grid = (int *) malloc(sizeof(int));
    }

    // For the unionized grid, this is a large array. Enforce that it is able to be allocated on the
    // OpenCL device (as some OpenCL devices have fairly low limits here for some reason...)
size_t index_grid_allocation_sz = ceil((SD.length_index_grid * sizeof(int))); //assert( index_grid_allocation_sz <= sycl_q.get_device().get_info<cl::sycl::info::device::max_mem_alloc_size>() ); //buffer<int, 1> index_grid_d(SD.index_grid, (unsigned long long ) SD.length_index_grid); int *index_grid_d = nullptr; cudaMalloc((void**)&index_grid_d, sizeof(int) * (unsigned long long)SD.length_index_grid); cudaMemcpy(index_grid_d, SD.index_grid, sizeof(int) * (unsigned long long )SD.length_index_grid, cudaMemcpyHostToDevice); //////////////////////////////////////////////////////////////////////////////// // Define Device Kernel //////////////////////////////////////////////////////////////////////////////// lookup<<< dim3((in.lookups + 255) / 256), dim3(256) >>> ( num_nucs_d, concs_d, mats_d, nuclide_grid_d, verification_d, unionized_energy_array_d, index_grid_d, in.lookups, in.n_isotopes, in.n_gridpoints, in.grid_type, in.hash_bins, SD.max_num_nucs ); cudaMemcpy(verification_host, verification_d, sizeof(int) * in.lookups, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); cudaFree(verification_d); cudaFree(mats_d); cudaFree(num_nucs_d); cudaFree(concs_d); cudaFree(nuclide_grid_d); cudaFree(unionized_energy_array_d); cudaFree(index_grid_d); stop = get_time(); if(mype==0) printf("Kernel initialization, compilation, and launch took %.2lf seconds.\n", stop-start); if(mype==0) printf("Beginning event based simulation...\n"); } // Host reduces the verification array unsigned long long verification_scalar = 0; for( int i = 0; i < in.lookups; i++ ) verification_scalar += verification_host[i]; return verification_scalar; } // binary search for energy on unionized energy grid // returns lower index template <class T> __device__ long grid_search( long n, double quarry, T A) { long lowerLimit = 0; long upperLimit = n-1; long examinationPoint; long length = upperLimit - lowerLimit; while( length > 1 ) { examinationPoint = lowerLimit + ( length / 2 ); if( A[examinationPoint] > quarry ) upperLimit = examinationPoint; else lowerLimit = examinationPoint; length = upperLimit - lowerLimit; } return lowerLimit; } // Calculates the microscopic cross section for a given nuclide & energy template <class Double_Type, class Int_Type, class NGP_Type> __device__ void calculate_micro_xs( double p_energy, int nuc, long n_isotopes, long n_gridpoints, Double_Type egrid, Int_Type index_data, NGP_Type nuclide_grids, long idx, double * xs_vector, int grid_type, int hash_bins ){ // Variables double f; NuclideGridPoint low, high; long low_idx, high_idx; // If using only the nuclide grid, we must perform a binary search // to find the energy location in this particular nuclide's grid. if( grid_type == NUCLIDE ) { // Perform binary search on the Nuclide Grid to find the index long offset = nuc * n_gridpoints; idx = grid_search_nuclide( n_gridpoints, p_energy, nuclide_grids, offset, offset + n_gridpoints-1); // pull ptr from nuclide grid and check to ensure that // we're not reading off the end of the nuclide's grid if( idx == n_gridpoints - 1 ) low_idx = idx - 1; else low_idx = idx; } else if( grid_type == UNIONIZED) // Unionized Energy Grid - we already know the index, no binary search needed. 
  {
    // pull ptr from energy grid and check to ensure that
    // we're not reading off the end of the nuclide's grid
    if( index_data[idx * n_isotopes + nuc] == n_gridpoints - 1 )
      low_idx = nuc*n_gridpoints + index_data[idx * n_isotopes + nuc] - 1;
    else
    {
      low_idx = nuc*n_gridpoints + index_data[idx * n_isotopes + nuc];
    }
  }
  else // Hash grid
  {
    // load lower bounding index
    int u_low = index_data[idx * n_isotopes + nuc];

    // Determine higher bounding index
    int u_high;
    if( idx == hash_bins - 1 )
      u_high = n_gridpoints - 1;
    else
      u_high = index_data[(idx+1)*n_isotopes + nuc] + 1;

    // Check edge cases to make sure energy is actually between these
    // Then, if things look good, search for gridpoint in the nuclide grid
    // within the lower and higher limits we've calculated.
    double e_low  = nuclide_grids[nuc*n_gridpoints + u_low].energy;
    double e_high = nuclide_grids[nuc*n_gridpoints + u_high].energy;
    long lower;
    if( p_energy <= e_low )
      lower = nuc*n_gridpoints;
    else if( p_energy >= e_high )
      lower = nuc*n_gridpoints + n_gridpoints - 1;
    else
    {
      long offset = nuc*n_gridpoints;
      lower = grid_search_nuclide( n_gridpoints, p_energy, nuclide_grids, offset+u_low, offset+u_high);
    }

    if( (lower % n_gridpoints) == n_gridpoints - 1 )
      low_idx = lower - 1;
    else
      low_idx = lower;
  }

  high_idx = low_idx + 1;
  low  = nuclide_grids[low_idx];
  high = nuclide_grids[high_idx];

  // calculate the reusable interpolation factor
  f = (high.energy - p_energy) / (high.energy - low.energy);

  // Total XS
  xs_vector[0] = high.total_xs - f * (high.total_xs - low.total_xs);

  // Elastic XS
  xs_vector[1] = high.elastic_xs - f * (high.elastic_xs - low.elastic_xs);

  // Absorbtion XS
  xs_vector[2] = high.absorbtion_xs - f * (high.absorbtion_xs - low.absorbtion_xs);

  // Fission XS
  xs_vector[3] = high.fission_xs - f * (high.fission_xs - low.fission_xs);

  // Nu Fission XS
  xs_vector[4] = high.nu_fission_xs - f * (high.nu_fission_xs - low.nu_fission_xs);
}

// Calculates macroscopic cross section based on a given material & energy
template <class Double_Type, class Int_Type, class NGP_Type, class E_GRID_TYPE, class INDEX_TYPE>
__device__ void calculate_macro_xs( double p_energy, int mat, long n_isotopes, long n_gridpoints, Int_Type num_nucs, Double_Type concs, E_GRID_TYPE egrid, INDEX_TYPE index_data, NGP_Type nuclide_grids, Int_Type mats, double * macro_xs_vector, int grid_type, int hash_bins, int max_num_nucs ){
  int p_nuc;   // the nuclide we are looking up
  long idx = -1;
  double conc; // the concentration of the nuclide in the material

  // cleans out macro_xs_vector
  for( int k = 0; k < 5; k++ )
    macro_xs_vector[k] = 0;

  // If we are using the unionized energy grid (UEG), we only
  // need to perform 1 binary search per macroscopic lookup.
  // If we are using the nuclide grid search, it will have to be
  // done inside of the "calculate_micro_xs" function for each different
  // nuclide in the material.
  if( grid_type == UNIONIZED )
    idx = grid_search( n_isotopes * n_gridpoints, p_energy, egrid);
  else if( grid_type == HASH )
  {
    double du = 1.0 / hash_bins;
    idx = p_energy / du;
  }

  // Once we find the pointer array on the UEG, we can pull the data
  // from the respective nuclide grids, as well as the nuclide
  // concentration data for the material
  // Each nuclide from the material needs to have its micro-XS array
  // looked up & interpolated (via calculate_micro_xs). Then, the
  // micro XS is multiplied by the concentration of that nuclide
  // in the material, and added to the total macro XS array.
  // (Independent -- though if parallelizing, must use atomic operations
  //  or otherwise control access to the xs_vector and macro_xs_vector to
  //  avoid simultaneous writing to the same data structure)
  for( int j = 0; j < num_nucs[mat]; j++ )
  {
    double xs_vector[5];
    p_nuc = mats[mat*max_num_nucs + j];
    conc = concs[mat*max_num_nucs + j];
    calculate_micro_xs( p_energy, p_nuc, n_isotopes, n_gridpoints, egrid, index_data, nuclide_grids, idx, xs_vector, grid_type, hash_bins );
    for( int k = 0; k < 5; k++ )
      macro_xs_vector[k] += xs_vector[k] * conc;
  }
}

// picks a material based on a probabilistic distribution
__device__ int pick_mat( unsigned long * seed )
{
  // I have a nice spreadsheet supporting these numbers. They are
  // the fractions (by volume) of material in the core. Not a
  // *perfect* approximation of where XS lookups are going to occur,
  // but this will do a good job of biasing the system nonetheless.
  // Also could be argued that doing fractions by weight would be
  // a better approximation, but volume does a good enough job for now.
  double dist[12];
  dist[0]  = 0.140; // fuel
  dist[1]  = 0.052; // cladding
  dist[2]  = 0.275; // cold, borated water
  dist[3]  = 0.134; // hot, borated water
  dist[4]  = 0.154; // RPV
  dist[5]  = 0.064; // Lower, radial reflector
  dist[6]  = 0.066; // Upper reflector / top plate
  dist[7]  = 0.055; // bottom plate
  dist[8]  = 0.008; // bottom nozzle
  dist[9]  = 0.015; // top nozzle
  dist[10] = 0.025; // top of fuel assemblies
  dist[11] = 0.013; // bottom of fuel assemblies

  double roll = LCG_random_double(seed);

  // makes a pick based on the distro
  for( int i = 0; i < 12; i++ )
  {
    double running = 0;
    for( int j = i; j > 0; j-- )
      running += dist[j];
    if( roll < running )
      return i;
  }

  return 0;
}

__host__ __device__ double LCG_random_double(uint64_t * seed)
{
  // LCG parameters
  const uint64_t m = 9223372036854775808ULL; // 2^63
  const uint64_t a = 2806196910506780709ULL;
  const uint64_t c = 1ULL;
  *seed = (a * (*seed) + c) % m;
  return (double) (*seed) / (double) m;
}

__host__ __device__ uint64_t fast_forward_LCG(uint64_t seed, uint64_t n)
{
  // LCG parameters
  const uint64_t m = 9223372036854775808ULL; // 2^63
  uint64_t a = 2806196910506780709ULL;
  uint64_t c = 1ULL;

  n = n % m;

  uint64_t a_new = 1;
  uint64_t c_new = 0;

  while(n > 0)
  {
    if(n & 1)
    {
      a_new *= a;
      c_new = c_new * a + c;
    }
    c *= (a + 1);
    a *= a;

    n >>= 1;
  }

  return (a_new * seed + c_new) % m;
}
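fast_forward_LCG above jumps the generator ahead in O(log n) squarings, and the lookup kernel depends on fast_forward_LCG(seed, 2*i) landing on exactly the state reached by 2*i sequential calls to LCG_random_double. The host-side check below is not part of XSBench; it assumes it is compiled in the same translation unit as the two functions above, and the test seed and step counts are arbitrary.

#include <cstdint>
#include <cstdio>

// Returns true if jumping ahead by nsteps matches stepping the LCG nsteps times.
static bool check_lcg_fast_forward(uint64_t start_seed, uint64_t nsteps)
{
  uint64_t sequential = start_seed;
  for (uint64_t k = 0; k < nsteps; k++)
    (void) LCG_random_double(&sequential);   // each call advances the seed once
  return sequential == fast_forward_LCG(start_seed, nsteps);
}

int run_lcg_fast_forward_check(void)
{
  const uint64_t test_seed = 42;             // arbitrary, not STARTING_SEED
  const uint64_t steps[] = { 1, 2, 17, 1000, 123456 };
  for (unsigned s = 0; s < sizeof(steps)/sizeof(steps[0]); s++)
  {
    if (!check_lcg_fast_forward(test_seed, steps[s]))
    {
      printf("LCG fast-forward mismatch at n = %llu\n", (unsigned long long) steps[s]);
      return 1;
    }
  }
  printf("LCG fast-forward matches sequential stepping\n");
  return 0;
}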
#include "miner.h" #include "cuda_helper.h" #include "cuda_vectors.h" #include "streebog_arrays.cuh" __constant__ static uint2 keccak_round_constants[24] = { { 0x00000001, 0x00000000 }, { 0x00008082, 0x00000000 }, { 0x0000808a, 0x80000000 }, { 0x80008000, 0x80000000 }, { 0x0000808b, 0x00000000 }, { 0x80000001, 0x00000000 }, { 0x80008081, 0x80000000 }, { 0x00008009, 0x80000000 }, { 0x0000008a, 0x00000000 }, { 0x00000088, 0x00000000 }, { 0x80008009, 0x00000000 }, { 0x8000000a, 0x00000000 }, { 0x8000808b, 0x00000000 }, { 0x0000008b, 0x80000000 }, { 0x00008089, 0x80000000 }, { 0x00008003, 0x80000000 }, { 0x00008002, 0x80000000 }, { 0x00000080, 0x80000000 }, { 0x0000800a, 0x00000000 }, { 0x8000000a, 0x80000000 }, { 0x80008081, 0x80000000 }, { 0x00008080, 0x80000000 }, { 0x80000001, 0x00000000 }, { 0x80008008, 0x80000000 } }; __device__ __forceinline__ static void GOST_FS(const uint2 shared[8][256],const uint2 *const __restrict__ state,uint2* return_state){ return_state[0] = __ldg(&T02[__byte_perm(state[7].x,0,0x44440)]) ^ __ldg(&T12[__byte_perm(state[6].x,0,0x44440)]) ^ shared[2][__byte_perm(state[5].x,0,0x44440)] ^ shared[3][__byte_perm(state[4].x,0,0x44440)] ^ shared[4][__byte_perm(state[3].x,0,0x44440)] ^ shared[5][__byte_perm(state[2].x,0,0x44440)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44440)]) ^ shared[6][__byte_perm(state[1].x,0,0x44440)]; return_state[1] = __ldg(&T02[__byte_perm(state[7].x,0,0x44441)]) ^ shared[2][__byte_perm(state[5].x,0,0x44441)] ^ shared[3][__byte_perm(state[4].x,0,0x44441)] ^ __ldg(&T12[__byte_perm(state[6].x,0,0x44441)]) ^ shared[4][__byte_perm(state[3].x,0,0x44441)] ^ shared[5][__byte_perm(state[2].x,0,0x44441)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44441)]) ^ shared[6][__byte_perm(state[1].x,0,0x44441)]; return_state[2] = __ldg(&T02[__byte_perm(state[7].x,0,0x44442)]) ^ __ldg(&T12[__byte_perm(state[6].x,0,0x44442)]) ^ shared[2][__byte_perm(state[5].x,0,0x44442)] ^ shared[3][__byte_perm(state[4].x,0,0x44442)] ^ shared[4][__byte_perm(state[3].x,0,0x44442)] ^ shared[5][__byte_perm(state[2].x,0,0x44442)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44442)]) ^ shared[6][__byte_perm(state[1].x,0,0x44442)]; return_state[3] = __ldg(&T02[__byte_perm(state[7].x,0,0x44443)]) ^ shared[1][__byte_perm(state[6].x,0,0x44443)] ^ shared[2][__byte_perm(state[5].x,0,0x44443)] ^ shared[3][__byte_perm(state[4].x,0,0x44443)] ^ __ldg(&T42[__byte_perm(state[3].x,0,0x44443)]) ^ shared[5][__byte_perm(state[2].x,0,0x44443)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44443)]) ^ shared[6][__byte_perm(state[1].x,0,0x44443)]; return_state[4] = __ldg(&T02[__byte_perm(state[7].y,0,0x44440)]) ^ shared[1][__byte_perm(state[6].y,0,0x44440)] ^ __ldg(&T22[__byte_perm(state[5].y,0,0x44440)]) ^ shared[3][__byte_perm(state[4].y,0,0x44440)] ^ shared[4][__byte_perm(state[3].y,0,0x44440)] ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44440)]) ^ shared[5][__byte_perm(state[2].y,0,0x44440)] ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44440)]); return_state[5] = __ldg(&T02[__byte_perm(state[7].y,0,0x44441)]) ^ shared[2][__byte_perm(state[5].y,0,0x44441)] ^ __ldg(&T12[__byte_perm(state[6].y,0,0x44441)]) ^ shared[3][__byte_perm(state[4].y,0,0x44441)] ^ shared[4][__byte_perm(state[3].y,0,0x44441)] ^ shared[5][__byte_perm(state[2].y,0,0x44441)] ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44441)]) ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44441)]); return_state[6] = __ldg(&T02[__byte_perm(state[7].y,0,0x44442)]) ^ shared[1][__byte_perm(state[6].y,0,0x44442)] ^ shared[2][__byte_perm(state[5].y,0,0x44442)] ^ 
shared[3][__byte_perm(state[4].y,0,0x44442)] ^ shared[4][__byte_perm(state[3].y,0,0x44442)] ^ shared[5][__byte_perm(state[2].y,0,0x44442)] ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44442)]) ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44442)]); return_state[7] = __ldg(&T02[__byte_perm(state[7].y,0,0x44443)]) ^ __ldg(&T12[__byte_perm(state[6].y,0,0x44443)]) ^ shared[2][__byte_perm(state[5].y,0,0x44443)] ^ shared[3][__byte_perm(state[4].y,0,0x44443)] ^ shared[4][__byte_perm(state[3].y,0,0x44443)] ^ shared[5][__byte_perm(state[2].y,0,0x44443)] ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44443)]) ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44443)]); } __device__ __forceinline__ static void GOST_FS_LDG(const uint2 shared[8][256],const uint2 *const __restrict__ state,uint2* return_state){ return_state[0] = __ldg(&T02[__byte_perm(state[7].x,0,0x44440)]) ^ __ldg(&T12[__byte_perm(state[6].x,0,0x44440)]) ^ shared[2][__byte_perm(state[5].x,0,0x44440)] ^ shared[3][__byte_perm(state[4].x,0,0x44440)] ^ shared[4][__byte_perm(state[3].x,0,0x44440)] ^ shared[5][__byte_perm(state[2].x,0,0x44440)] ^ shared[6][__byte_perm(state[1].x,0,0x44440)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44440)]); return_state[1] = __ldg(&T02[__byte_perm(state[7].x,0,0x44441)]) ^ shared[1][__byte_perm(state[6].x,0,0x44441)] ^ shared[2][__byte_perm(state[5].x,0,0x44441)] ^ shared[3][__byte_perm(state[4].x,0,0x44441)] ^ shared[4][__byte_perm(state[3].x,0,0x44441)] ^ shared[5][__byte_perm(state[2].x,0,0x44441)] ^ shared[6][__byte_perm(state[1].x,0,0x44441)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44441)]); return_state[2] = __ldg(&T02[__byte_perm(state[7].x,0,0x44442)]) ^ __ldg(&T12[__byte_perm(state[6].x,0,0x44442)]) ^ shared[2][__byte_perm(state[5].x,0,0x44442)] ^ shared[3][__byte_perm(state[4].x,0,0x44442)] ^ shared[4][__byte_perm(state[3].x,0,0x44442)] ^ shared[5][__byte_perm(state[2].x,0,0x44442)] ^ shared[6][__byte_perm(state[1].x,0,0x44442)] ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44442)]); return_state[3] = __ldg(&T02[__byte_perm(state[7].x,0,0x44443)]) ^ __ldg(&T12[__byte_perm(state[6].x,0,0x44443)]) ^ shared[2][__byte_perm(state[5].x,0,0x44443)] ^ shared[3][__byte_perm(state[4].x,0,0x44443)] ^ shared[4][__byte_perm(state[3].x,0,0x44443)] ^ shared[5][__byte_perm(state[2].x,0,0x44443)] ^ __ldg(&T62[__byte_perm(state[1].x,0,0x44443)]) ^ __ldg(&T72[__byte_perm(state[0].x,0,0x44443)]); return_state[4] = __ldg(&T02[__byte_perm(state[7].y,0,0x44440)]) ^ shared[1][__byte_perm(state[6].y,0,0x44440)] ^ __ldg(&T22[__byte_perm(state[5].y,0,0x44440)]) ^ shared[3][__byte_perm(state[4].y,0,0x44440)] ^ shared[4][__byte_perm(state[3].y,0,0x44440)] ^ shared[5][__byte_perm(state[2].y,0,0x44440)] ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44440)]) ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44440)]); return_state[5] = __ldg(&T02[__byte_perm(state[7].y,0,0x44441)]) ^ __ldg(&T12[__byte_perm(state[6].y,0,0x44441)]) ^ shared[2][__byte_perm(state[5].y,0,0x44441)] ^ shared[3][__byte_perm(state[4].y,0,0x44441)] ^ shared[4][__byte_perm(state[3].y,0,0x44441)] ^ shared[5][__byte_perm(state[2].y,0,0x44441)] ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44441)]) ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44441)]); return_state[6] = __ldg(&T02[__byte_perm(state[7].y,0,0x44442)]) ^ __ldg(&T12[__byte_perm(state[6].y,0,0x44442)]) ^ shared[2][__byte_perm(state[5].y,0,0x44442)] ^ shared[3][__byte_perm(state[4].y,0,0x44442)] ^ shared[4][__byte_perm(state[3].y,0,0x44442)] ^ shared[5][__byte_perm(state[2].y,0,0x44442)] ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44442)]) ^ 
__ldg(&T72[__byte_perm(state[0].y,0,0x44442)]); return_state[7] = __ldg(&T02[__byte_perm(state[7].y,0,0x44443)]) ^ shared[1][__byte_perm(state[6].y,0,0x44443)] ^ __ldg(&T22[__byte_perm(state[5].y,0,0x44443)]) ^ shared[3][__byte_perm(state[4].y,0,0x44443)] ^ shared[4][__byte_perm(state[3].y,0,0x44443)] ^ shared[5][__byte_perm(state[2].y,0,0x44443)] ^ __ldg(&T62[__byte_perm(state[1].y,0,0x44443)]) ^ __ldg(&T72[__byte_perm(state[0].y,0,0x44443)]); } __device__ __forceinline__ static void GOST_E12(const uint2 shared[8][256],uint2 *const __restrict__ K, uint2 *const __restrict__ state){ uint2 t[ 8]; // #pragma unroll 2 for(int i=0; i<12; i++){ GOST_FS(shared,state, t); #pragma unroll 8 for(int j=0;j<8;j++) K[ j] ^= *(uint2*)&CC[i][j]; #pragma unroll 8 for(int j=0;j<8;j++) state[ j] = t[ j]; GOST_FS_LDG(shared,K, t); #pragma unroll 8 for(int j=0;j<8;j++) state[ j]^= t[ j]; #pragma unroll 8 for(int j=0;j<8;j++) K[ j] = t[ j]; } } __device__ __forceinline__ static void keccak_kernel(uint2* s){ uint2 u[5],t[5], v, w; /*theta*/ t[ 0] = vectorize(devectorize(s[ 0])^devectorize(s[ 5])); t[ 1] = vectorize(devectorize(s[ 1])^devectorize(s[ 6])); t[ 2] = vectorize(devectorize(s[ 2])^devectorize(s[ 7])); t[ 3] = vectorize(devectorize(s[ 3])^devectorize(s[ 8])); t[ 4] = s[4]; /*theta*/ #pragma unroll 5 for(int j=0;j<5;j++){ u[ j] = ROL2(t[ j], 1); } s[ 4] = xor3x(s[ 4], t[3], u[ 0]); s[24] = s[19] = s[14] = s[ 9] = t[ 3] ^ u[ 0]; s[ 0] = xor3x(s[ 0], t[4], u[ 1]); s[ 5] = xor3x(s[ 5], t[4], u[ 1]); s[20] = s[15] = s[10] = t[4] ^ u[ 1]; s[ 1] = xor3x(s[ 1], t[0], u[ 2]); s[ 6] = xor3x(s[ 6], t[0], u[ 2]); s[21] = s[16] = s[11] = t[0] ^ u[ 2]; s[ 2] = xor3x(s[ 2], t[1], u[ 3]); s[ 7] = xor3x(s[ 7], t[1], u[ 3]); s[22] = s[17] = s[12] = t[1] ^ u[ 3]; s[ 3] = xor3x(s[ 3], t[2], u[ 4]);s[ 8] = xor3x(s[ 8], t[2], u[ 4]); s[23] = s[18] = s[13] = t[2] ^ u[ 4]; /* rho pi: b[..] = rotl(a[..], ..) 
*/ v = s[1]; s[1] = ROL2(s[6], 44); s[6] = ROL2(s[9], 20); s[9] = ROL2(s[22], 61); s[22] = ROL2(s[14], 39); s[14] = ROL2(s[20], 18); s[20] = ROL2(s[2], 62); s[2] = ROL2(s[12], 43); s[12] = ROL2(s[13], 25); s[13] = ROL8(s[19]); s[19] = ROR8(s[23]); s[23] = ROL2(s[15], 41); s[15] = ROL2(s[4], 27); s[4] = ROL2(s[24], 14); s[24] = ROL2(s[21], 2); s[21] = ROL2(s[8], 55); s[8] = ROL2(s[16], 45); s[16] = ROL2(s[5], 36); s[5] = ROL2(s[3], 28); s[3] = ROL2(s[18], 21); s[18] = ROL2(s[17], 15); s[17] = ROL2(s[11], 10); s[11] = ROL2(s[7], 6); s[7] = ROL2(s[10], 3); s[10] = ROL2(v, 1); /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ #pragma unroll 5 for(int j=0;j<25;j+=5){ v=s[j];w=s[j + 1];s[j] = chi(v,w,s[j+2]);s[j+1] = chi(w,s[j+2],s[j+3]);s[j+2]=chi(s[j+2],s[j+3],s[j+4]);s[j+3]=chi(s[j+3],s[j+4],v);s[j+4]=chi(s[j+4],v,w); } /* iota: a[0,0] ^= round constant */ s[0] ^= keccak_round_constants[ 0]; for (int i = 1; i < 23; i++) { /*theta*/ #pragma unroll 5 for(int j=0;j<5;j++){ t[ j] = vectorize(xor5(devectorize(s[ j]),devectorize(s[j+5]),devectorize(s[j+10]),devectorize(s[j+15]),devectorize(s[j+20]))); } /*theta*/ #pragma unroll 5 for(int j=0;j<5;j++){ u[ j] = ROL2(t[ j], 1); } s[ 4] = xor3x(s[ 4], t[3], u[ 0]);s[ 9] = xor3x(s[ 9], t[3], u[ 0]);s[14] = xor3x(s[14], t[3], u[ 0]);s[19] = xor3x(s[19], t[3], u[ 0]);s[24] = xor3x(s[24], t[3], u[ 0]); s[ 0] = xor3x(s[ 0], t[4], u[ 1]);s[ 5] = xor3x(s[ 5], t[4], u[ 1]);s[10] = xor3x(s[10], t[4], u[ 1]);s[15] = xor3x(s[15], t[4], u[ 1]);s[20] = xor3x(s[20], t[4], u[ 1]); s[ 1] = xor3x(s[ 1], t[0], u[ 2]);s[ 6] = xor3x(s[ 6], t[0], u[ 2]);s[11] = xor3x(s[11], t[0], u[ 2]);s[16] = xor3x(s[16], t[0], u[ 2]);s[21] = xor3x(s[21], t[0], u[ 2]); s[ 2] = xor3x(s[ 2], t[1], u[ 3]);s[ 7] = xor3x(s[ 7], t[1], u[ 3]);s[12] = xor3x(s[12], t[1], u[ 3]);s[17] = xor3x(s[17], t[1], u[ 3]);s[22] = xor3x(s[22], t[1], u[ 3]); s[ 3] = xor3x(s[ 3], t[2], u[ 4]);s[ 8] = xor3x(s[ 8], t[2], u[ 4]);s[13] = xor3x(s[13], t[2], u[ 4]);s[18] = xor3x(s[18], t[2], u[ 4]);s[23] = xor3x(s[23], t[2], u[ 4]); /* rho pi: b[..] = rotl(a[..], ..) */ v = s[1]; s[1] = ROL2(s[6], 44); s[6] = ROL2(s[9], 20); s[9] = ROL2(s[22], 61); s[22] = ROL2(s[14], 39); s[14] = ROL2(s[20], 18); s[20] = ROL2(s[2], 62); s[2] = ROL2(s[12], 43); s[12] = ROL2(s[13], 25); s[13] = ROL8(s[19]); s[19] = ROR8(s[23]); s[23] = ROL2(s[15], 41); s[15] = ROL2(s[4], 27); s[4] = ROL2(s[24], 14); s[24] = ROL2(s[21], 2); s[21] = ROL2(s[8], 55); s[8] = ROL2(s[16], 45); s[16] = ROL2(s[5], 36); s[5] = ROL2(s[3], 28); s[3] = ROL2(s[18], 21); s[18] = ROL2(s[17], 15); s[17] = ROL2(s[11], 10); s[11] = ROL2(s[7], 6); s[7] = ROL2(s[10], 3); s[10] = ROL2(v, 1); /* chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] */ #pragma unroll 5 for(int j=0;j<25;j+=5){ v=s[j];w=s[j + 1];s[j] = chi(v,w,s[j+2]);s[j+1] = chi(w,s[j+2],s[j+3]);s[j+2]=chi(s[j+2],s[j+3],s[j+4]);s[j+3]=chi(s[j+3],s[j+4],v);s[j+4]=chi(s[j+4],v,w); } /* iota: a[0,0] ^= round constant */ s[0] ^= keccak_round_constants[i]; } //theta #pragma unroll 5 for(int j=0;j<5;j++){ t[ j] = xor3x(xor3x(s[j+0],s[j+5],s[j+10]),s[j+15],s[j+20]); } //theta #pragma unroll 5 for(int j=0;j<5;j++){ u[ j] = ROL2(t[ j], 1); } s[ 9] = xor3x(s[ 9], t[3], u[ 0]); s[24] = xor3x(s[24], t[3], u[ 0]); s[ 0] = xor3x(s[ 0], t[4], u[ 1]); s[10] = xor3x(s[10], t[4], u[ 1]); s[ 6] = xor3x(s[ 6], t[0], u[ 2]); s[16] = xor3x(s[16], t[0], u[ 2]); s[12] = xor3x(s[12], t[1], u[ 3]); s[22] = xor3x(s[22], t[1], u[ 3]); s[ 3] = xor3x(s[ 3], t[2], u[ 4]); s[18] = xor3x(s[18], t[2], u[ 4]); // rho pi: b[..] = rotl(a[..], ..) 
s[ 1] = ROL2(s[ 6], 44); s[ 2] = ROL2(s[12], 43); s[ 5] = ROL2(s[ 3], 28); s[ 7] = ROL2(s[10], 3); s[ 3] = ROL2(s[18], 21); s[ 4] = ROL2(s[24], 14); s[ 6] = ROL2(s[ 9], 20); s[ 8] = ROL2(s[16], 45); s[ 9] = ROL2(s[22], 61); // chi: a[i,j] ^= ~b[i,j+1] & b[i,j+2] v=s[ 0];w=s[ 1];s[ 0] = chi(v,w,s[ 2]);s[ 1] = chi(w,s[ 2],s[ 3]);s[ 2]=chi(s[ 2],s[ 3],s[ 4]);s[ 3]=chi(s[ 3],s[ 4],v);s[ 4]=chi(s[ 4],v,w); v=s[ 5];w=s[ 6];s[ 5] = chi(v,w,s[ 7]);s[ 6] = chi(w,s[ 7],s[ 8]);s[ 7]=chi(s[ 7],s[ 8],s[ 9]); // iota: a[0,0] ^= round constant s[0] ^= keccak_round_constants[23]; } __device__ __forceinline__ static void streebog_kernel(const uint2 shared[8][256],uint2* s){ uint2 buf[8], t[8], temp[8],K0[8]; K0[0] = vectorize(0x74a5d4ce2efc83b3); #pragma unroll 8 for(int i=0;i<8;i++){ buf[ i] = K0[ 0] ^ s[ i]; } // #pragma unroll 11 for(int i=0; i<12; i++){ GOST_FS(shared,buf, temp); #pragma unroll 8 for(int j=0;j<8;j++){ buf[ j] = temp[ j] ^ *(uint2*)&precomputed_values[i][j]; } } #pragma unroll 8 for(int j=0;j<8;j++){ buf[ j]^= s[ j]; } #pragma unroll 8 for(int j=0;j<8;j++){ K0[ j] = buf[ j]; } K0[7].y ^= 0x00020000; GOST_FS(shared,K0, t); #pragma unroll 8 for(int i=0;i<8;i++) K0[ i] = t[ i]; t[7].y ^= 0x01000000; GOST_E12(shared,K0, t); #pragma unroll 8 for(int j=0;j<8;j++) buf[ j] ^= t[ j]; buf[7].y ^= 0x01000000; GOST_FS(shared,buf,K0); buf[7].y ^= 0x00020000; #pragma unroll 8 for(int j=0;j<8;j++) t[ j] = K0[ j]; t[7].y ^= 0x00020000; GOST_E12(shared,K0, t); #pragma unroll 8 for(int j=0;j<8;j++) buf[ j] ^= t[ j]; GOST_FS(shared,buf,K0); // K = F(h) s[7]+= vectorize(0x0100000000000000); #pragma unroll 8 for(int j=0;j<8;j++) t[ j] = K0[ j] ^ s[ j]; GOST_E12(shared,K0, t); #pragma unroll 8 for(int i=0;i<8;i++) s[i] = s[ i] ^ buf[ i] ^ t[ i]; } #define TPB 256 __global__ __launch_bounds__(TPB,3) void keccak_streebog_gpu_hash_64(const uint32_t threads,uint64_t *g_hash) { const uint32_t thread = (blockDim.x * blockIdx.x + threadIdx.x); uint2 s[25]; __shared__ uint2 shared[8][256]; shared[0][threadIdx.x] = __ldg(&T02[threadIdx.x]); shared[1][threadIdx.x] = __ldg(&T12[threadIdx.x]); shared[2][threadIdx.x] = __ldg(&T22[threadIdx.x]); shared[3][threadIdx.x] = __ldg(&T32[threadIdx.x]); shared[4][threadIdx.x] = __ldg(&T42[threadIdx.x]); shared[5][threadIdx.x] = __ldg(&T52[threadIdx.x]); shared[6][threadIdx.x] = __ldg(&T62[threadIdx.x]); shared[7][threadIdx.x] = __ldg(&T72[threadIdx.x]); // shared[threadIdx.x] = __ldg(&T02[threadIdx.x]); // shared[256+threadIdx.x] = __ldg(&T12[threadIdx.x]); // shared[512+threadIdx.x] = __ldg(&T22[threadIdx.x]); // shared[768+threadIdx.x] = __ldg(&T32[threadIdx.x]); // shared[1024+threadIdx.x] = __ldg(&T42[threadIdx.x]); // shared[1280+threadIdx.x] = T52[threadIdx.x]; // shared[1536+threadIdx.x] = T62[threadIdx.x]; // shared[1792+threadIdx.x] = T72[threadIdx.x]; uint64_t* inout = &g_hash[thread<<3]; __threadfence_block(); *(uint2x4*)&s[ 0] = __ldg4((uint2x4 *)&inout[ 0]); *(uint2x4*)&s[ 4] = __ldg4((uint2x4 *)&inout[ 4]); s[8] = make_uint2(1,0x80000000); keccak_kernel(s); streebog_kernel(shared,s); *(uint2x4*)&inout[ 0] = *(uint2x4*)&s[ 0]; *(uint2x4*)&inout[ 4] = *(uint2x4*)&s[ 4]; } __host__ void keccak_streebog_cpu_hash_64(int thr_id, uint32_t threads, uint32_t *d_hash) { dim3 grid((threads + TPB-1) / TPB); dim3 block(TPB); keccak_streebog_gpu_hash_64<<<grid, block>>>(threads,(uint64_t*)d_hash); }
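A hedged usage sketch for the host wrapper above. The helper name hash_batch_example and its arguments are placeholders rather than part of the miner's API; the only facts taken from the code are that each thread hashes one 64-byte (8 x uint64_t) record in place at &g_hash[thread << 3], that the grid is rounded up to whole 256-thread blocks, and that the kernel performs no bounds check, so the buffer is sized for the rounded-up thread count.

#include <cuda_runtime.h>
#include <stdint.h>
#include <stdio.h>

static void hash_batch_example(int thr_id, uint32_t nonce_count)
{
	const uint32_t tpb = 256;                                  // must match TPB above
	uint32_t padded = ((nonce_count + tpb - 1) / tpb) * tpb;   // kernel has no bounds check
	uint32_t *d_hash = NULL;

	// One 64-byte (8 x uint64_t) hash slot per thread; the kernel reads the
	// input from this buffer and overwrites it with the keccak+streebog result.
	cudaMalloc(&d_hash, (size_t) padded * 8 * sizeof(uint64_t));
	// ... fill d_hash with the 64-byte inputs produced by the previous stage ...

	keccak_streebog_cpu_hash_64(thr_id, nonce_count, d_hash);

	cudaError_t err = cudaDeviceSynchronize();
	if (err != cudaSuccess)
		printf("keccak+streebog hash failed: %s\n", cudaGetErrorString(err));

	cudaFree(d_hash);
}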
* COMPILATION TIP * nvcc main_draft1.cu ../grid2d/grid2d.cu ../grid2d/sysparam.cu ../dynam/XORMRGgens.cu ../common/gridsetup.cu -o main * * */ #include "../grid2d/grid2d.h" // Spins2d (struct) #include "../grid2d/sysparam.h" // Sysparam, Avg, TransProb, Sysparam_ptr, Avg_ptr, TransProb_ptr, constTransProb #include "../dynam/XORMRGgens.h" // devStatesXOR, devStatesMRG, devStatesPhilox4_32_10_t #include "../common/gridsetup.h" // get_maxGridSize() //#include "../boundary/boundary.h" // periodic boundary conditions as inline __device__ function periodic #include <algorithm> // std::min #include <cooperative_groups.h> // this should go with metropolis.h, initialize_allup_kernel #include <iostream> #include <chrono> namespace cg = cooperative_groups; // this should go with metropolis.h, initialize_allup_kernel /** @fn init_allup_partialsumM * @brief initialize spins all up and calculate partial sums for magnetization M * @details 1st part of initialize_allup_kernel, 2nd. part is block_sumM * */ __device__ int init_allup_partialsumM(int* Sptr,size_t Lx,size_t Ly) { int sum=0; // partial sum of the magnetization M // global thread index, k_x = 0,1,...N_x*M_x, k_y = 0,1,...N_y*M_y unsigned int k_x = threadIdx.x + blockDim.x * blockIdx.x ; unsigned int k_y = threadIdx.y + blockDim.y * blockIdx.y ; unsigned int k = k_x + gridDim.x * blockDim.x * k_y; for (unsigned int idx = k; idx < Lx*Ly/4; idx+= blockDim.x * gridDim.x * blockDim.y * gridDim.y ) { reinterpret_cast<int4*>(Sptr)[idx] = {1,1,1,1} ; int4 s4 = ((int4*) Sptr)[idx]; sum += s4.x + s4.y + s4.z + s4.w; } // process remaining elements for (unsigned int idx = k + Lx*Ly/4 *4; idx < Lx*Ly; idx += 4) { Sptr[idx] = 1; sum += Sptr[idx]; } return sum; }; /** @fn blocksumM * @brief reduce sum on thread block of partial sums of spins for magnetization M * @details 2nd. part of initialize_allup_kernel, 1st. 
part is init_allup_partialsumM * */ __device__ int block_sumM(cg::thread_group tg, int* temp, int sumresult) { unsigned int lane = tg.thread_rank(); // Each iteration halves number of active threads // Each thread adds to partial sum[i] its sum[lane+i] for (unsigned int idx = tg.size()/2; idx >0; idx/=2) { // load the array values with this thread block into temp temp[lane] = sumresult; tg.sync(); // wait for all threads to store into temp if (lane<idx) { sumresult += temp[lane+idx]; } tg.sync(); // wait for all threads to load } return sumresult; // note: only thread 0 will return full sum }; /** @fn calcE * @brief computes E, a summation of all unique nearest neighbor pairs of spins * @details do summation in shared memory, that include halo cells of width 1 "to the right" * */ __device__ int calcE(cg::thread_group & tg, int* Sptr, int* temp, size_t Lx, size_t Ly, const float J) { int resultE =0; const int RAD = 1; // "radius" of "halo" cells, of width 1 (in this case) // old way of thread, block indexing unsigned int k_x = threadIdx.x + blockDim.x * blockIdx.x ; unsigned int k_y = threadIdx.y + blockDim.y * blockIdx.y ; unsigned int S_x = static_cast<int>(blockDim.x + RAD); unsigned int S_y = static_cast<int>(blockDim.y + RAD); unsigned int s_x = threadIdx.x + RAD; // s_x = 1,2,...S_x-1 unsigned int s_y = threadIdx.y + RAD; // s_y = 1,2,...S_y-1 // unsigned int k = k_x + gridDim.x * blockDim.x * k_y; // what's the point of making modular via cg::thread_group // dim3 tgtidx = tg.thread_index(); // error class "cooperative_groups::__v1::thread_group" has no member "thread_index" // use these loops to account for elements not "covered" by the threads in grid that's launched for (unsigned int l_y=k_y,idxy=0; l_y < Ly; idxy++, l_y += blockDim.y *gridDim.y) { for (unsigned int l_x=k_x, idxx=0; l_x < Lx; idxx++, l_x += blockDim.x*gridDim.x ) { int lx =0; // lx gives back global index on lattice grid of spins int ly =0; // ly gives back global index on lattice grid of spins /* 0, M_x * 1 * ... * M_x-1 * */ for (int i = threadIdx.x; i<S_x; i+=static_cast<int>(blockDim.x) ) { for (int j = threadIdx.y; j <S_y; j+= static_cast<int>(blockDim.y) ) { lx = i + static_cast<int>(blockDim.x*blockIdx.x); ly = j + static_cast<int>(blockDim.y*blockIdx.y); /* lx+idxx*gridDim.x*blockDim.x, idxx=0,1,.. to how many multiples of gridDim.x*blockDim.x for * multiples of thread grids to "cover" our lattice grid of spins. 
* (lx+idxx*gridDim.x*blockDim.x)%Lx because we want periodic boundary conditions * I try to future proof this by using inline function periodic * */ temp[i+j*S_x] = static_cast<float>( Sptr[ periodic((lx+idxx*gridDim.x*blockDim.x),Lx) + blockDim.x * gridDim.x * periodic((ly + idxy*gridDim.y*blockDim.y),Ly) ] ); } } if ( l_x >= Lx || l_y >= Ly) { return resultE; } tg.sync(); // do the nearest neighbor (unique) pair of spins summation entirely in shared memory int stencilindex_x = 0; // stencil index in x-direction int stencilindex_y = 0; // stencil index in y-direction stencilindex_x = s_x - RAD; // = 0,1,...S_x-2 = (M_x+1)-2 = M_x -1 stencilindex_y = s_y - RAD; // actual calculation of E resultE += (-1.f * J) * temp[ stencilindex_x + stencilindex_y * S_x] * (temp[ stencilindex_x + 1 + stencilindex_y * S_x] + temp[ stencilindex_x + (stencilindex_y + 1)*S_x] ); } } // END of loops to make threads do "double duty" to cover other elements in our spin lattice grid that wasn't "covered" by our thread grid return resultE; } __global__ void initialize_allup_kernel(int* Sptr, Sysparam* sysparams, size_t Lx, size_t Ly, const float J) { // global thread index, k_x = 0,1,...N_x*M_x, k_y = 0,1,...N_y*M_y /* unsigned int k_x = threadIdx.x + blockDim.x * blockIdx.x ; unsigned int k_y = threadIdx.y + blockDim.y * blockIdx.y ; unsigned int k = k_x + gridDim.x * blockDim.x * k_y; for (unsigned int idx = k; idx < Lx*Ly/4; idx+= blockDim.x * gridDim.x * blockDim.y * gridDim.y ) { reinterpret_cast<int4*>(Sptr)[idx] = {1,1,1,1} ; } // process remaining elements for (unsigned int idx = k + Lx*Ly/4 *4; idx < Lx*Ly; idx += 4) { Sptr[idx] = 1; } */ // partial sum of spins for magnetization M int sum4M = init_allup_partialsumM( Sptr, Lx,Ly); extern __shared__ int temp[]; auto ttb = cg::this_thread_block(); int block_sum = block_sumM(ttb, temp, sum4M) ; if (ttb.thread_rank() == 0) { atomicAdd(&(sysparams->M), ((float) block_sum)); } int threadsumE = calcE(ttb, Sptr, temp, Lx,Ly,J); // for this thread, here's its partial sum contribution to total energy E atomicAdd(&(sysparams->E), ((float) threadsumE) ); }; /** * @fn initialize_allup * @brief "driver" function to initialize energy, spin matrix, and magnetization * */ void initialize_allup(Spins2d& spins2d, Sysparam_ptr& sysParams,const std::array<int,3> MAXGRIDSIZES,const dim3 M_is={32,32}) { // size_t L = spins2d.L; // total number of spins of system size_t Lx = spins2d.L_is[0]; // total number of spins of system size_t Ly = spins2d.L_is[1]; // total number of spins of system const float J = spins2d.J; unsigned int RAD = 1; // "radius" or width of "halo" cells needed /* ========== (thread) grid,block dims ========== */ unsigned long MAX_BLOCKS_y = (MAXGRIDSIZES[1] + M_is.y - 1)/ M_is.y; // notice how we're only launching 1/4 of Ly threads in y-direction needed unsigned int N_y = std::min( MAX_BLOCKS_y, ((Ly/4 + M_is.y - 1)/ M_is.y)); unsigned long MAX_BLOCKS_x = (MAXGRIDSIZES[0] + M_is.x - 1)/ M_is.x; // notice how we're only launching 1/4 of Lx threads in x-direction needed unsigned int N_x = std::min( MAX_BLOCKS_x, ((Lx/4 + M_is.x - 1)/ M_is.x)); dim3 N_is { N_x,N_y }; // single (thread) block dims., i.e. 
number of threads in a single (thread) block int sharedBytes = (M_is.x+RAD)*(M_is.y + RAD)* sizeof(int); /* ========== END of (thread) grid,block dims ========== */ initialize_allup_kernel<<<N_is,M_is, sharedBytes>>>(spins2d.S.get(),sysParams.d_sysparams.get(),Lx,Ly,J); }; // end of function initialize_allup int main(int argc, char* argv[]) { constexpr const float initial_temp = 1.f; // typically 1. constexpr const float final_temp = 3.f; // typically 3. constexpr const float tempstep = 0.05f; // typically 0.05 // number of spins, related to 2-dim. grid size Lx x Ly std::array<size_t, 2> L_is { 1<<9, 1<<9 }; // 1<<10 = 1024 std::array<size_t, 2> L_is_small { 4, 4 }; Spins2d spins = {L_is}; Spins2d spins_small = {L_is_small}; std::cout << " L : " << spins.L_is[0]*spins.L_is[1] << std::endl; Sysparam_ptr sysparams_ptr = { initial_temp } ; TransProb_ptr transprob_ptr = { initial_temp , 1.f } ; Avg_ptr avgs_ptr; Sysparam_ptr sysparams_ptr_small = { initial_temp } ; Avg_ptr avgs_ptr_small; /* ***** (thread) grid,block dims ***** */ /* min of N_x, number of (thread) blocks on grid in x-direction, and MAX_BLOCKS allowed is * determined here */ size_t MAXGRIDSIZE = get_maxGridSize(); auto MAXGRIDSIZES = get_maxGridSizes(); std::cout << " MAXGRIDSIZE : " << MAXGRIDSIZE << std::endl; // (thread) block dims., remember max. no. threads per block is 1024, as of compute capability 5.2 dim3 M_is { 1<<5, 1<<5 }; size_t L = spins.L_is[0]*spins.L_is[1]; // doesn't output correct values for n = 1<<30 unsigned int MAX_BLOCKS_y = (MAXGRIDSIZES[1] + M_is.y - 1)/ M_is.y; // notice how we're only launching 1/4 of L threads // unsigned int N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x)); // int sharedBytes = M_x * sizeof(int); /* ***** END of (thread) grid,block dims ***** */ initialize_allup(spins,sysparams_ptr, MAXGRIDSIZES, M_is); /* sanity check */ Sysparam h_sysparams_out ; cudaMemcpy(&h_sysparams_out, sysparams_ptr.d_sysparams.get(), 1*sizeof(Sysparam), cudaMemcpyDeviceToHost); // possible error have to be of same type std::cout << " h_sysparams_out : " << h_sysparams_out.E << " " << h_sysparams_out.M << " " << h_sysparams_out.T << std::endl; }
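The initialization kernel above chains three patterns: a grid-stride partial sum per thread, a shared-memory tree reduction per block via cooperative groups (block_sumM), and one atomicAdd per block into the global total. The standalone sketch below reproduces just that reduction skeleton on plain int data; the names (block_sum, sum_kernel) and sizes are illustrative, and none of the Spins2d / Sysparam machinery is used.

#include <cooperative_groups.h>
#include <cstdio>
#include <vector>
namespace cg = cooperative_groups;

// Shared-memory tree reduction over one thread block, same shape as block_sumM.
__device__ int block_sum(cg::thread_group tg, int* temp, int val) {
    unsigned int lane = tg.thread_rank();
    for (unsigned int idx = tg.size() / 2; idx > 0; idx /= 2) {
        temp[lane] = val;
        tg.sync();                          // all partials stored
        if (lane < idx) val += temp[lane + idx];
        tg.sync();                          // all reads done before temp is overwritten
    }
    return val;                             // only thread 0 holds the full block sum
}

__global__ void sum_kernel(const int* in, int n, int* out) {
    extern __shared__ int temp[];           // blockDim.x ints
    int partial = 0;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
        partial += in[i];                   // grid-stride partial sum
    auto tb = cg::this_thread_block();
    int bsum = block_sum(tb, temp, partial);
    if (tb.thread_rank() == 0) atomicAdd(out, bsum);   // one atomic per block
}

int main() {
    const int n = 1 << 20;
    std::vector<int> h_in(n, 1);            // expected total = n
    int *d_in, *d_out, h_out = 0;
    cudaMalloc(&d_in, n * sizeof(int));
    cudaMalloc(&d_out, sizeof(int));
    cudaMemcpy(d_in, h_in.data(), n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_out, &h_out, sizeof(int), cudaMemcpyHostToDevice);

    const int threads = 256, blocks = 64;   // grid-stride loop covers the rest of n
    sum_kernel<<<blocks, threads, threads * sizeof(int)>>>(d_in, n, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("sum = %d (expected %d)\n", h_out, n);

    cudaFree(d_in); cudaFree(d_out);
    return 0;
}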
#include "GReduce.h" #include <pycaUtils.h> #include <mem.h> #include "ReduceKernel.cu" #include <gmem.h> #include <Vec2D.h> #include "CudaUtils.h" // TEST make sure boost isn't included in nvcc code #if defined(BOOST_COMPILER) int bla[-1]; #endif namespace PyCA { template<typename T, typename opers> T accumulate(T* h_temp, size_t N){ T sum = opers::identity(); for (size_t i=0; i< N; ++i) opers::iop(sum, h_temp[i]); return sum; } GReduce::GReduce() { uint size = MAX_NUMBER_REDUCE_BLOCKS * MAX_NUMBER_REDUCE_STREAMS; dmemAlloc(d_temp, size); phmemAlloc(h_temp, size); CudaUtils::CheckCUDAError(__FILE__,__LINE__, "GReduce::GReduce"); } GReduce::~GReduce() { if (d_temp) dmemFree(d_temp); if (h_temp) phmemFree(h_temp); } void getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { threads = nextPowerOf2(iDivUp(n, 2)); threads = (threads < maxThreads) ? threads : maxThreads; blocks = iDivUp(n, threads * 2); blocks = blocks < maxBlocks ? blocks : maxBlocks; } template <typename T, typename oper> void reduce(uint n, int threads, int blocks, const T *d_i, T *d_o, StreamT stream){ dim3 dimBlock(threads); dim3 dimGrid(blocks); switch (threads) { case 512: reduce_kernel<T, oper, 512><<< dimGrid, dimBlock, 0, stream>>>(d_i, d_o, n); break; case 256: reduce_kernel<T, oper, 256><<< dimGrid, dimBlock, 0, stream>>>(d_i, d_o, n); break; case 128: reduce_kernel<T, oper, 128><<< dimGrid, dimBlock, 0, stream>>>(d_i, d_o, n); break; case 64: reduce_kernel<T, oper, 64><<< dimGrid, dimBlock, 0, stream>>>(d_i, d_o, n); break; case 32: reduce_kernel<T, oper, 32><<< dimGrid, dimBlock, 0, stream>>>(d_i, d_o, n); break; case 16: reduce_kernel<T, oper, 16><<< dimGrid, dimBlock, 0, stream>>>(d_i, d_o, n); break; case 8: reduce_kernel<T, oper, 8><<< dimGrid, dimBlock, 0, stream>>>(d_i, d_o, n); break; case 4: reduce_kernel<T, oper, 4><<< dimGrid, dimBlock, 0, stream>>>(d_i, d_o, n); break; case 2: reduce_kernel<T, oper, 2><<< dimGrid, dimBlock, 0, stream>>>(d_i, d_o, n); break; case 1: reduce_kernel<T, oper, 1><<< dimGrid, dimBlock, 0, stream>>>(d_i, d_o, n); break; } } template <typename T, MATH_OPS op> void GReduce::reduce(T* h_o, const T* d_i, size_t n, bool update, StreamT stream){ int blocks, threads; typedef class MOpers<T, op> oper; getNumBlocksAndThreads(n, MAX_NUMBER_REDUCE_BLOCKS, MAX_NUMBER_REDUCE_THREADS, blocks, threads); PyCA::reduce<T, oper> (n, threads, blocks, d_i, (T*)d_temp, stream); cpyArrayD2H((T*)h_temp, (T*) d_temp, blocks); T s = accumulate<T, oper>((T*)h_temp, blocks); h_o[0] = update ? 
oper::op(h_o[0], s) : s;
}

template <typename T, typename oper, typename oper1>
inline void reduce(size_t n, int threads, int blocks, const T *d_i, T *d_o, StreamT stream){
    dim3 dimBlock(threads, 1, 1);
    dim3 dimGrid(blocks, 1, 1);
    int smem = threads * sizeof(T);

    switch (threads)
    {
        case 512: reduce_kernel<T, oper, oper1, 512><<< dimGrid, dimBlock, smem, stream>>>(d_i, d_o, n); break;
        case 256: reduce_kernel<T, oper, oper1, 256><<< dimGrid, dimBlock, smem, stream>>>(d_i, d_o, n); break;
        case 128: reduce_kernel<T, oper, oper1, 128><<< dimGrid, dimBlock, smem, stream>>>(d_i, d_o, n); break;
        case 64: reduce_kernel<T, oper, oper1, 64><<< dimGrid, dimBlock, smem, stream>>>(d_i, d_o, n); break;
        case 32: reduce_kernel<T, oper, oper1, 32><<< dimGrid, dimBlock, smem, stream>>>(d_i, d_o, n); break;
        case 16: reduce_kernel<T, oper, oper1, 16><<< dimGrid, dimBlock, smem, stream>>>(d_i, d_o, n); break;
        case 8: reduce_kernel<T, oper, oper1, 8><<< dimGrid, dimBlock, smem, stream>>>(d_i, d_o, n); break;
        case 4: reduce_kernel<T, oper, oper1, 4><<< dimGrid, dimBlock, smem, stream>>>(d_i, d_o, n); break;
        case 2: reduce_kernel<T, oper, oper1, 2><<< dimGrid, dimBlock, smem, stream>>>(d_i, d_o, n); break;
        case 1: reduce_kernel<T, oper, oper1, 1><<< dimGrid, dimBlock, smem, stream>>>(d_i, d_o, n); break;
    }
}

template <typename T, MATH_OPS op, MATH_OPS op1>
void GReduce::compReduce(T* h_o, const T* d_i, size_t n, bool update, StreamT stream){
    typedef class MOpers<T, op> oper;
    typedef class MOpers<T, op1> oper1;

    int blocks, threads;
    getNumBlocksAndThreads(n, MAX_NUMBER_REDUCE_BLOCKS, MAX_NUMBER_REDUCE_THREADS, blocks, threads);
    PyCA::reduce<T, oper, oper1>(n, threads, blocks, d_i, (T*) d_temp, stream);

    cudaMemcpy(h_temp, d_temp, sizeof(T) * blocks, cudaMemcpyDeviceToHost);
    T s = accumulate<T, oper>((T*)h_temp, blocks);
    h_o[0] = update ? oper::op(h_o[0], s) : s;
}

/**
 * @brief Perform CUDA kernel parallel reduction
 *        s = a1 + a2 + a3 + .... + an
 * @param[in] T          Input data type (currently int, float)
 *            oper       Binary operation (+, max, min)
 *            oper1      Self data function (square, cube, sqrt, abs)
 *            blockSize  Size of block (related to the optimization problem)
 *            d_i        Input data
 *            n          Size of the input
 * @param[out] array of output reduction performed for each block
 */
template <typename T, MATH_OPS op, MATH_OPS op1>
void GReduce::bireduce(T* h_o, const T* d_i, size_t n, bool update, StreamT stream)
{
    typedef class MOpers<T, op> oper;
    typedef class MOpers<T, op1> oper1;

    const uint blockSize = MAX_NUMBER_REDUCE_THREADS;
    dim3 threads(blockSize);
    uint nBlocks = fminf(iDivUp(n, 2 * blockSize), MAX_NUMBER_REDUCE_BLOCKS);
    dim3 grids(nBlocks);

    T* d_rd0 = (T*) d_temp;
    T* d_rd1 = d_rd0 + MAX_NUMBER_REDUCE_BLOCKS;

    T* h_rd0 = (T*) h_temp;
    T* h_rd1 = h_rd0 + MAX_NUMBER_REDUCE_BLOCKS;

    biReduce_kernel<T, oper, oper1, blockSize><<<grids, threads, 0, stream>>>(d_rd0, d_rd1, d_i, n);
    cudaMemcpy(h_rd0, d_rd0, sizeof(T) * (nBlocks + MAX_NUMBER_REDUCE_BLOCKS), cudaMemcpyDeviceToHost);

    T rd0 = oper::identity();
    T rd1 = oper1::identity();

    for (uint i=0; i< nBlocks; ++i){
        oper::iop(rd0, h_rd0[i]);
        oper1::iop(rd1, h_rd1[i]);
    }

    h_o[0] = (update) ? oper::op(h_o[0], rd0) : rd0;
    h_o[1] = (update) ?
oper1::op(h_o[1], rd1) : rd1; } template <typename T, MATH_OPS op, MATH_OPS op1> void GReduce::product(T* h_o, const T*d_i, const T*d_i1, size_t n, bool update,StreamT stream) { typedef class MOpers<T, op> oper; typedef class MOpers<T, op1> oper1; const uint blockSize = MAX_NUMBER_REDUCE_THREADS; dim3 threads(blockSize); uint nBlocks = fminf(iDivUp(n, 2 * blockSize), MAX_NUMBER_REDUCE_BLOCKS); dim3 grids(nBlocks); reduceProduct_kernel<T, oper, oper1, blockSize><<<grids, threads, 0, stream>>>((T*)d_temp, d_i, d_i1, n); cudaMemcpy(h_temp, d_temp, sizeof(T) * nBlocks, cudaMemcpyDeviceToHost); T s = accumulate<T, oper>((T*)h_temp, nBlocks); h_o[0] = (update) ? oper::op(h_o[0], s) : s; } //////////////////////////////////////////////////////////////////////////////// // Instantiate for implementation //////////////////////////////////////////////////////////////////////////////// template<typename T> void GReduce::Max(T& h_o, const T* d_i, size_t n, bool update, StreamT stream){ reduce<T, MATH_Max>((T*)&h_o, d_i, n, update, stream); } template void GReduce::Max(float& h_o, const float* d_i, size_t n, bool update, StreamT stream); template void GReduce::Max(int& h_o, const int* d_i, size_t n, bool update, StreamT stream); template void GReduce::Max(uint& h_o, const uint* d_i, size_t n, bool update, StreamT stream); //////////////////////////////////////////////////////////////////////////////// template<typename T> void GReduce::Min(T& h_o, const T* d_i, size_t n, bool update, StreamT stream){ reduce<T, MATH_Min>((T*)&h_o, d_i, n, update, stream); } template void GReduce::Min(float& h_o, const float* d_i, size_t n, bool update, StreamT stream); template void GReduce::Min(int& h_o, const int* d_i, size_t n, bool update, StreamT stream); template void GReduce::Min(uint& h_o, const uint* d_i, size_t n, bool update, StreamT stream); //////////////////////////////////////////////////////////////////////////////// template<typename T> void GReduce::Sum(T& h_o, const T* d_i, size_t n, bool update, StreamT stream){ reduce<T, MATH_Add>((T*)&h_o, d_i, n, update, stream); } template void GReduce::Sum(float& h_o, const float* d_i, size_t n, bool update, StreamT stream); template void GReduce::Sum(int& h_o, const int* d_i, size_t n, bool update, StreamT stream); template void GReduce::Sum(uint& h_o, const uint* d_i, size_t n, bool update, StreamT stream); template<typename T> void GReduce::LInf(T& h_o, const T* d_i, size_t n, bool update, StreamT stream){ compReduce<T, MATH_Max, MATH_Abs>((T*)&h_o, d_i, n, update, stream); }; template void GReduce::LInf(float& h_o, const float* d_i, size_t n, bool update, StreamT stream); template void GReduce::LInf(int& h_o, const int* d_i, size_t n, bool update, StreamT stream); template void GReduce::LInf(uint& h_o, const uint* d_i, size_t n, bool update, StreamT stream); template<typename T> void GReduce::L1(T& h_o, const T* d_i, size_t n, bool update, StreamT stream){ compReduce<T, MATH_Add, MATH_Abs>((T*)&h_o, d_i, n, update, stream); }; template void GReduce::L1(float& h_o, const float* d_i, size_t n, bool update, StreamT stream); template void GReduce::L1(int& h_o, const int* d_i, size_t n, bool update, StreamT stream); template void GReduce::L1(uint& h_o, const uint* d_i, size_t n, bool update, StreamT stream); template<typename T> void GReduce::Sum2(T& h_o, const T* d_i, size_t n, bool update, StreamT stream){ compReduce<T, MATH_Add, MATH_Sqr>((T*)&h_o, d_i, n, update, stream); }; template void GReduce::Sum2(float& h_o, const float* d_i, size_t n, bool update, 
StreamT stream);
template void GReduce::Sum2(int& h_o, const int* d_i, size_t n, bool update, StreamT stream);
template void GReduce::Sum2(uint& h_o, const uint* d_i, size_t n, bool update, StreamT stream);

template<typename T>
void GReduce::MaxMin(Vec2D<T>&h_o, const T* d_i, size_t n, bool update, StreamT stream){
    bireduce<T, MATH_Max, MATH_Min>((T*)&h_o.x, d_i, n, update, stream);
}

template void GReduce::MaxMin(Vec2D<float>& h_o, const float* d_i, size_t n, bool update, StreamT stream);
template void GReduce::MaxMin(Vec2D<int>& h_o, const int* d_i, size_t n, bool update, StreamT stream);
template void GReduce::MaxMin(Vec2D<uint>& h_o, const uint* d_i, size_t n, bool update, StreamT stream);

template<typename T>
void GReduce::Dot(T& h_o, const T* d_i, const T* d_i1, size_t n, bool update, StreamT stream){
    product<T, MATH_Add, MATH_Mul>((T*)&h_o, d_i, d_i1, n, update, stream);
}

template void GReduce::Dot(float& h_o, const float* d_i, const float* d_i1, size_t n, bool update, StreamT stream);
template void GReduce::Dot(int& h_o, const int* d_i, const int* d_i1, size_t n, bool update, StreamT stream);
template void GReduce::Dot(uint& h_o, const uint* d_i, const uint* d_i1, size_t n, bool update, StreamT stream);

bool GReduce::selfTest(size_t n){
    int test = true;
    int* h_i = new int [n];
    int* h_i1 = new int [n];

    for (size_t j=0; j< n; ++j) h_i[j] = (rand() & 0xff);
    for (size_t j=0; j< n; ++j) h_i1[j] = (rand() & 0xff);

    int *d_i;
    dmemAlloc(d_i, n);
    cpyArrayH2D(d_i, h_i, n);

    int *d_i1;
    dmemAlloc(d_i1,n);
    cpyArrayH2D(d_i1,h_i1,n);

    int *d_o;
    dmemAlloc(d_o, 256);

    int h_max = -INT_MAX, h_min = INT_MAX;
    int h_LInf = 0;
    int h_sum2 = 0;
    int h_sum = 0;
    int h_L1 = 0;
    int h_dot = 0;

    for (size_t i=0; i< n; ++i) {
        h_max = fmaxf(h_max, h_i[i]);
        h_min = fminf(h_min, h_i[i]);
        h_sum += h_i[i];
        h_LInf = fmaxf(h_LInf, h_i[i]);
        h_L1 += fabsf(h_i[i]);
        h_sum2+= h_i[i]*h_i[i];
        h_dot += h_i1[i] * h_i[i];
    }

    int d_max = -INT_MAX, d_min = INT_MAX;
    int d_LInf= 0;
    int d_L1= 0;
    int d_sum2= 0;
    int d_sum = 0;
    int d_dot = 0;

    Vec2D<int> d_pair;

    this->Sum(d_sum, d_i, n);
    this->Max(d_max, d_i, n);
    this->Min(d_min, d_i, n);
    this->LInf(d_LInf,d_i, n);
    this->L1(d_L1, d_i, n);
    this->Sum2(d_sum2,d_i, n);
    this->Dot(d_dot,d_i,d_i1, n);
    this->MaxMin(d_pair, d_i, n);

    fprintf(stderr, "Maximum value from CPU %d from GPU %d\n",h_max, d_max);
    fprintf(stderr, "Minimum value from CPU %d from GPU %d\n",h_min, d_min);
    fprintf(stderr, "Total value from CPU %d from GPU %d\n",h_sum, d_sum);
    fprintf(stderr, "Maximum absolute value from CPU %d from GPU %d\n",h_LInf, d_LInf);
    fprintf(stderr, "Total square value from CPU %d from GPU %d\n",h_sum2, d_sum2);
    fprintf(stderr, "Dot product value from CPU %d from GPU %d\n",h_dot, d_dot);
    fprintf(stderr, "Max Min value from CPU %d %d from GPU %d %d\n",h_max, h_min, d_pair.x, d_pair.y);

    //Extensive test
    h_max = -INT_MAX, h_min = INT_MAX;
    h_LInf = 0;
    h_sum2 = 0;
    h_sum = 0;
    h_dot = 0;
    h_L1 = 0;

    for (int l=1; l < 10001;++l){
        h_max = fmaxf(h_max, h_i[l-1]);
        h_LInf = fmaxf(h_LInf, h_i[l-1]);
        h_min = fminf(h_min, h_i[l-1]);
        h_sum += h_i[l-1];
        h_sum2 += h_i[l-1]*h_i[l-1];
        h_dot += h_i1[l-1] * h_i[l-1];
        h_L1 += fabsf(h_i[l-1]);

        this->Sum(d_sum, d_i, l);
        this->Max(d_max, d_i, l);
        this->Min(d_min, d_i, l);
        this->LInf(d_LInf,d_i, l);
        this->L1(d_L1, d_i, l);
        this->Sum2(d_sum2,d_i, l);
        this->Dot(d_dot,d_i,d_i1, l);
        this->MaxMin(d_pair, d_i, l);

        if (d_max != h_max){ fprintf(stderr, "Max Test FAILED at %d GPU %d CPU %d\n", l, d_max, h_max ); test = false; }
        if (d_min != h_min){ fprintf(stderr, "Min Test FAILED at %d GPU %d
CPU %d\n", l, d_min, h_min ); test = false; } if (d_LInf!= h_LInf){ fprintf(stderr, "MaxAbs Test FAILED at %d GPU %d CPU %d\n", l, d_LInf, h_LInf ); test = false; } if (d_sum!= h_sum){ fprintf(stderr, "Sum Test FAILED at %d GPU %d CPU %d\n", l, d_sum, h_sum ); test = false; } if (d_sum2!= h_sum2){ fprintf(stderr, "Sum SQR Test FAILED at %d GPU %d CPU %d\n", l, d_sum2, h_sum2 ); test = false; } if (d_dot!= h_dot){ fprintf(stderr, "Dot Test FAILED at %d GPU %d CPU %d\n", l, d_dot, h_dot ); test = false; } if ( d_pair.x != h_max || d_pair.y != h_min){ fprintf(stderr, "Max Min Test FAILED at %d GPU %d %d CPU %d %d\n", l, d_pair.x, d_pair.y, h_max, h_min); test = false; } if (test == false) break; } if (test) fprintf(stderr, "Test PASSED \n"); delete []h_i1; delete []h_i; cudaFree(d_i); cudaFree(d_i1); return test; } }
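A usage sketch for the reduction interface above, mirroring the calls made by selfTest(): one GReduce instance serves several reductions over the same device array. It assumes the default update/stream arguments declared in GReduce.h (as selfTest() does) and reuses the PyCA dmemAlloc / cpyArrayH2D / dmemFree helpers already used in this file; greduce_demo itself is illustrative and not part of PyCA.

#include "GReduce.h"
#include <mem.h>
#include <Vec2D.h>
#include <vector>
#include <cstdio>

// Illustrative only: not part of PyCA.
void greduce_demo(size_t n)
{
    using namespace PyCA;

    std::vector<int> h(n, 1);              // known input: every element is 1
    int* d = NULL;
    dmemAlloc(d, n);                       // PyCA device allocation helper, as in selfTest()
    cpyArrayH2D(d, h.data(), n);

    GReduce reducer;                       // allocates its own scratch buffers
    int sum = 0, maxv = 0, sumsq = 0;
    Vec2D<int> maxmin;

    reducer.Sum   (sum,    d, n);          // expect n
    reducer.Max   (maxv,   d, n);          // expect 1
    reducer.Sum2  (sumsq,  d, n);          // sum of squares, expect n
    reducer.MaxMin(maxmin, d, n);          // expect (1, 1)

    printf("sum=%d max=%d sum2=%d maxmin=(%d,%d)\n",
           sum, maxv, sumsq, maxmin.x, maxmin.y);

    dmemFree(d);
}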
/*
 TODO: Clean up by abstracting some of the common pieces into separate funcs
         E.g., 8/16-bit renormalization
               alpha/beta application
 TODO: Try 2D tex fetch

 TODO: dp4a version:
         Preprocess by reordering:
           < [time, chan,stand,pol,cpx] int8_t
           > [time/4,chan,stand,pol,cpx,4] int8_t
           = [time/4,chan,stand,pol] int2
         Kernel should then be mostly unchanged, just using dp4a instead of fma
           dp4a: int32.mad(schar4, schar4)
         Need to use extra regs to store -imag sums
           Probably need separate kernel implems for this reason :/
           UNLESS can implement special JonesMat<dp4a> that does this
             automatically and has a conversion to JonesMat<float>.

 Cmn = Amk Akn^H
   m is the fast dim of C (and A)
   This means the conjugated dim is the slow dim
   cublasCherk does not support conjugating, so we're stuck with this
*/
/*
template<>
class Complex<Dp4a> {
	int real;
	int imag_pos;
	int imag_neg;
public:
	Complex& mad(
};
*/

#include "utils.hpp"
#include "Complex.hpp"
#include "Jones.hpp"
#include "assert.hpp"
#include "linalg_kernels.h"
#include "cuda/stream.hpp"

#define BF_USE_DIAGONAL_KERNEL 1
//#define BF_USE_DIAGONAL_KERNEL 0

template<typename T>
inline __device__
T shfl_warp_sync(T var, int srcLane, int width=warpSize) {
#if defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ >= 9
	return __shfl_sync(0xFFFFFFFF, var, srcLane, width);
#else
	return __shfl(var, srcLane, width);
#endif
}

inline __host__ __device__
int project_triangular(int i, int j) {
	// Note: Assumes i >= j
	// Note: i=slow, j=fast => lower triangle
	return i*(i+1)/2 + j;
}
inline __host__ __device__
void lift_triangular(int b, int* i, int* j) {
	// Note: Returned values obey i >= j
	// Note: i=slow, j=fast => lower triangle
	// Warning: This works up to nant=4607; beyond that, float64 is required
	*i = int((sqrtf(8*b+1)-1)/2);
	*j = b - project_triangular(*i, 0);
}

template<typename T>
__host__ __device__
inline T correct_output_for_tex_normalization(T const& output, int input_nbit) {
	T ret = output;
	if( input_nbit == 8 || input_nbit == 16 ) {
		// Correct for normalization applied during tex load
		// Note that the (2**n)-1 scale factor used in the
		// tex load introduces noise into the input
		// mantissas, which results in increased loss of
		// precision during the computation. We fix this by
		// explicitly rounding the result here to an
		// integer.
int input_scale_factor = (1 << (input_nbit - 1)) - 1; int output_scale_factor = (input_scale_factor * input_scale_factor); ret = rintf(output * float(output_scale_factor)); } return ret; } template<int M_REG, int N_REG, int M_THREAD, int N_THREAD> __device__ inline void bf_cherk_N_diagonal_kernel_compute(int tm, int tn, int tm0_extra, int tn0_extra, Complex<float> const& data, JonesMat<float> C[N_REG][M_REG], JonesRowVec<float>& C_extra) { JonesVec<float> A[M_REG]; JonesVec<float> B[N_REG]; #pragma unroll for( int rm=0; rm<M_REG; ++rm ) { A[rm] = JonesVec<float>(shfl_warp_sync(data, (tm*M_REG + rm)*2 + 0), shfl_warp_sync(data, (tm*M_REG + rm)*2 + 1)); } #pragma unroll for( int rn=0; rn<N_REG; ++rn ) { B[rn] = JonesVec<float>(shfl_warp_sync(data, (tn*N_REG + rn)*2 + 0), shfl_warp_sync(data, (tn*N_REG + rn)*2 + 1)); } #pragma unroll for( int rn=0; rn<N_REG; ++rn ) { #pragma unroll for( int rm=0; rm<M_REG; ++rm ) { C[rn][rm].mad(B[rn].conj(), A[rm].H().conj()); } } JonesVec<float> A_extra(shfl_warp_sync(data, tm0_extra*2 + 0), shfl_warp_sync(data, tm0_extra*2 + 1)); Complex<float> B_extra = shfl_warp_sync(data, tn0_extra); C_extra.x.mad(A_extra.x, B_extra.conj()); C_extra.y.mad(A_extra.y, B_extra.conj()); } // This kernel works by mapping a single warp to the lower triangle of a // block. The 32 threads each work on a 2x2 JonesMat reg tile, which leaves // 4 tiles uncomputed. Elements of these extra tiles are computed by the // first 16 threads. Unfortunately this has quite a large impact on perf. template<int M_THREAD, int N_THREAD, int M_REG, int N_REG> __global__ __launch_bounds__(M_THREAD*N_THREAD) void bf_cherk_N_diagonal_kernel(int N, int K, int nbatch, float alpha, cudaTextureObject_t A_tex, int A_nbit, int A_stride, int A_batchstride, int A_offset, float beta, float4* __restrict__ d_C, int C_stride, int C_batchstride) { int M = N; enum { NBUF = 4, // Must be 4 }; int Kres = K % NBUF; K -= Kres; int bid = blockIdx.x; int tid = threadIdx.x + threadIdx.y * M_THREAD; int bm, bn; int tm, tn; bm = bid; bn = bid; int tid_tile = tid; // Note: This maps threads such that the 4 leftover register tiles end up // as a square in the bottom corner. 
if( tid_tile >= 21 ) { tid_tile += 2; } if( tid_tile >= 28 ) { tid_tile += 2; } lift_triangular(tid_tile, &tn, &tm); int tn0_extra_out = tid / 4 + 24; int tm0_extra_out = tid % 4; int tm0_extra = tm0_extra_out; int tn0_extra = tn0_extra_out; // Buffers for input row(=col) data, stored in regs and accessed via shfl Complex<float> data[NBUF]; int input_offset0 = A_offset + bm*(M_THREAD*M_REG)*2 + tid; #define CHERK_LOAD(buf, k) \ data[buf] = \ tex1Dfetch<float2>(A_tex, (k)*A_stride*2 + input_offset) #define CHERK_COMPUTE(buf) \ bf_cherk_N_diagonal_kernel_compute \ <M_REG, N_REG, M_THREAD, N_THREAD> \ (tm, tn, tm0_extra, tn0_extra, data[buf], C, C_extra) for( int batch=blockIdx.y; batch<nbatch; batch+=gridDim.y ) { int input_offset = input_offset0 + batch*A_batchstride*2; JonesMat<float> C[N_REG][M_REG]; JonesRowVec<float> C_extra(0, 0); #pragma unroll for( int rn=0; rn<N_REG; ++rn ) { #pragma unroll for( int rm=0; rm<M_REG; ++rm ) { C[rn][rm] = JonesMat<float>(0); } } if( K >= 4 ) { CHERK_LOAD(0, 0); CHERK_LOAD(1, 1); for( int k=0; k<K-NBUF; k+=NBUF ) { __syncthreads(); CHERK_COMPUTE(0); CHERK_COMPUTE(1); CHERK_LOAD(2, k+2); CHERK_LOAD(3, k+3); __syncthreads(); CHERK_COMPUTE(2); CHERK_COMPUTE(3); CHERK_LOAD(0, k+4); CHERK_LOAD(1, k+5); } __syncthreads(); CHERK_COMPUTE(0); CHERK_COMPUTE(1); CHERK_LOAD(2, K-2); CHERK_LOAD(3, K-1); __syncthreads(); CHERK_COMPUTE(2); CHERK_COMPUTE(3); } for( int k=K; k<K+Kres; ++k ) { __syncthreads(); CHERK_LOAD(0, k); __syncthreads(); CHERK_COMPUTE(0); } #undef CHERK_COMPUTE #undef CHERK_LOAD C_extra = correct_output_for_tex_normalization(C_extra, A_nbit); int m = tm0_extra_out + M_REG*M_THREAD*bm; int n = tn0_extra_out + N_REG*M_THREAD*bn*2; if( n < N*2 && m < M ) { float4* d_C_x = &d_C[(n + 0)*C_stride + batch*C_batchstride + m]; if( alpha != 1 ) { C_extra *= alpha; } if( beta != 0 ) { JonesRowVec<float> C_old(*d_C_x); C_extra += beta * C_old; } *d_C_x = C_extra; } #pragma unroll for( int rn=0; rn<N_REG; ++rn ) { int n = rn + tn*N_REG + N_REG*M_THREAD*bn; #pragma unroll for( int rm=0; rm<M_REG; ++rm ) { int m = rm + tm*M_REG + M_REG*M_THREAD*bm; if( n < N && m < M && m <= n ) { float4* d_C_x = &d_C[(n*2 + 0)*C_stride + batch*C_batchstride + m]; float4* d_C_y = &d_C[(n*2 + 1)*C_stride + batch*C_batchstride + m]; JonesMat<float>& C_new = C[rn][rm]; C_new = correct_output_for_tex_normalization(C_new, A_nbit); if( alpha != 1 ) { C_new *= alpha; } if( beta != 0 ) { JonesMat<float> C_old(*d_C_x, *d_C_y); C_new += beta * C_old; } if( n == m ) { // Only write the xx term, not the yx over the diagonal *(Complex<float>*)d_C_x = C_new.x.x; } else { *d_C_x = C_new.x; } *d_C_y = C_new.y; } } } } // End batch loop } template<int M_THREAD, int N_THREAD, int M_REG, int N_REG> __device__ inline void bf_cherk_N_offdiagonal_kernel_compute(int tm, int tn, JonesVec<float> const s_A[M_REG][M_THREAD], JonesVec<float> const s_B[N_REG][M_THREAD], JonesMat<float> C[N_REG][M_REG]) { JonesVec<float> A[M_REG]; JonesVec<float> B[N_REG]; #pragma unroll for( int rm=0; rm<M_REG; ++rm ) { A[rm] = s_A[rm][tm]; } #pragma unroll for( int rn=0; rn<N_REG; ++rn ) { B[rn] = s_B[rn][tn]; } #pragma unroll for( int rn=0; rn<N_REG; ++rn ) { #pragma unroll for( int rm=0; rm<M_REG; ++rm ) { C[rn][rm].mad(B[rn].conj(), A[rm].H().conj()); } } } // Cmn = Amk Bkn (m = fastest-changing dim) // Cherk kernel based on xGPU // This is designed for large k template<int M_THREAD, int N_THREAD, int M_REG, int N_REG> __global__ __launch_bounds__(M_THREAD*N_THREAD) void bf_cherk_N_offdiagonal_kernel(int N, int K, int 
nbatch, float alpha, cudaTextureObject_t A_tex, int A_nbit, int A_stride, int A_batchstride, int A_offset, float beta, float4* __restrict__ d_C, int C_stride, int C_batchstride) { enum { NBUF = 4,// Must be 4 NARRAY = 2 // Must be 2 }; int Kres = K % NBUF; K -= Kres; int bid = blockIdx.x; int tid = threadIdx.x + threadIdx.y * M_THREAD; int bm, bn; int tm, tn; lift_triangular(bid, &bn, &bm); #if BF_USE_DIAGONAL_KERNEL bn += 1; // Process below the diagonal #endif tm = threadIdx.x; tn = threadIdx.y; __shared__ JonesVec<float> smem[NBUF][NARRAY][M_REG][M_THREAD]; int input_offset0 = A_offset; if( tid < M_THREAD*M_REG ) { input_offset0 += bm*(M_THREAD*M_REG) + tid; } else { input_offset0 += bn*(M_THREAD*N_REG) + (tid - M_THREAD*M_REG); } // Note: This load is not bounds-checked, but it doesn't matter when using // texture loads. #define CHERK_LOAD(buf, k) \ if( M_REG == 4 || tid < M_THREAD*M_REG * 2 ) \ (&smem[buf][0][0][0])[tid] = \ tex1Dfetch<float4>(A_tex, (k)*A_stride + input_offset) #define CHERK_COMPUTE(buf) \ bf_cherk_N_offdiagonal_kernel_compute \ <M_THREAD, N_THREAD, M_REG, N_REG> \ (tm, tn, smem[buf][0], smem[buf][1], C) for( int batch=blockIdx.y; batch<nbatch; batch+=gridDim.y ) { int input_offset = input_offset0 + batch*A_batchstride; JonesMat<float> C[N_REG][M_REG]; #pragma unroll for( int rn=0; rn<N_REG; ++rn ) { #pragma unroll for( int rm=0; rm<M_REG; ++rm ) { C[rn][rm] = JonesMat<float>(0); } } if( K >= 4 ) { CHERK_LOAD(0, 0); CHERK_LOAD(1, 1); for( int k=0; k<K-NBUF; k+=NBUF ) { __syncthreads(); CHERK_COMPUTE(0); CHERK_COMPUTE(1); CHERK_LOAD(2, k+2); CHERK_LOAD(3, k+3); __syncthreads(); CHERK_COMPUTE(2); CHERK_COMPUTE(3); CHERK_LOAD(0, k+4); CHERK_LOAD(1, k+5); } __syncthreads(); CHERK_COMPUTE(0); CHERK_COMPUTE(1); CHERK_LOAD(2, K-2); CHERK_LOAD(3, K-1); __syncthreads(); CHERK_COMPUTE(2); CHERK_COMPUTE(3); } for( int k=K; k<K+Kres; ++k ) { __syncthreads(); CHERK_LOAD(0, k); __syncthreads(); CHERK_COMPUTE(0); } #undef CHERK_COMPUTE #undef CHERK_LOAD #pragma unroll for( int rn=0; rn<N_REG; ++rn ) { int n = tn + M_THREAD*(rn + N_REG*bn); #pragma unroll for( int rm=0; rm<M_REG; ++rm ) { int m = tm + M_THREAD*(rm + M_REG*bm); int M = N; if( n < N && m < M #if !BF_USE_DIAGONAL_KERNEL && m <= n #endif ) { float4* d_C_x = &d_C[(n*2 + 0)*C_stride + batch*C_batchstride + m]; float4* d_C_y = &d_C[(n*2 + 1)*C_stride + batch*C_batchstride + m]; JonesMat<float>& C_new = C[rn][rm]; C_new = correct_output_for_tex_normalization(C_new, A_nbit); if( alpha != 1 ) { C_new *= alpha; } if( beta != 0 ) { JonesMat<float> C_old(*d_C_x, *d_C_y); C_new += beta * C_old; } #if !BF_USE_DIAGONAL_KERNEL if( n == m ) { // Only write the xx term, not the yx over the diagonal *(Complex<float>*)d_C_x = C_new.x.x; } else { *d_C_x = C_new.x; } #else *d_C_x = C_new.x; #endif *d_C_y = C_new.y; } } } } // End batch loop } void bf_cherk_N(int N, int K, int nbatch, float alpha, void const* A_ptr, BFdtype A_type, int A_stride, int A_batchstride, float beta, void* C_ptr, BFdtype C_type, int C_stride, int C_batchstride, cudaStream_t stream) { // Note: The kernel operates on 2 elements at a time and requires alignment BF_ASSERT_EXCEPTION(N % 2 == 0, BF_STATUS_UNSUPPORTED_SHAPE); BF_ASSERT_EXCEPTION(A_stride % 2 == 0, BF_STATUS_UNSUPPORTED_STRIDE); BF_ASSERT_EXCEPTION(A_batchstride % 2 == 0, BF_STATUS_UNSUPPORTED_STRIDE); BF_ASSERT_EXCEPTION(C_stride % 2 == 0, BF_STATUS_UNSUPPORTED_STRIDE); BF_ASSERT_EXCEPTION(C_batchstride % 2 == 0, BF_STATUS_UNSUPPORTED_STRIDE); enum { TEX_ALIGNMENT = 512 }; // WAR for texture alignment 
constraint. This rounds the pointer down // to the alignment (which should always be safe because cudaMalloc // returns aligned pointers) and passes the offset to the kernel. int A_byte_offset = ((uintptr_t)A_ptr - (uintptr_t)A_ptr / TEX_ALIGNMENT * TEX_ALIGNMENT); A_ptr = (uint8_t*)A_ptr - A_byte_offset; BF_ASSERT_EXCEPTION((uintptr_t)A_ptr % TEX_ALIGNMENT == 0, BF_STATUS_UNSUPPORTED_STRIDE); // TODO: Assert supported limits on N and K based on texture constraints // Note: The kernel is 2x vectorized (i.e., operates on Jones vectors) N /= 2; A_stride /= 2; A_batchstride /= 2; C_stride /= 2; C_batchstride /= 2; cudaChannelFormatKind channel_format; cudaTextureReadMode tex_read_mode; switch( A_type ) { case BF_DTYPE_CI8: // Fall-through case BF_DTYPE_CI16: channel_format = cudaChannelFormatKindSigned; tex_read_mode = cudaReadModeNormalizedFloat; break; case BF_DTYPE_CF32: channel_format = cudaChannelFormatKindFloat; tex_read_mode = cudaReadModeElementType; break; default: BF_FAIL_EXCEPTION("Supported input dtype", BF_STATUS_UNSUPPORTED_DTYPE); } BF_ASSERT_EXCEPTION(C_type == BF_DTYPE_CF32, BF_STATUS_UNSUPPORTED_DTYPE); int A_nbit = BF_DTYPE_NBIT(A_type) / 2; int element_bytes = 2 * BF_DTYPE_NBYTE(A_type); BF_ASSERT_EXCEPTION(A_byte_offset % element_bytes == 0, BF_STATUS_UNSUPPORTED_STRIDE); int A_offset = A_byte_offset / element_bytes; size_t A_nelement_total = std::max(A_stride * K, A_batchstride * nbatch) + A_offset; size_t texture_element_limit = 1 << 27; BF_ASSERT_EXCEPTION(A_nelement_total <= texture_element_limit, BF_STATUS_UNSUPPORTED_SHAPE); // Create texture object cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeLinear; resDesc.res.linear.devPtr = const_cast<void*>(A_ptr); resDesc.res.linear.desc.f = channel_format; resDesc.res.linear.desc.x = A_nbit; resDesc.res.linear.desc.y = A_nbit; resDesc.res.linear.desc.z = A_nbit; resDesc.res.linear.desc.w = A_nbit; resDesc.res.linear.sizeInBytes = A_nelement_total * 4 * (A_nbit / 8); cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = tex_read_mode; cudaTextureObject_t A_tex = 0; BF_CHECK_CUDA_EXCEPTION( cudaCreateTextureObject(&A_tex, &resDesc, &texDesc, NULL), BF_STATUS_INTERNAL_ERROR); enum { M_THREAD = 8, N_THREAD = 8, // TODO: Don't think it's possible to support this != M_THREAD //M_REG = 4, //N_REG = 4//2 //M_REG = 3, // Best at large N //N_REG = 3 M_REG = 2, N_REG = 2 // Best at small N }; dim3 block(N_THREAD, M_THREAD); size_t nblock_m = (N - 1) / (M_THREAD * M_REG) + 1; #if BF_USE_DIAGONAL_KERNEL size_t nblock = nblock_m * (nblock_m - 1) / 2; #else size_t nblock = nblock_m * (nblock_m + 1) / 2; #endif dim3 grid(nblock, std::min(nbatch, 65535)); if( nblock > 0 ) { // TODO: Replace with cudaLaunchKernel bf_cherk_N_offdiagonal_kernel<M_THREAD, N_THREAD, M_REG, N_REG> <<<grid, block, 0, stream>>> (N, K, nbatch, alpha, A_tex, A_nbit, A_stride, A_batchstride, A_offset, beta, (float4*)C_ptr, C_stride, C_batchstride); BF_CHECK_CUDA_EXCEPTION(cudaGetLastError(), BF_STATUS_INTERNAL_ERROR); } BF_CHECK_CUDA_EXCEPTION( cudaDestroyTextureObject(A_tex), BF_STATUS_INTERNAL_ERROR); #if BF_USE_DIAGONAL_KERNEL // Note: The second texture has 2x the elements because each is only // a 2-vector instead of a 4-vector. // TODO: This condition is not currently included in the linalg.cu dispatch // between this and CUBLAS, even though this can fail when CUBLAS // would work. 
Really we want to increase this limit, perhaps using // 2D textures, so that it becomes very rare to run into it. BF_ASSERT_EXCEPTION(A_nelement_total*2 <= texture_element_limit, BF_STATUS_UNSUPPORTED_SHAPE); A_offset *= 2; // Create texture object cudaResourceDesc resDesc2; memset(&resDesc2, 0, sizeof(resDesc2)); resDesc2.resType = cudaResourceTypeLinear; resDesc2.res.linear.devPtr = const_cast<void*>(A_ptr); resDesc2.res.linear.desc.f = channel_format; resDesc2.res.linear.desc.x = A_nbit; resDesc2.res.linear.desc.y = A_nbit; resDesc2.res.linear.desc.z = 0; resDesc2.res.linear.desc.w = 0; resDesc2.res.linear.sizeInBytes = (A_nelement_total*2) * 2 * (A_nbit / 8); cudaTextureDesc texDesc2; memset(&texDesc2, 0, sizeof(texDesc2)); texDesc2.readMode = tex_read_mode; cudaTextureObject_t A_tex2 = 0; BF_CHECK_CUDA_EXCEPTION( cudaCreateTextureObject(&A_tex2, &resDesc2, &texDesc2, NULL), BF_STATUS_INTERNAL_ERROR); // TODO: Clean this up a bit grid.x = nblock_m; enum { N_THREAD_DIAG = 4 }; block.y = N_THREAD_DIAG; bf_cherk_N_diagonal_kernel<M_THREAD, N_THREAD_DIAG, 2, 2> <<<grid, block, 0, stream>>> (N, K, nbatch, alpha, A_tex2, A_nbit, A_stride, A_batchstride, A_offset, beta, (float4*)C_ptr, C_stride, C_batchstride); BF_CHECK_CUDA_EXCEPTION(cudaGetLastError(), BF_STATUS_INTERNAL_ERROR); BF_CHECK_CUDA_EXCEPTION( cudaDestroyTextureObject(A_tex2), BF_STATUS_INTERNAL_ERROR); #endif // BF_USE_DIAGONAL_KERNEL } template<int SIZE> struct shflable_type {}; template<> struct shflable_type<4> { typedef int type; }; template<> struct shflable_type<8> { typedef long long type; }; template<typename T, int WIDTH=32> inline __device__ T warp_all_sum(T x) { typedef typename shflable_type<sizeof(T)>::type shfl_type; #pragma unroll for( int k=WIDTH>>1; k>=1; k>>=1 ) { #if defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ >= 9 x += type_pun<T>(__shfl_xor_sync(0xFFFFFFFF, type_pun<shfl_type>(x), k, WIDTH)); #else x += type_pun<T>(__shfl_xor(type_pun<shfl_type>(x), k, WIDTH)); #endif } return x; } // Btcb = Asct^T Wscb // Cmbn = Akbm^T Bkbn // This is designed for small N (nbeam) such that B fits in shared mem, // K (nstand) >= 32, and large M (ntime) * nbatch enough // to fill the GPU. // It only supports TN data ordering (i.e., k (stand) is fastest dim of both // inputs, m (time) is fastest dim of output. // It is vectorized 2x so that the input elements are loaded and processed as // JonesVecs. 
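// For example, with the launch configuration used below (BLOCK_X=32,
// BLOCK_Y=16, BLOCK_M=8), each thread block covers BLOCK_Y*BLOCK_M = 128
// rows of M: the 32 threads of a warp stride over K and reduce each dot
// product with warp_all_sum, while the whole (padded) N*K tile of B is
// staged in shared memory once per batch item.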
// Note: BLOCK_X must be == warpSize (32) // Note: BLOCK_M must be <= BLOCK_X template<int N_MAX, int BLOCK_X, int BLOCK_Y, int BLOCK_M, typename AlphaType, typename AType, typename BType, typename BetaType, typename CType> __global__ __launch_bounds__(BLOCK_X*BLOCK_Y) void bf_cgemm_TN_smallM_kernel_v2(int M, int N, int K, int nbatch, AlphaType alpha, AType const* __restrict__ d_A, int A_stride, int A_batchstride, BType const* __restrict__ d_B, int B_stride, int B_batchstride, BetaType beta, CType* __restrict__ d_C, int C_stride, int C_batchstride) { typedef JonesVec<float> ComputeType; typedef Complex<float> SumType; extern __shared__ char smem[]; BType* s_B = (BType*)smem; int xi = threadIdx.x; // Acts as the k dim for input and a block of the m dim for output int y = threadIdx.y + blockIdx.y*blockDim.y; // Blocks along the m dim int zi = blockIdx.z; // Batch dimension int K_blocks = (K - 1) / BLOCK_X + 1; int s_B_stride = K_blocks * BLOCK_X; // Grid-stride loop over batches for( int z=zi; z<nbatch; z+=gridDim.z ) { // Cache all N*K elements of B for this batch in shared mem __syncthreads(); if( threadIdx.y == 0 ) { #pragma unroll for( int n=0; n<N_MAX; ++n ) { if( n < N ) { for( int x=xi; x<K_blocks*BLOCK_X; x+=BLOCK_X ) { BType B; if( x < K ) { B = d_B[n*B_stride + z*B_batchstride + x]; } else { B = BType(0, 0); } s_B[n*s_B_stride + x] = B; } } } } __syncthreads(); SumType C[N_MAX]; // Loop through this block of M for( int mi=0; mi<BLOCK_M; ++mi ) { int m = y*BLOCK_M + mi; // HACK TESTING //if( m >= M ) { // break; //} SumType C_tmp[N_MAX]; #pragma unroll for( int n=0; n<N_MAX; ++n ) { C_tmp[n] = 0; } // Loop through blocks of K for( int x=xi; x<K_blocks*BLOCK_X; x+=BLOCK_X ) { // Load a warp of elements from A ComputeType A(0, 0); // Note: Set to 0 to make extra threads safe if( m < M && x < K ) { A = d_A[m*A_stride + z*A_batchstride + x]; } // Loop through N #pragma unroll for( int n=0; n<N_MAX; ++n ) { if( n < N ) { // Load B from the shared mem cache ComputeType B; if( x < K ) { B = s_B[n*s_B_stride + x]; } else { //B = 0; // HACK TESTING } // Compute dot product over the warp //*C_tmp[n] += warp_all_sum(A * B); C_tmp[n] += warp_all_sum(A.x * B.x + A.y * B.y); } } } #pragma unroll for( int n=0; n<N_MAX; ++n ) { if( n < N ) { // Give the results to thread mi if( mi == xi ) { C[n] = C_tmp[n]; } } } } // The first BLOCK_M threads now hold the results if( xi < BLOCK_M ) { int m = y*BLOCK_M + xi; if( m < M ) { #pragma unroll for( int n=0; n<N_MAX; ++n ) { if( n < N ) { CType* d_C_nzm = &d_C[n*C_stride + z*C_batchstride + m]; if( beta == 0 ) { *d_C_nzm = alpha * C[n]; } else { *d_C_nzm = alpha * C[n] + beta * (*d_C_nzm); } } } } } } // end z loop } template<int N_MAX> void bf_cgemm_TN_smallM_staticN_v2(int M, int N, int K, int nbatch, float alpha, void const* d_A, BFdtype A_type, int A_stride, int A_batchstride, void const* d_B, BFdtype B_type, int B_stride, int B_batchstride, float beta, void* d_C, BFdtype C_type, int C_stride, int C_batchstride, cudaStream_t stream) { enum { BLOCK_X = 32, // Must be warpSize (32) BLOCK_Y = 16, // Can be tuned BLOCK_M = 8, // Must be <= BLOCK_X (can be tuned within this bound) }; BF_ASSERT_EXCEPTION(BLOCK_X == 32, BF_STATUS_INTERNAL_ERROR); BF_ASSERT_EXCEPTION(BLOCK_M <= BLOCK_X, BF_STATUS_INTERNAL_ERROR); BF_ASSERT_EXCEPTION(K % 2 == 0, BF_STATUS_UNSUPPORTED_SHAPE); BF_ASSERT_EXCEPTION(A_stride % 2 == 0, BF_STATUS_UNSUPPORTED_STRIDE); BF_ASSERT_EXCEPTION(A_batchstride % 2 == 0, BF_STATUS_UNSUPPORTED_STRIDE); BF_ASSERT_EXCEPTION(B_stride % 2 == 0, 
BF_STATUS_UNSUPPORTED_STRIDE); BF_ASSERT_EXCEPTION(B_batchstride % 2 == 0, BF_STATUS_UNSUPPORTED_STRIDE); K /= 2; A_stride /= 2; A_batchstride /= 2; B_stride /= 2; B_batchstride /= 2; dim3 block(BLOCK_X, BLOCK_Y); dim3 grid(1, (M-1)/(BLOCK_Y*BLOCK_M)+1, std::min(nbatch, 65535)); BF_ASSERT_EXCEPTION(grid.y < 65536, BF_STATUS_INTERNAL_ERROR); int K_blocks = (K - 1) / BLOCK_X + 1; int s_B_stride = K_blocks * BLOCK_X; size_t smem = N * s_B_stride * BF_DTYPE_NBYTE(B_type)*2; bool B_fits_in_shared_mem = (smem <= 48*1024); BF_ASSERT_EXCEPTION(B_fits_in_shared_mem, BF_STATUS_UNSUPPORTED); /* // TODO: Use cudaLaunchKernel instead of <<< >>> void* args[] = { &M, &K, &nbatch, &alpha, (void*)&d_A, &A_stride, &A_batchstride, (void*)&d_B, &B_stride, &B_batchstride, &beta, (void*)&d_C, &C_stride, &C_batchstride }; */ #define LAUNCH_BF_CGEMM_TN_SMALLM_KERNEL(AType, BType, CType) \ bf_cgemm_TN_smallM_kernel_v2<N_MAX, BLOCK_X, BLOCK_Y, BLOCK_M> \ <<<grid,block,smem,stream>>> \ (M, N, K, nbatch, \ alpha, \ (AType*)d_A, A_stride, A_batchstride, \ (BType*)d_B, B_stride, B_batchstride, \ beta, \ (CType*)d_C, C_stride, C_batchstride) BF_ASSERT_EXCEPTION(C_type == BF_DTYPE_CF32, BF_STATUS_UNSUPPORTED_DTYPE); switch( A_type ) { case BF_DTYPE_CI4: { switch( B_type ) { case BF_DTYPE_CI16: { LAUNCH_BF_CGEMM_TN_SMALLM_KERNEL( JonesVec<FourBit>, JonesVec<int16_t>, Complex<float>); break; } #if defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ >= 9 //case BF_DTYPE_CF16: { // LAUNCH_BF_CGEMM_TN_SMALLM_KERNEL( // JonesVec<FourBit>, JonesVec<half>, Complex<float>); // break; //} #endif case BF_DTYPE_CF32: { LAUNCH_BF_CGEMM_TN_SMALLM_KERNEL( JonesVec<FourBit>, JonesVec<float>, Complex<float>); break; } default: BF_FAIL_EXCEPTION("Supported dtype for B", BF_STATUS_UNSUPPORTED_DTYPE); } break; } case BF_DTYPE_CI8: { switch( B_type ) { case BF_DTYPE_CI16: { LAUNCH_BF_CGEMM_TN_SMALLM_KERNEL( JonesVec<int8_t>, JonesVec<int16_t>, Complex<float>); break; } #if defined(__CUDACC_VER_MAJOR__) && __CUDACC_VER_MAJOR__ >= 9 //case BF_DTYPE_CF16: { // LAUNCH_BF_CGEMM_TN_SMALLM_KERNEL( // JonesVec<int8_t>, JonesVec<half>, Complex<float>); // break; //} #endif case BF_DTYPE_CF32: { LAUNCH_BF_CGEMM_TN_SMALLM_KERNEL( JonesVec<int8_t>, JonesVec<float>, Complex<float>); break; } default: BF_FAIL_EXCEPTION("Supported dtype for B", BF_STATUS_UNSUPPORTED_DTYPE); } break; } default: BF_FAIL_EXCEPTION("Supported dtype for A", BF_STATUS_UNSUPPORTED_DTYPE); } #undef LAUNCH_BF_CGEMM_TN_SMALLM_KERNEL } void bf_cgemm_TN_smallM(int M, int N, int K, int nbatch, float alpha, void const* d_A, BFdtype A_type, int A_stride, int A_batchstride, void const* d_B, BFdtype B_type, int B_stride, int B_batchstride, float beta, void* d_C, BFdtype C_type, int C_stride, int C_batchstride, cudaStream_t stream) { #define CALL_BF_CGEMM_TN_SMALLM_STATICN(N_MAX) \ /*bf_cgemm_TN_smallM_staticN<N_MAX>*/ \ bf_cgemm_TN_smallM_staticN_v2<N_MAX> \ (M, N, K, nbatch, \ alpha, \ d_A, A_type, A_stride, A_batchstride, \ d_B, B_type, B_stride, B_batchstride, \ beta, \ d_C, C_type, C_stride, C_batchstride, \ stream) if( N <= 1 ) { CALL_BF_CGEMM_TN_SMALLM_STATICN( 1); } else if( N <= 2 ) { CALL_BF_CGEMM_TN_SMALLM_STATICN( 2); } else if( N <= 4 ) { CALL_BF_CGEMM_TN_SMALLM_STATICN( 4); } else if( N <= 8 ) { CALL_BF_CGEMM_TN_SMALLM_STATICN( 8); } else if( N <= 16 ) { CALL_BF_CGEMM_TN_SMALLM_STATICN(16); } else { BF_FAIL_EXCEPTION("Supported N in bf_cgemm_TN_smallM", BF_STATUS_UNSUPPORTED_SHAPE); } #undef CALL_BF_CGEMM_TN_SMALLM_STATICN }
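// Illustrative sketch (not part of the library API): a host-side sanity check
// of the triangular block indexing used by the cherk kernels above. It relies
// only on project_triangular()/lift_triangular() from this file; the function
// name check_triangular_indexing is invented for this example, and the block
// count is assumed to stay within the nant=4607 limit noted on lift_triangular().
static inline bool check_triangular_indexing(int nblock_m) {
	// Round-trip every linear block id through lift_triangular() and confirm
	// that the recovered (i,j) pair lies in the lower triangle and projects
	// back to the same id.
	int nblock = nblock_m * (nblock_m + 1) / 2;
	for( int b=0; b<nblock; ++b ) {
		int i, j;
		lift_triangular(b, &i, &j);
		if( !(0 <= j && j <= i && i < nblock_m) ) return false;
		if( project_triangular(i, j) != b )       return false;
	}
	return true;
}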
the_stack
namespace caffe { template<typename Dtype, typename MItype, typename MOtype> void MOELayer<Dtype, MItype, MOtype>::Forward_gpu( const vector<Blob<MItype>*>& bottom, const vector<Blob<MOtype>*>& top) { int_tp select_experts = this->layer_param().moe_param().select_experts(); // Preload gating network blobs const vector<BlobBase*>& gating_input_blobs = this->gating_net_-> input_blobs(); size_t k = 0; for (size_t l = 0; l < bottom.size(); ++l) { if (this->layer_param().moe_param().map_bottom_size() <= l || this->layer_param().moe_param().map_bottom(l) == MOEParameter_BottomMapping_GATING || this->layer_param().moe_param().map_bottom(l) == MOEParameter_BottomMapping_GATING_AND_EXPERT) { this->device_->template copy<MItype>(bottom[l]->count(), bottom[l]->gpu_data(), static_cast<Blob<MItype>*>(gating_input_blobs[k])->mutable_gpu_data()); ++k; } } // Forward gating network float loss = 0; gating_ = static_cast<Blob<MOtype>*>(this->gating_net_->Forward(&loss)[0]); MOtype* gating_data = gating_->mutable_cpu_data(); vector<int_tp> select_count(gating_->shape()[1], 0); vector<vector<int_tp> > batch_selectors; // Reset all top blobs for (size_t i = 0; i < top.size(); ++i) { vptr<MOtype> top_data = top[i]->mutable_gpu_data(); this->device_->template set<MOtype>(top[i]->count(), MOtype(0), top_data); } // Select top #select_experts for (size_t i = 0; i < gating_->shape()[0]; ++i) { vector<int_tp> expert_selectors(select_experts, -1); for (size_t j = 0; j < gating_->shape()[1]; ++j) { for (size_t k = 0; k < select_experts; ++k) { if (expert_selectors[k] == -1 || gating_data[i * gating_->shape()[1] + expert_selectors[k]] < gating_data[i * gating_->shape()[1] + j]) { for (size_t l = select_experts-1; l > k; --l) { expert_selectors[l] = expert_selectors[l - 1]; } expert_selectors[k] = j; break; } } } for(size_t k = 0; k < select_experts; ++k) { select_count[expert_selectors[k]] += 1; } batch_selectors.push_back(expert_selectors); } // Generate load balancing loss if (this->phase_ == caffe::TRAIN) { MOtype* observed_count = top[top.size()-2]->mutable_cpu_data(); MOtype* expected_count = top[top.size()-1]->mutable_cpu_data(); for (size_t j = 0; j < gating_->shape()[1]; ++j) { MOtype norm_observed = static_cast<MOtype>(select_count[j]) / static_cast<MOtype>(gating_->shape()[0]); MOtype norm_expected = static_cast<MOtype>(select_experts) / static_cast<MOtype>(gating_->shape()[1]); for (size_t i = 0; i < gating_->shape()[0]; ++i) { observed_count[i * select_count.size() + j] = norm_observed; expected_count[i * select_count.size() + j] = norm_expected; } } } // Make gating data sparse and renormalize for (size_t i = 0; i < gating_->shape()[0]; ++i) { MOtype norm = MOtype(0); for (size_t j = 0; j < gating_->shape()[1]; ++j) { MOtype select = MOtype(0); for (size_t k = 0; k < select_experts; ++k) { if (batch_selectors[i][k] == j) { // std::cout << "Select " << select_experts << ", " << i << ", " // << k << ", " << j << std::endl; select = MOtype(1); break; } } gating_data[i * gating_->shape()[1] + j] *= select; norm += gating_data[i * gating_->shape()[1] + j]; } for (size_t j = 0; j < gating_->shape()[1]; ++j) { gating_data[i * gating_->shape()[1] + j] /= norm; } } // Forward experts if (this->phase_ == caffe::TEST && !this->layer_param().moe_param().full_forward()) { // Forward expert networks (partial, only forward selected experts // per batch item) #pragma omp parallel for num_threads(this->parallel_nets_) for (size_t i = 0; i < gating_->shape()[0]; ++i) { Caffe::SelectDevice(this->device_->id(), false); #ifdef 
USE_OPENMP int_tp tidx = omp_get_thread_num(); #else // USE_OPENMP int_tp tidx = 0; #endif // USE_OPENMP this->device_->SwitchQueue(i); vector<int_tp> expert_selectors = batch_selectors[i]; for (size_t p = 0; p < select_experts; ++p) { const vector<BlobBase*>& expert_input_blobs = this->expert_nets_[expert_selectors[p]][tidx]->input_blobs(); int_tp k = 0; for (size_t l = 0; l < bottom.size(); ++l) { if (this->layer_param().moe_param().map_bottom_size() <= l || this->layer_param().moe_param().map_bottom(l) == MOEParameter_BottomMapping_EXPERT || this->layer_param().moe_param().map_bottom(l) == MOEParameter_BottomMapping_GATING_AND_EXPERT) { this->device_->template copy<MItype>(bottom[l]->count(1), bottom[l]->gpu_data() + i * bottom[l]->count(1), static_cast<Blob<MItype>*>( expert_input_blobs[k])->mutable_gpu_data()); ++k; } } const vector<BlobBase*> result_vec = this->expert_nets_[expert_selectors[p]][tidx]-> Forward(&loss); for (size_t k = 0; k < result_vec.size(); ++k) { Blob<MOtype>* result = static_cast<Blob<MOtype>*>(result_vec[k]); this->device_->template axpby<MOtype>( top[k]->count(1), gating_data[i * gating_->shape()[1] + expert_selectors[p]], result->gpu_data(), MOtype(1), top[k]->mutable_gpu_data() + i * top[k]->count(1)); } } } this->device_->FinishQueues(); } else { // Forward expert networks (full batch) for (size_t j = 0; j < this->expert_nets_.size(); ++j) { const vector<BlobBase*>& expert_input_blobs = this->expert_nets_[j][0]-> input_blobs(); int_tp k = 0; for (size_t l = 0; l < bottom.size(); ++l) { if (this->layer_param().moe_param().map_bottom_size() <= l || this->layer_param().moe_param().map_bottom(l) == MOEParameter_BottomMapping_EXPERT || this->layer_param().moe_param().map_bottom(l) == MOEParameter_BottomMapping_GATING_AND_EXPERT) { this->device_->template copy<MItype>(bottom[l]->count(), bottom[l]->gpu_data(), static_cast<Blob<MItype>*>(expert_input_blobs[k])-> mutable_gpu_data()); ++k; } } const vector<BlobBase*> result_vec = this->expert_nets_[j][0]->Forward(&loss); for (size_t k = 0; k < result_vec.size(); ++k) { Blob<MOtype>* result = static_cast<Blob<MOtype>*>(result_vec[k]); for (size_t i = 0; i < gating_->shape()[0]; ++i) { this->device_->template axpby( top[k]->count(1), gating_data[i * gating_->shape()[1] + j], result->gpu_data() + i * top[k]->count(1), MOtype(1), top[k]->mutable_gpu_data() + i * top[k]->count(1)); } } } } } template<typename Dtype, typename MItype, typename MOtype> void MOELayer<Dtype, MItype, MOtype>::Backward_gpu( const vector<Blob<MOtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<MItype>*>& bottom) { // Reset all bottom blob diffs for (size_t i = 0; i < bottom.size(); ++i) { MItype* bottom_diff = bottom[i]->mutable_cpu_diff(); caffe_set(bottom[i]->count(), MItype(0), bottom_diff); } // Set gating diff to load balancing diff const MOtype* gating_data = gating_->cpu_data(); MOtype* gating_diff = gating_->mutable_cpu_diff(); const MOtype* observed_diff = top[top.size()-2]->cpu_diff(); caffe_copy(gating_->count(), observed_diff, gating_diff); // Backward all experts for (size_t j = 0; j < this->expert_nets_.size(); ++j) { const vector<BlobBase*>& expert_output_blobs = this->expert_nets_[j][0]-> output_blobs(); for (size_t k = 0; k < expert_output_blobs.size(); ++k) { for (size_t i = 0; i < gating_->shape()[0]; ++i) { // Compute diff w.r.t expert outputs this->device_->template scale<MOtype>(top[k]->count(1), gating_data[i * gating_->shape()[1] + j], top[k]->gpu_diff() + i * top[k]->count(1), 
static_cast<Blob<MOtype>*>(expert_output_blobs[k])-> mutable_gpu_diff() + i * top[k]->count(1)); // Compute diff w.r.t gating outputs gating_diff[i * gating_->shape()[1] + j] += caffe_dot(top[k]->count(1), top[k]->cpu_diff() + i * top[k]->count(1), static_cast<Blob<MOtype>*>(expert_output_blobs[k])-> cpu_data() + i * top[k]->count(1)); } } // Backward expert networks (full) this->expert_nets_[j][0]->Backward(); const vector<BlobBase*>& expert_input_blobs = this->expert_nets_[j][0]-> input_blobs(); int_tp k = 0; for (size_t l = 0; l < bottom.size(); ++l) { if (this->layer_param().moe_param().map_bottom_size() <= l || this->layer_param().moe_param().map_bottom(l) == MOEParameter_BottomMapping_EXPERT || this->layer_param().moe_param().map_bottom(l) == MOEParameter_BottomMapping_GATING_AND_EXPERT) { if (propagate_down[l]) { this->device_->template axpby<MItype>( bottom[l]->count(), MItype(1), static_cast<Blob<MItype>*>(expert_input_blobs[k])->gpu_diff(), MItype(1), bottom[l]->mutable_gpu_diff()); } ++k; } } } // Backward gating network this->gating_net_->Backward(); const vector<BlobBase*>& gating_input_blobs = this->gating_net_-> input_blobs(); size_t k = 0; for (size_t l = 0; l < bottom.size(); ++l) { if (this->layer_param().moe_param().map_bottom_size() <= l || this->layer_param().moe_param().map_bottom(l) == MOEParameter_BottomMapping_GATING || this->layer_param().moe_param().map_bottom(l) == MOEParameter_BottomMapping_GATING_AND_EXPERT) { if (propagate_down[l]) { this->device_->template axpby<MItype>( bottom[l]->count(), MItype(1), static_cast<Blob<MItype>*>(gating_input_blobs[k])->gpu_diff(), MItype(1), bottom[l]->mutable_gpu_diff()); } ++k; } } } INSTANTIATE_CLASST_FUNC_3T_GUARDED(MOELayer, Forward_gpu, (half_fp), (half_fp), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(MOELayer, Forward_gpu, (float), (float), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(MOELayer, Forward_gpu, (double), (double), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(MOELayer, Forward_gpu, (uint8_t), (uint8_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(MOELayer, Forward_gpu, (uint16_t), (uint16_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(MOELayer, Forward_gpu, (uint32_t), (uint32_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(MOELayer, Forward_gpu, (uint64_t), (uint64_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(MOELayer, Backward_gpu, (half_fp), (half_fp), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(MOELayer, Backward_gpu, (float), (float), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(MOELayer, Backward_gpu, (double), (double), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(MOELayer, Backward_gpu, (uint8_t), (uint8_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(MOELayer, Backward_gpu, (uint16_t), (uint16_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(MOELayer, Backward_gpu, (uint32_t), (uint32_t), PROTO_TYPES); INSTANTIATE_CLASST_FUNC_3T_GUARDED(MOELayer, Backward_gpu, (uint64_t), (uint64_t), PROTO_TYPES); } // namespace caffe
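// Illustrative sketch (standalone, not part of MOELayer): the top-k gating
// selection and sparse renormalization that Forward_gpu applies per batch
// item, written for a plain vector of gating scores. The helper name
// select_and_renormalize_gating is invented for this example, and
// select_experts is assumed to be <= the number of experts.
#include <vector>
#include <numeric>
#include <algorithm>

static std::vector<float> select_and_renormalize_gating(
    const std::vector<float>& gating, int select_experts) {
  // Rank experts by gating score and keep only the top select_experts.
  std::vector<int> idx(gating.size());
  std::iota(idx.begin(), idx.end(), 0);
  std::partial_sort(idx.begin(), idx.begin() + select_experts, idx.end(),
                    [&](int a, int b) { return gating[a] > gating[b]; });
  // Zero out the non-selected experts and renormalize the survivors to sum
  // to 1, mirroring the "make gating data sparse and renormalize" step above.
  std::vector<float> sparse(gating.size(), 0.0f);
  float norm = 0.0f;
  for (int k = 0; k < select_experts; ++k) {
    sparse[idx[k]] = gating[idx[k]];
    norm += gating[idx[k]];
  }
  for (size_t j = 0; j < sparse.size(); ++j) {
    sparse[j] /= norm;
  }
  return sparse;
}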
the_stack
#include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <nvbio/basic/timer.h> #include <nvbio/basic/console.h> #include <nvBowtie/bowtie2/cuda/scoring_queues.h> namespace nvbio { namespace bowtie2 { namespace cuda { namespace { // anonymous namespace // // Setup the contents of a ReadHitsIndex object through a ReadHitsIndexDeviceView. // Implicitly also testing ReadHitsIndexDeviceView::reference. // __global__ void setup_read_hits_index_kernel(ReadHitsIndexDeviceView read_hits_index, const uint32 n_reads, uint32* error) { const uint32 read_index = threadIdx.x; if (read_index >= n_reads) return; // fetch the hits bound to this read ReadHitsIndexDeviceView::reference hit_indices( read_hits_index, read_index ); // bound 2 hits to this read hit_indices.resize( 2u ); hit_indices[0] = read_index * 2u + 0u; hit_indices[1] = read_index * 2u + 1u; } // // Check the contents of a ReadHitsIndex object through a ReadHitsIndexDeviceView. // Implicitly also testing ReadHitsIndexDeviceView::reference. // __global__ void check_read_hits_index_kernel(ReadHitsIndexDeviceView read_hits_index, const uint32 n_reads, uint32* error) { const uint32 read_index = threadIdx.x; if (read_index >= n_reads) return; // fetch the hits bound to this read ReadHitsIndexDeviceView::reference hit_indices( read_hits_index, read_index ); // make sure the number of hits bound to this read match our expectations if (hit_indices.size() != 2u) { *error = 1; return; } // make sure their values match our expectations if ((hit_indices[0] != read_index * 2u + 0u) || (hit_indices[1] != read_index * 2u + 1u)) { *error = 2; return; } // make sure direct indexing work if ((hit_indices[0] != read_hits_index( read_index, 0u )) || (hit_indices[1] != read_hits_index( read_index, 1u ))) { *error = 3; return; } } #pragma hd_warning_disable template <typename HitQueuesType> NVBIO_HOST_DEVICE void set_hit(HitReference<HitQueuesType> hit, const uint32 read_id) { hit.read_id = read_id; hit.ssa = 0; hit.loc = 1; hit.score = 2; hit.sink = 3; hit.opposite_loc = 4; hit.opposite_score = 5; hit.opposite_sink = 6; } #pragma hd_warning_disable template <typename HitQueuesType> NVBIO_HOST_DEVICE bool check_hit(HitReference<HitQueuesType> hit, const uint32 read_id) { return (hit.read_id != read_id || hit.ssa != 0 || hit.loc != 1 || hit.score != 2 || hit.sink != 3 || hit.opposite_loc != 4 || hit.opposite_score != 5 || hit.opposite_sink != 6); } // // Setup the contents of a HitQueues object through a HitQueuesDeviceView. // Implicitly also testing HitReference. // __global__ void setup_hit_queues_kernel(HitQueuesDeviceView hit_queues, const uint32 n_hits, uint32* error) { const uint32 hit_index = threadIdx.x; if (hit_index >= n_hits) return; // take a reference to this hit HitReference<HitQueuesDeviceView> hit = hit_queues[ hit_index ]; // set it up set_hit( hit, hit_index ); // assign a unique index as the read_id, for testing purposes } // // Check the contents of a HitQueues object through a HitQueuesDeviceView. // Implicitly also testing HitReference. // __global__ void check_hit_queues_kernel(HitQueuesDeviceView hit_queues, const uint32 n_hits, uint32* error) { const uint32 hit_index = threadIdx.x; if (hit_index >= n_hits) return; // take a reference to this hit HitReference<HitQueuesDeviceView> hit = hit_queues[ hit_index ]; // check that its values match our expectations if (check_hit( hit, hit_index )) *error = 1; } // // Test the ReadHitsReference object. 
// At this point the read_hits_index and the hit_queues have been already setup, and we just need to make sure // we can dereference them correctly through ReadHitsReference proxies. // __global__ void test_read_hits_ref_kernel(ScoringQueuesDeviceView queues, const uint32 n_reads, uint32* error) { const uint32 read_index = threadIdx.x; if (read_index >= n_reads) return; typedef ReadHitsReference<ScoringQueuesDeviceView> read_hits_reference; typedef typename read_hits_reference::reference hit_reference; // create a reference to the sequence of hits bound to this read read_hits_reference read_hits( queues, read_index ); //read_hits.set_read_info( packed_read( read_index ) ); // check that we have 2 hits for each read if (read_hits.size() != 2u) { *error = 1; return; } // make sure the hits bound to this read match our expectations { // examine the first hit hit_reference hit = read_hits[0]; if (check_hit( hit, read_index*2u + 0u )) { *error = 2; return; } } { // examine the second hit hit_reference hit = read_hits[1]; if (check_hit( hit, read_index*2u + 1u )) { *error = 3; return; } } } // // Test emission of new hits using a ReadHitsBinder // __global__ void test_read_binder_kernel(ScoringQueuesDeviceView queues, const uint32 n_reads, uint32* error) { const uint32 read_index = threadIdx.x; if (read_index >= n_reads) return; typedef ReadHitsReference<ScoringQueuesDeviceView> read_hits_reference; typedef ReadHitsBinder<ScoringQueuesDeviceView> read_hits_binder; typedef typename read_hits_reference::reference hit_reference; // create a hit binder for this read packed_read src_read_info = queues.active_read( read_index ); read_hits_binder dst_read_hits( queues ); // drop all odd reads if ((read_index & 1) == 1) return; // get a new slot for the read const uint32 dst_slot = atomicAdd( queues.active_reads.out_size, 1u ); // bind the read to its new location in the output queue dst_read_hits.bind( dst_slot ); // copy from parent dst_read_hits.set_read_info( src_read_info ); // set the number of hits dst_read_hits.resize( 1u ); // get a new slot for a hit const uint32 hit_slot = atomicAdd( queues.hits_pool, 1u ); // bind the hit dst_read_hits.bind_hit( 0u, hit_slot ); // and set it set_hit( dst_read_hits[0], read_index ); } } // anonymous namespace void test_scoring_queues() { const uint32 n_hits = 100; const uint32 n_reads = 50; const uint32 n_hits_per_read = 2; ScoringQueues queues; queues.resize( n_reads, n_hits, true ); // test ReadHitsIndex interfaces ReadHitsIndex& read_hits_index = queues.hits_index; { log_info( stderr, "test ReadHitsIndex... started\n" ); read_hits_index.setup( n_hits_per_read, n_reads ); thrust::device_vector<uint32> error(1,0); setup_read_hits_index_kernel<<<1,128>>>( read_hits_index.device_view(), n_reads, device_view( error ) ); check_read_hits_index_kernel<<<1,128>>>( read_hits_index.device_view(), n_reads, device_view( error ) ); cudaThreadSynchronize(); const uint32 error_code = error[0]; if (error_code) { log_error( stderr, "test ReadHitsIndex... failed! (error code %u)\n", error_code ); exit(1); } log_info( stderr, "test ReadHitsIndex... done\n" ); } // test HitQueues interfaces HitQueues& hit_queues = queues.hits; { log_info( stderr, "test HitQueues... started\n" ); // check host-side access for (uint32 i = 0; i < 10; ++i) { HitReference<HitQueues> hit( hit_queues, i ); set_hit( hit, i ); } for (uint32 i = 0; i < 10; ++i) { HitReference<HitQueues> hit( hit_queues, i ); if (check_hit( hit, i )) { log_error( stderr, "test HitQueues... failed! 
(host-side referencing)\n" ); exit(1); } } thrust::device_vector<uint32> error(1,0); setup_hit_queues_kernel<<<1,128>>>( hit_queues.device_view(), n_hits, device_view( error ) ); check_hit_queues_kernel<<<1,128>>>( hit_queues.device_view(), n_hits, device_view( error ) ); cudaThreadSynchronize(); const uint32 error_code = error[0]; if (error_code) { log_error( stderr, "test HitQueues... failed! (error code %u)\n", error_code ); exit(1); } log_info( stderr, "test HitQueues... done\n" ); } // test ReadHitsReference object { log_info( stderr, "test ReadHitsReference... started\n" ); thrust::device_vector<uint32> error(1,0); test_read_hits_ref_kernel<<<1,128>>>( queues.device_view(), n_reads, device_view( error ) ); cudaThreadSynchronize(); const uint32 error_code = error[0]; if (error_code) { log_error( stderr, "test ReadHitsReference... failed! (error code %u)\n", error_code ); exit(1); } log_info( stderr, "test ReadHitsReference... done\n" ); } // test ReadHitsBinder object { log_info( stderr, "test ReadHitsBinder... started\n" ); thrust::device_vector<uint32> error(1,0); // clear output queues queues.clear_output(); test_read_binder_kernel<<<1,128>>>( queues.device_view(), n_reads, device_view( error ) ); cudaThreadSynchronize(); const uint32 error_code = error[0]; if (error_code) { log_error( stderr, "test ReadHitsBinder... failed! (error code %u)\n", error_code ); exit(1); } log_info( stderr, "test ReadHitsBinder... done\n" ); } } } // namespace cuda } // namespace bowtie2 } // namespace nvbio
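// Illustrative sketch (assumed driver, not part of nvBowtie): a minimal
// standalone entry point that runs the scoring-queue tests defined above.
int main(int /*argc*/, char** /*argv*/)
{
    nvbio::bowtie2::cuda::test_scoring_queues();
    return 0;
}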
the_stack
FusedOp::FusedOp(FFModel& model, Op* op) : Op(model, OP_FUSED, op->name, 0) { numInputs = op->numInputs; for (int i = 0; i < numInputs; i++) { inputs[i] = op->inputs[i]; input_lps[i] = op->input_lps[i]; input_grad_lps[i] = op->input_grad_lps[i]; } numWeights = op->numWeights; for (int i = 0; i < numWeights; i++) { weights[i] = op->weights[i]; weights[i].owner_op = this; weights[i].owner_idx = i; } numOutputs = op->numOutputs; for (int i = 0; i < numOutputs; i++) { outputs[i] = op->outputs[i]; outputs[i].owner_op = this; outputs[i].owner_idx = i; } numOperators = 1; op_num_inputs[0] = numInputs; op_num_weights[0] = numWeights; op_num_outputs[0] = numOutputs; op_op_type[0] = op->op_type; operators[0] = op; for (int i = 0; i < numInputs; i++) { op_input_source[i] = SOURCE_INPUT; op_input_idx[i] = i; } for (int i = 0; i < numWeights; i++) { op_weight_source[i] = SOURCE_WEIGHT; op_weight_idx[i] = i; } for (int i = 0; i < numOutputs; i++) { op_output_source[i] = SOURCE_OUTPUT; op_output_idx[i] = i; } task_is = op->task_is; } bool FusedOp::add_operator(FFModel& model, Op* op) { Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; // Currently assume fusion optimization is performed // after create_weights and create_outputs // So task_is and op->task_is are not empty Domain my_domain = runtime->get_index_space_domain(ctx, task_is); Domain op_domain = runtime->get_index_space_domain(ctx, op->task_is); ParallelConfig my_config, op_config; assert(model.config.find_parallel_config(my_domain.get_dim(), name, my_config)); assert(model.config.find_parallel_config(op_domain.get_dim(), op->name, op_config)); if (my_config == op_config) { // Do nothing } else { return false; } int input_offset = 0, weight_offset = 0, output_offset = 0; for (int i = 0; i < numOperators; i++) { input_offset += op_num_inputs[i]; weight_offset += op_num_weights[i]; output_offset += op_num_outputs[i]; } if ((input_offset + op->numInputs > MAX_NUM_FUSED_TENSORS) || (weight_offset + op->numWeights > MAX_NUM_FUSED_TENSORS) || (output_offset + op->numOutputs > MAX_NUM_FUSED_TENSORS)) { fprintf(stderr, "Cannot fuse. Consider increase MAX_NUM_FUSED_TENSORS\n"); return false; } if (numOperators + 1 > MAX_NUM_FUSED_OPERATORS) { fprintf(stderr, "Reach to the fusion limit. 
Consider increase MAX_NUM_FUSED_OPERATORS"); return false; } // Set inputs for (int i = 0; i < op->numInputs; i++) { bool found = false; for (int j = 0; j < input_offset; j++) if (inputs[j].region == op->inputs[i].region) { // This input is one of my inputs assert(!found); assert(inputs[j].region != LogicalRegion::NO_REGION); op_input_source[input_offset + i] = SOURCE_INPUT; op_input_idx[input_offset + i] = j; found = true; break; } for (int j = 0; j < output_offset; j++) if ((outputs[j].region == op->inputs[i].region)&&(!found)) { // This input is one of my outputs assert(!found); assert(outputs[j].region != LogicalRegion::NO_REGION); op_input_source[input_offset + i] = SOURCE_OUTPUT; op_input_idx[input_offset + i] = j; found = true; break; } if (found) { // Do nothing } else { inputs[numInputs] = op->inputs[i]; input_lps[numInputs] = op->input_lps[i]; input_grad_lps[numInputs] = op->input_grad_lps[i]; op_input_source[input_offset+i] = SOURCE_INPUT; op_input_idx[input_offset+i] = numInputs; numInputs += 1; } } // Set weights for (int i = 0; i < op->numWeights; i++) { bool found = false; for (int j = 0; j < numWeights; j++) if (weights[j].region == op->weights[i].region) { assert(!found); assert(weights[j].region != LogicalRegion::NO_REGION); op_weight_source[weight_offset + i] = SOURCE_WEIGHT; op_weight_idx[weight_offset + i] = j; found = true; break; } if (found) { // Do nothing } else { weights[numWeights] = op->weights[i]; weights[numWeights].owner_op = this; weights[numWeights].owner_idx = numWeights; op_weight_source[weight_offset+i] = SOURCE_WEIGHT; op_weight_idx[weight_offset+i] = numWeights; numWeights += 1; } } // Set outputs for (int i = 0; i < op->numOutputs; i++) { bool found = false; for (int j = 0; j < numOutputs; j++) { if (outputs[j].region == op->outputs[i].region) { assert(!found); found = true; op_output_source[output_offset+i] = SOURCE_OUTPUT; op_output_idx[output_offset+i] = j; } } if (found) continue; outputs[numOutputs] = op->outputs[i]; outputs[numOutputs].owner_op = this; outputs[numOutputs].owner_idx = numOutputs; op_output_source[output_offset+i] = SOURCE_OUTPUT; op_output_idx[output_offset+i] = numOutputs; numOutputs += 1; } assert(op->numInputs > 0); assert(op->numWeights >= 0); assert(op->numOutputs > 0); op_num_inputs[numOperators] = op->numInputs; op_num_weights[numOperators] = op->numWeights; op_num_outputs[numOperators] = op->numOutputs; op_op_type[numOperators] = op->op_type; operators[numOperators] = op; numOperators += 1; assert(numOperators <= MAX_NUM_FUSED_OPERATORS); if (numInputs > MAX_NUM_INPUTS) { fprintf(stderr, "Reach to the #inputs limit during fusion.\n" "Consider increase MAX_NUM_INPUTS to allow more fusions.\n"); return false; } if (numWeights > MAX_NUM_WEIGHTS) { fprintf(stderr, "Reach to the #weights limit during fusion.\n" "Consider increase MAX_NUM_WEIGHTS to allow more fusions.\n"); return false; } if (numOutputs > MAX_NUM_OUTPUTS) { fprintf(stderr, "Reach to the #outputs limit during fusion.\n" "Consider increase MAX_NUM_OUTPUTS to allow more fusions.\n"); } return true; } void FusedOp::create_weights(FFModel& model) { assert(false && "Weights should be created before fusion optimizations"); } void FusedOp::create_output_and_partition(FFModel& model) { assert(false && "Outputs should be created before fusion optimizations"); } OpMeta* FusedOp::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { const FusedOp* fused = (FusedOp*) task->args; const FusedOpMeta* metas = (FusedOpMeta*) 
task->local_args; FusedOpMeta* local_meta = new FusedOpMeta(); memcpy(local_meta, metas, sizeof(FusedOpMeta)); local_meta->fused_op = (FusedOp*) malloc(sizeof(FusedOp)); memcpy(local_meta->fused_op, fused, sizeof(FusedOp)); return ((OpMeta*)local_meta); } void FusedOp::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; // Call init methods in individual operators Domain domain = runtime->get_index_space_domain(ctx, task_is); for (int i = 0; i < numOperators; i++) { operators[i]->init(ff); for (size_t j = 0; j < domain.get_volume(); j++) fused_meta[j].meta[i] = operators[i]->meta[j]; } for (size_t j = 0; j < domain.get_volume(); j++) fused_meta[j].numOperators = numOperators; switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ argmap.set_point(*it, TaskArgument(&fused_meta[idx++], sizeof(FusedOpMeta))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(FUSEDOP_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(FusedOp)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ meta[idx++] = fm.get_result<OpMeta*>(*it); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[...](I): inputs regions[...](I): weights regions[...](I): outputs */ __host__ void FusedOp::forward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { //const FusedOp* fused = (FusedOp*) task->args; const FusedOpMeta* metas = *((FusedOpMeta**) task->local_args); const FusedOp* fused = metas->fused_op; assert(metas->numOperators == fused->numOperators); assert(regions.size() == task->regions.size()); assert((int)regions.size() == fused->numInputs+fused->numWeights+fused->numOutputs); Domain input_domain[MAX_NUM_INPUTS]; Domain weight_domain[MAX_NUM_WEIGHTS]; Domain output_domain[MAX_NUM_OUTPUTS]; const float* input_ptr[MAX_NUM_INPUTS]; const float* weight_ptr[MAX_NUM_WEIGHTS]; float* output_ptr[MAX_NUM_OUTPUTS]; assert(fused->numInputs <= MAX_NUM_INPUTS); for (int i = 0; i < fused->numInputs; i++) { input_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i].region.get_index_space()); input_ptr[i] = helperGetTensorPointerRO<float>( regions[i], task->regions[i], FID_DATA, ctx, runtime); } int roff = fused->numInputs; assert(fused->numWeights <= MAX_NUM_WEIGHTS); for (int i = 0; i < fused->numWeights; i++) { weight_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); weight_ptr[i] = helperGetTensorPointerRO<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); } roff += fused->numWeights; assert(fused->numOutputs <= MAX_NUM_OUTPUTS); for (int i = 0; i < fused->numOutputs; i++) { output_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); output_ptr[i] = helperGetTensorPointerWO<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); } // Assert that all meta share the same dnn/blas handler int start = 0; for (start = 0; start < fused->numOperators; start++) if (metas->meta[start] 
!= NULL) break; for (int op = start+1; op < fused->numOperators; op++) if (metas->meta[op] != NULL) { assert(metas->meta[start]->handle.blas == metas->meta[op]->handle.blas); assert(metas->meta[start]->handle.dnn == metas->meta[op]->handle.dnn); } cudaStream_t stream; if (start < fused->numOperators) { checkCUDA(get_legion_stream(&stream)); } int ioff = 0, woff = 0, ooff = 0; for (int op = 0; op < fused->numOperators; op++) { Domain my_id[MAX_NUM_INPUTS]; Domain my_wd[MAX_NUM_WEIGHTS]; Domain my_od[MAX_NUM_OUTPUTS]; const float* my_ip[MAX_NUM_INPUTS]; const float* my_wp[MAX_NUM_WEIGHTS]; float* my_op[MAX_NUM_OUTPUTS]; for (int i = 0; i < fused->op_num_inputs[op]; i++) { int my_off = fused->op_input_idx[i+ioff]; if (fused->op_input_source[i+ioff] == SOURCE_INPUT) { my_id[i] = input_domain[my_off]; my_ip[i] = input_ptr[my_off]; } else if (fused->op_input_source[i+ioff] == SOURCE_OUTPUT) { my_id[i] = output_domain[my_off]; my_ip[i] = output_ptr[my_off]; } else assert(false); } for (int i = 0; i < fused->op_num_weights[op]; i++) { assert(fused->op_weight_source[i+woff] == SOURCE_WEIGHT); my_wd[i] = weight_domain[fused->op_weight_idx[i+woff]]; my_wp[i] = weight_ptr[fused->op_weight_idx[i+woff]]; } for (int i = 0; i < fused->op_num_outputs[op]; i++) { assert(fused->op_output_source[i+ooff] == SOURCE_OUTPUT); my_od[i] = output_domain[fused->op_output_idx[i+ooff]]; my_op[i] = output_ptr[fused->op_output_idx[i+ooff]]; } switch(fused->op_op_type[op]) { case OP_CONCAT: { assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); ConcatMeta* m = (ConcatMeta*) metas->meta[op]; int num_inputs = fused->op_num_inputs[op]; Concat::forward_kernel(my_op[0], my_ip, num_inputs, m->axis, my_od[0], my_id, stream); break; } case OP_CONV2D: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_dim() == 4); assert(my_wd[0].get_dim() == 4); assert(my_od[0].get_dim() == 4); Conv2DMeta* m = (Conv2DMeta*) metas->meta[op]; Conv2D::forward_kernel(m, my_ip[0], my_op[0], my_wp[0], my_wp[1], stream); break; } case OP_BATCHNORM: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_dim() == 4); assert(my_od[0].get_dim() == 4); assert(my_wd[0].get_dim() == 1); assert(my_wd[1].get_dim() == 1); BatchNormMeta* m = (BatchNormMeta*) metas->meta[op]; BatchNorm::forward_kernel(m, my_ip[0], my_op[0], my_wp[0], my_wp[1], stream); break; } case OP_DROPOUT: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); DropoutMeta* m = (DropoutMeta*) metas->meta[op]; Dropout::forward_kernel(m, my_ip[0], my_op[0], stream); break; } case OP_LINEAR: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 2); assert(fused->op_num_outputs[op] == 1); Rect<2> kernel_rect = my_wd[0]; int in_dim = kernel_rect.hi[0] - kernel_rect.lo[0] + 1; int out_dim = kernel_rect.hi[1] - kernel_rect.lo[1] + 1; int batch_size = my_id[0].get_volume() / in_dim; assert(my_od[0].get_volume() == out_dim * batch_size); assert(my_id[0].get_volume() == in_dim * batch_size); assert(my_wd[1].get_volume() == out_dim); LinearMeta* m = (LinearMeta*) metas->meta[op]; Linear::forward_kernel(m, my_ip[0], my_op[0], my_wp[0], my_wp[1], in_dim, out_dim, batch_size, stream); break; } case OP_BATCHMATMUL: { assert(fused->op_num_inputs[op] == 2); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); Domain out_domain = my_od[0]; Domain a_domain = my_id[0]; Domain b_domain = my_id[1]; int m = 
b_domain.hi()[0] - b_domain.lo()[0] + 1; assert(m == out_domain.hi()[0] - out_domain.lo()[0] + 1); int n = a_domain.hi()[1] - a_domain.lo()[1] + 1; assert(n == out_domain.hi()[1] - out_domain.lo()[1] + 1); int k = a_domain.hi()[0] - a_domain.lo()[0] + 1; assert(k == b_domain.hi()[1] - b_domain.lo()[1] + 1); assert(a_domain.get_dim() == b_domain.get_dim()); assert(a_domain.get_dim() == out_domain.get_dim()); int batch = 1; for (int i = 2; i < a_domain.get_dim(); i++) { int dim_size = a_domain.hi()[i] - a_domain.lo()[i] + 1; assert(dim_size == b_domain.hi()[i] - b_domain.lo()[i] + 1); assert(dim_size == out_domain.hi()[i] - out_domain.lo()[i] + 1); batch *= dim_size; } BatchMatmulMeta* meta = (BatchMatmulMeta*) metas->meta[op]; BatchMatmul::forward_kernel(meta, my_op[0], my_ip[0], my_ip[1], NULL, m, n, k, batch, stream, meta->a_seq_length_dim, meta->b_seq_length_dim, fused->iter_config.seq_length); break; } case OP_EW_ADD: case OP_EW_SUB: case OP_EW_MUL: case OP_EW_DIV: { assert(fused->op_num_inputs[op] == 2); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0] == my_id[1]); assert(my_id[0] == my_od[0]); ElementBinaryMeta* m = (ElementBinaryMeta*) metas->meta[op]; ElementBinary::forward_kernel(m, my_ip[0], my_ip[1], my_op[0], stream); break; } case OP_RELU: case OP_SIGMOID: case OP_TANH: case OP_ELU: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0] == my_od[0]); ElementUnaryMeta* m = (ElementUnaryMeta*) metas->meta[op]; ElementUnary::forward_kernel(m, my_ip[0], my_op[0], my_id[0].get_volume(), stream); break; } case OP_POOL2D: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); //assert(my_id[0] == my_od[0]); Pool2DMeta* m = (Pool2DMeta*) metas->meta[op]; Pool2D::forward_kernel(m, my_ip[0], my_op[0], stream); break; } case OP_FLAT: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_volume() == my_od[0].get_volume()); Flat::forward_kernel(my_ip[0], my_op[0], my_id[0].get_volume(), stream); break; } case OP_RESHAPE: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_volume() == my_od[0].get_volume()); Reshape::forward_kernel(my_ip[0], my_op[0], my_id[0].get_volume(), stream); break; } case OP_TRANSPOSE: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_volume() == my_od[0].get_volume()); TransposeMeta* m = (TransposeMeta*) metas->meta[op]; Transpose::forward_kernel(m, my_ip[0], my_op[0], my_id[0], my_od[0], stream); break; } default: { fprintf(stderr, "Fusion currently does not support type = %d\n", fused->op_op_type[op]); assert(false && "Fusion currently does not support type"); } } ioff += fused->op_num_inputs[op]; woff += fused->op_num_weights[op]; ooff += fused->op_num_outputs[op]; } //for (int i = 0; i < fused->numOutputs; i++) // print_tensor<float>(output_ptr[i], output_domain[i].get_volume(), "[Fused:forward:output]"); } void FusedOp::forward(const FFModel& ff) { // Set iter_config iter_config = ff.iter_config; ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ 
{ \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(FUSEDOP_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); int offset = 0; for (int i = 0; i < numInputs; i++) { assert(input_lps[i] != LogicalPartition::NO_PART); assert(inputs[i].region != LogicalRegion::NO_REGION); launcher.add_region_requirement( RegionRequirement(input_lps[i], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[i].region)); launcher.add_field(offset+i, FID_DATA); } offset += numInputs; for (int i = 0; i < numWeights; i++) { assert(weights[i].region != LogicalRegion::NO_REGION); launcher.add_region_requirement( RegionRequirement(weights[i].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[i].region)); launcher.add_field(offset+i, FID_DATA); } offset += numWeights; for (int i = 0; i < numOutputs; i++) { assert(outputs[i].region != LogicalRegion::NO_REGION); launcher.add_region_requirement( RegionRequirement(outputs[i].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[i].region)); launcher.add_field(offset+i, FID_DATA); } runtime->execute_index_space(ctx, launcher); } /* regions[...](I): input regions[...](I): weight regions[...](I): output regions[...](I/O): input_grad regions[...](I/O): weight_grad regions[...](I/O): output_grad */ __host__ void FusedOp::backward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { // const FusedOp* fused = (FusedOp*) task->args; const FusedOpMeta* metas = *((FusedOpMeta**) task->local_args); const FusedOp* fused = metas->fused_op; assert(metas->numOperators == fused->numOperators); assert(regions.size() == task->regions.size()); { int sum = fused->numInputs + fused->numWeights + fused->numOutputs; assert(sum*2 == (int)regions.size()); } Domain input_domain[MAX_NUM_INPUTS], input_grad_domain[MAX_NUM_INPUTS]; Domain weight_domain[MAX_NUM_WEIGHTS], weight_grad_domain[MAX_NUM_WEIGHTS]; Domain output_domain[MAX_NUM_OUTPUTS], output_grad_domain[MAX_NUM_OUTPUTS]; const float* input_ptr[MAX_NUM_INPUTS]; float* input_grad_ptr[MAX_NUM_INPUTS]; const float* weight_ptr[MAX_NUM_WEIGHTS]; float* weight_grad_ptr[MAX_NUM_WEIGHTS]; const float* output_ptr[MAX_NUM_OUTPUTS]; float* output_grad_ptr[MAX_NUM_OUTPUTS]; int roff = 0; assert(fused->numInputs <= MAX_NUM_INPUTS); for (int i = 0; i < fused->numInputs; i++) { input_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i].region.get_index_space()); input_ptr[i] = helperGetTensorPointerRO<float>( regions[i], task->regions[i], FID_DATA, ctx, runtime); } roff += fused->numInputs; assert(fused->numWeights <= MAX_NUM_WEIGHTS); for (int i = 0; i < fused->numWeights; i++) { weight_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); weight_ptr[i] = helperGetTensorPointerRO<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); } roff += fused->numWeights; assert(fused->numOutputs <= MAX_NUM_OUTPUTS); for (int i = 0; i < fused->numOutputs; i++) { output_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); output_ptr[i] = helperGetTensorPointerRO<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); } roff += fused->numOutputs; 
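  // The gradient regions follow the forward regions in the same order:
  // input grads, then weight grads, then output grads (cf. the region list
  // in the comment above backward_task).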
for (int i = 0; i < fused->numInputs; i++) { input_grad_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); input_grad_ptr[i] = helperGetTensorPointerRW<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); assert(input_grad_domain[i] == input_domain[i]); } roff += fused->numInputs; for (int i = 0; i < fused->numWeights; i++) { weight_grad_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); weight_grad_ptr[i] = helperGetTensorPointerRW<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); assert(weight_grad_domain[i].get_volume() == weight_domain[i].get_volume()); } roff += fused->numWeights; for (int i = 0; i < fused->numOutputs; i++) { output_grad_domain[i] = runtime->get_index_space_domain( ctx, task->regions[i+roff].region.get_index_space()); output_grad_ptr[i] = helperGetTensorPointerRW<float>( regions[i+roff], task->regions[i+roff], FID_DATA, ctx, runtime); assert(output_grad_domain[i] == output_domain[i]); } roff += fused->numOutputs; // Assert that all meta share the same dnn/blas handler int start = 0; for (start = 0; start < fused->numOperators; start++) if (metas->meta[start] != NULL) break; for (int op = start+1; op < fused->numOperators; op++) if (metas->meta[op] != NULL) { assert(metas->meta[start]->handle.blas == metas->meta[op]->handle.blas); assert(metas->meta[start]->handle.dnn == metas->meta[op]->handle.dnn); } cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); int ioff = 0, woff = 0, ooff = 0; Domain my_id[MAX_NUM_INPUTS], my_grad_id[MAX_NUM_INPUTS]; Domain my_wd[MAX_NUM_WEIGHTS], my_grad_wd[MAX_NUM_WEIGHTS]; Domain my_od[MAX_NUM_OUTPUTS], my_grad_od[MAX_NUM_OUTPUTS]; const float* my_ip[MAX_NUM_INPUTS]; const float* my_wp[MAX_NUM_WEIGHTS]; const float* my_op[MAX_NUM_OUTPUTS]; float* my_grad_ip[MAX_NUM_INPUTS]; float* my_grad_wp[MAX_NUM_WEIGHTS]; float* my_grad_op[MAX_NUM_OUTPUTS]; // Do backpropagation in the reverse ordering for (int op = 0; op < fused->numOperators; op++) { ioff += fused->op_num_inputs[op]; woff += fused->op_num_weights[op]; ooff += fused->op_num_outputs[op]; } for (int op = fused->numOperators-1; op >= 0; op--) { ioff -= fused->op_num_inputs[op]; woff -= fused->op_num_weights[op]; ooff -= fused->op_num_outputs[op]; for (int i = 0; i < fused->op_num_inputs[op]; i++) { int my_off = fused->op_input_idx[i+ioff]; if (fused->op_input_source[i+ioff] == SOURCE_INPUT) { my_id[i] = input_domain[my_off]; my_ip[i] = input_ptr[my_off]; my_grad_id[i] = input_grad_domain[my_off]; my_grad_ip[i] = input_grad_ptr[my_off]; assert(my_grad_id[i] == my_id[i]); } else if (fused->op_input_source[i+ioff] == SOURCE_OUTPUT) { my_id[i] = output_domain[my_off]; my_ip[i] = output_ptr[my_off]; my_grad_id[i] = output_grad_domain[my_off]; my_grad_ip[i] = output_grad_ptr[my_off]; assert(my_grad_id[i] == my_id[i]); } else assert(false); } for (int i = 0; i < fused->op_num_weights[op]; i++) { assert(fused->op_weight_source[i+woff] == SOURCE_WEIGHT); my_wd[i] = weight_domain[fused->op_weight_idx[i+woff]]; my_wp[i] = weight_ptr[fused->op_weight_idx[i+woff]]; my_grad_wd[i] = weight_grad_domain[fused->op_weight_idx[i+woff]]; my_grad_wp[i] = weight_grad_ptr[fused->op_weight_idx[i+woff]]; assert(my_grad_wd[i].get_volume() == my_wd[i].get_volume()); } for (int i = 0; i < fused->op_num_outputs[op]; i++) { assert(fused->op_output_source[i+ooff] == SOURCE_OUTPUT); my_od[i] = output_domain[fused->op_output_idx[i+ooff]]; my_op[i] = 
output_ptr[fused->op_output_idx[i+ooff]]; my_grad_od[i] = output_grad_domain[fused->op_output_idx[i+ooff]]; my_grad_op[i] = output_grad_ptr[fused->op_output_idx[i+ooff]]; assert(my_grad_od[i] == my_od[i]); } switch (fused->op_op_type[op]) { case OP_CONCAT: { assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); ConcatMeta* m = (ConcatMeta*) metas->meta[op]; int num_inputs = fused->op_num_inputs[op]; Concat::backward_kernel(my_grad_op[0], my_grad_ip, num_inputs, m->axis, my_grad_od[0], my_grad_id, stream); break; } case OP_CONV2D: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_dim() == 4); assert(my_wd[0].get_dim() == 4); assert(my_od[0].get_dim() == 4); Conv2DMeta* m = (Conv2DMeta*) metas->meta[op]; Conv2D::backward_kernel(m, my_ip[0], my_grad_ip[0], my_op[0], my_grad_op[0], my_wp[0], my_grad_wp[0], my_grad_wp[1], stream); break; } case OP_BATCHNORM: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); assert(my_id[0].get_dim() == 4); assert(my_wd[0].get_dim() == 1); assert(my_wd[1].get_dim() == 1); assert(my_od[0].get_dim() == 4); BatchNormMeta* m = (BatchNormMeta*) metas->meta[op]; BatchNorm::backward_kernel(m, my_ip[0], my_grad_op[0], my_op[0], my_grad_ip[0], my_wp[0], my_grad_wp[0], my_grad_wp[1], my_od[0].get_volume(), stream); break; } case OP_DROPOUT: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_outputs[op] == 1); DropoutMeta* m = (DropoutMeta*) metas->meta[op]; Dropout::backward_kernel(m, my_grad_op[0], my_grad_ip[0], stream); break; } case OP_LINEAR: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 2); assert(fused->op_num_outputs[op] == 1); Rect<2> kernel_rect = my_wd[0]; int in_dim = kernel_rect.hi[0] - kernel_rect.lo[0] + 1; int out_dim = kernel_rect.hi[1] - kernel_rect.lo[1] + 1; int batch_size = my_id[0].get_volume() / in_dim; assert(my_od[0].get_volume() == out_dim * batch_size); assert(my_id[0].get_volume() == in_dim * batch_size); assert(my_wd[1].get_volume() == out_dim); LinearMeta* m = (LinearMeta*) metas->meta[op]; Linear::backward_kernel(m, my_ip[0], my_grad_ip[0], my_op[0], my_grad_op[0], my_wp[0], my_grad_wp[0], my_grad_wp[1], in_dim, out_dim, batch_size, stream); break; } case OP_BATCHMATMUL: { assert(fused->op_num_inputs[op] == 2); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); Domain out_domain = my_od[0]; Domain a_domain = my_id[0]; Domain b_domain = my_id[1]; // check dims int m = b_domain.hi()[0] - b_domain.lo()[0] + 1; assert(m == out_domain.hi()[0] - out_domain.lo()[0] + 1); int n = a_domain.hi()[1] - a_domain.lo()[1] + 1; assert(n == out_domain.hi()[1] - out_domain.lo()[1] + 1); int k = a_domain.hi()[0] - a_domain.lo()[0] + 1; assert(k == b_domain.hi()[1] - b_domain.lo()[1] + 1); assert(a_domain.get_dim() == b_domain.get_dim()); assert(a_domain.get_dim() == out_domain.get_dim()); int batch = 1; for (int i = 2; i < a_domain.get_dim(); i++) { int dim_size = a_domain.hi()[i] - a_domain.lo()[i] + 1; assert(dim_size == b_domain.hi()[i] - b_domain.lo()[i] + 1); assert(dim_size == out_domain.hi()[i] - out_domain.lo()[i] + 1); batch *= dim_size; } BatchMatmulMeta* meta = (BatchMatmulMeta*) metas->meta[op]; BatchMatmul::backward_kernel(meta, my_op[0], my_grad_op[0], my_ip[0], my_grad_ip[0], my_ip[1], my_grad_ip[1], NULL, m, n, k, batch, stream); break; } case OP_EW_ADD: case OP_EW_SUB: case OP_EW_MUL: case OP_EW_DIV: { assert(fused->op_num_inputs[op] == 2); 
assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0] == my_id[1]); assert(my_id[0] == my_od[0]); ElementBinaryMeta* m = (ElementBinaryMeta*) metas->meta[op]; ElementBinary::backward_kernel(m, my_grad_op[0], my_ip[0], my_ip[1], my_grad_ip[0], my_grad_ip[1], stream); break; } case OP_RELU: case OP_SIGMOID: case OP_TANH: case OP_ELU: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_id[0] == my_od[0]); ElementUnaryMeta* m = (ElementUnaryMeta*) metas->meta[op]; ElementUnary::backward_kernel(m, my_ip[0], my_grad_ip[0], my_op[0], my_grad_op[0], my_id[0].get_volume(), stream); break; } case OP_POOL2D: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); //assert(my_id[0] == my_od[0]); Pool2DMeta* m = (Pool2DMeta*) metas->meta[op]; Pool2D::backward_kernel(m, my_ip[0], my_grad_ip[0], my_op[0], my_grad_op[0], stream); break; } case OP_FLAT: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_grad_id[0].get_volume() == my_grad_od[0].get_volume()); Flat::backward_kernel(my_grad_ip[0], my_grad_op[0], my_grad_id[0].get_volume(), stream); break; } case OP_RESHAPE: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_grad_id[0].get_volume() == my_grad_od[0].get_volume()); Reshape::backward_kernel(my_grad_ip[0], my_grad_op[0], my_grad_id[0].get_volume(), stream); break; } case OP_TRANSPOSE: { assert(fused->op_num_inputs[op] == 1); assert(fused->op_num_weights[op] == 0); assert(fused->op_num_outputs[op] == 1); assert(my_grad_id[0].get_volume() == my_grad_od[0].get_volume()); TransposeMeta* m = (TransposeMeta*) metas->meta[op]; Transpose::backward_kernel(m, my_grad_ip[0], my_grad_op[0], my_grad_id[0], my_grad_od[0], stream); break; } default: assert(false && "Fusion currently does not support type"); } } assert(ioff == 0); assert(woff == 0); assert(ooff == 0); //for (int i = 0; i < fused->numWeights; i++) // print_tensor<float>(weight_grad_ptr[i], weight_grad_domain[i].get_volume(), "[Fused:backward:weight_grad]"); //for (int i = 0; i < fused->numInputs; i++) // print_tensor<float>(input_grad_ptr[i], input_grad_domain[i].get_volume(), "[Fused:backward:input_grad]"); //for (int i = 0; i < fused->numOutputs; i++) // print_tensor<float>(output_grad_ptr[i], output_grad_domain[i].get_volume(), "[Fused:backward:output_grad]"); } void FusedOp::backward(const FFModel& ff) { // Set iter_config iter_config = ff.iter_config; ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Domain domain = runtime->get_index_space_domain(ctx, task_is); switch (domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ { \ Rect<DIM> rect = domain; \ int idx = 0; \ for (PointInRectIterator<DIM> it(rect); it(); it++) { \ OpMeta* mp = meta[idx++]; \ argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); \ } \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } IndexLauncher launcher(FUSEDOP_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(FusedOp)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); int idx = 0; for (int i = 0; i < numInputs; i++) { launcher.add_region_requirement( RegionRequirement(input_lps[i], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[i].region)); 
launcher.add_field(idx++, FID_DATA); } for (int i = 0; i < numWeights; i++) { launcher.add_region_requirement( RegionRequirement(weights[i].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[i].region)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i < numOutputs; i++) { launcher.add_region_requirement( RegionRequirement(outputs[i].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[i].region)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i < numInputs; i++) { launcher.add_region_requirement( RegionRequirement(input_grad_lps[i], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[i].region_grad)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i < numWeights; i++) { launcher.add_region_requirement( RegionRequirement(weights[i].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[i].region_grad)); launcher.add_field(idx++, FID_DATA); } for (int i = 0; i < numOutputs; i++) { launcher.add_region_requirement( RegionRequirement(outputs[i].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[i].region_grad)); launcher.add_field(idx++, FID_DATA); } runtime->execute_index_space(ctx, launcher); } bool FusedOp::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { // The search should happen before fusion assert(false); return false; }
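// ---------------------------------------------------------------------------
// Illustrative sketch (not part of FlexFlow): FusedOp::backward_task above
// walks the fused operators in reverse using running prefix offsets
// ioff/woff/ooff that are first summed over all operators and then
// decremented before each operator is processed. The standalone program
// below reproduces that indexing pattern with plain arrays (the per-operator
// counts are made-up example values) so the bookkeeping can be checked in
// isolation.
#include <cstdio>

int main() {
  const int num_ops = 3;
  // hypothetical per-operator input/weight/output counts
  int op_num_inputs[num_ops]  = {1, 2, 1};
  int op_num_weights[num_ops] = {2, 0, 0};
  int op_num_outputs[num_ops] = {1, 1, 1};

  // First sum all counts, as the backward task does before its reverse loop.
  int ioff = 0, woff = 0, ooff = 0;
  for (int op = 0; op < num_ops; op++) {
    ioff += op_num_inputs[op];
    woff += op_num_weights[op];
    ooff += op_num_outputs[op];
  }
  // Then visit operators in reverse, decrementing before use, so that
  // [ioff, ioff + op_num_inputs[op]) is this operator's slice of the
  // flattened per-fused-op input list.
  for (int op = num_ops - 1; op >= 0; op--) {
    ioff -= op_num_inputs[op];
    woff -= op_num_weights[op];
    ooff -= op_num_outputs[op];
    printf("op %d: inputs [%d,%d) weights [%d,%d) outputs [%d,%d)\n", op,
           ioff, ioff + op_num_inputs[op],
           woff, woff + op_num_weights[op],
           ooff, ooff + op_num_outputs[op]);
  }
  // All offsets are back at zero, matching the asserts at the end of the task.
  printf("final offsets: %d %d %d\n", ioff, woff, ooff);
  return 0;
}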
#include "trt_engine/trt_network_crt/plugins/grid_sampler_plugin/grid_sampler_plugin.h" #include <cuda_runtime.h> #include <device_launch_parameters.h> #include "trt_engine/trt_network_crt/plugins/common/half_ext.cuh" FWD_TRT_NAMESPACE_BEGIN #define LAUNCH_BOUNDS_0 \ __launch_bounds__(256, 4) // default launch bounds that should give good occupancy // and versatility across all architectures. #define LAUNCH_BOUNDS_1(max_threads_per_block) __launch_bounds__((max_threads_per_block)) #define LAUNCH_BOUNDS_2(max_threads_per_block, min_blocks_per_sm) \ __launch_bounds__((max_threads_per_block), (min_blocks_per_sm)) enum class GridSamplerInterpolation { Bilinear = 0, Nearest }; enum class GridSamplerPadding { Zeros = 0, Border, Reflection }; // Unnormalizes a coordinate from the -1 to +1 scale to its pixel index value, // where we view each pixel as an area between (idx - 0.5) and (idx + 0.5). // if align_corners: -1 and +1 get sent to the centers of the corner pixels // -1 --> 0 // +1 --> (size - 1) // scale_factor = (size - 1) / 2 // if not align_corners: -1 and +1 get sent to the image edges // -1 --> -0.5 // +1 --> (size - 1) + 0.5 == size - 0.5 // scale_factor = size / 2 template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_unnormalize(scalar_t coord, int size, bool align_corners) { if (align_corners) { // unnormalize coord from [-1, 1] to [0, size - 1] return ((float(coord) + 1.f) / 2) * (size - 1); } else { // unnormalize coord from [-1, 1] to [-0.5, size - 0.5] return ((float(coord) + 1.f) * size - 1) / 2; } } // Clips coordinates to between 0 and clip_limit - 1 template <typename scalar_t> static __forceinline__ __device__ scalar_t clip_coordinates(scalar_t in, int clip_limit) { return min(static_cast<scalar_t>(clip_limit - 1), max(in, static_cast<scalar_t>(0))); } // Reflects coordinates until they fall between low and high (inclusive). // The bounds are passed as twice their value so that half-integer values // can be represented as ints. template <typename scalar_t> static __forceinline__ __device__ scalar_t reflect_coordinates(scalar_t in, int twice_low, int twice_high) { if (twice_low == twice_high) { return static_cast<scalar_t>(0); } scalar_t min = static_cast<scalar_t>(twice_low) / 2; scalar_t span = static_cast<scalar_t>(twice_high - twice_low) / 2; in = fabs(in - min); // `fmod` returns same sign as `in`, which is positive after the `fabs` above. 
scalar_t extra = fmod(in, span); int flips = static_cast<int>(floor(in / span)); if (flips % 2 == 0) { return extra + min; } else { return span - extra + min; } } // Computes the pixel source index value for a grid coordinate template <typename scalar_t> static __forceinline__ __device__ scalar_t grid_sampler_compute_source_index( scalar_t coord, int size, GridSamplerPadding padding_mode, bool align_corners) { coord = grid_sampler_unnormalize(coord, size, align_corners); if (padding_mode == GridSamplerPadding::Border) { // clip coordinates to image borders coord = clip_coordinates(coord, size); } else if (padding_mode == GridSamplerPadding::Reflection) { // reflect coordinates by image borders if (align_corners) { coord = reflect_coordinates(coord, 0, 2 * (size - 1)); } else { coord = reflect_coordinates(coord, -1, 2 * size - 1); // when align_corners=False, reflection does not auto clip coords coord = clip_coordinates(coord, size); } } return coord; } static __forceinline__ __device__ bool within_bounds_2d(int h, int w, int H, int W) { return h >= 0 && h < H && w >= 0 && w < W; } static __forceinline__ __device__ bool within_bounds_3d(int d, int h, int w, int D, int H, int W) { return d >= 0 && d < D && h >= 0 && h < H && w >= 0 && w < W; } template <typename scalar_t> static __forceinline__ __device__ void safe_add_2d(scalar_t* data, int h, int w, int sH, int sW, int H, int W, scalar_t delta) { if (within_bounds_2d(h, w, H, W)) { atomicAdd(data + h * sH + w * sW, delta); } } template <typename scalar_t> static __forceinline__ __device__ void safe_add_3d(scalar_t* data, int d, int h, int w, int sD, int sH, int sW, int D, int H, int W, scalar_t delta) { if (within_bounds_3d(d, h, w, D, H, W)) { atomicAdd(data + d * sD + h * sH + w * sW, delta); } } template <typename scalar_t> LAUNCH_BOUNDS_1(1024) __global__ void grid_sampler_2d_kernel(const int nthreads, TensorInfo<scalar_t> input, TensorInfo<scalar_t> grid, TensorInfo<scalar_t> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { int C = input.Size(1); int inp_H = input.Size(2); int inp_W = input.Size(3); int out_H = grid.Size(1); int out_W = grid.Size(2); int inp_sN = input.Stride(0); int inp_sC = input.Stride(1); int inp_sH = input.Stride(2); int inp_sW = input.Stride(3); int grid_sN = grid.Stride(0); int grid_sH = grid.Stride(1); int grid_sW = grid.Stride(2); int grid_sCoor = grid.Stride(3); int out_sN = output.Stride(0); int out_sC = output.Stride(1); int out_sH = output.Stride(2); int out_sW = output.Stride(3); int64_t thread_idx = blockIdx.x * blockDim.x + threadIdx.x; for (int index = thread_idx; thread_idx < nthreads; thread_idx += blockDim.x * gridDim.x, index = thread_idx) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int n = index / (out_H * out_W); const int grid_offset = n * grid_sN + h * grid_sH + w * grid_sW; // get the corresponding input x, y co-ordinates from grid scalar_t ix = grid.DataPtr()[grid_offset]; scalar_t iy = grid.DataPtr()[grid_offset + grid_sCoor]; ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners); iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get NE, NW, SE, SW pixel values from (x, y) int ix_nw = static_cast<int>(floor(ix)); int iy_nw = static_cast<int>(floor(iy)); int ix_ne = ix_nw + 1; int iy_ne = iy_nw; int ix_sw = ix_nw; int iy_sw = iy_nw + 1; int ix_se = ix_nw + 1; int iy_se = 
iy_nw + 1; // get surfaces to each neighbor: scalar_t nw = (ix_se - ix) * (iy_se - iy); scalar_t ne = (ix - ix_sw) * (iy_sw - iy); scalar_t sw = (ix_ne - ix) * (iy - iy_ne); scalar_t se = (ix - ix_nw) * (iy - iy_nw); // calculate bilinear weighted pixel value and set output pixel auto inp_ptr_NC = input.DataPtr() + n * inp_sN; auto out_ptr_NCHW = output.DataPtr() + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { *out_ptr_NCHW = static_cast<scalar_t>(0); if (within_bounds_2d(iy_nw, ix_nw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_nw * inp_sH + ix_nw * inp_sW] * nw; } if (within_bounds_2d(iy_ne, ix_ne, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_ne * inp_sH + ix_ne * inp_sW] * ne; } if (within_bounds_2d(iy_sw, ix_sw, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_sw * inp_sH + ix_sw * inp_sW] * sw; } if (within_bounds_2d(iy_se, ix_se, inp_H, inp_W)) { *out_ptr_NCHW += inp_ptr_NC[iy_se * inp_sH + ix_se * inp_sW] * se; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(round(ix)); int iy_nearest = static_cast<int>(round(iy)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.DataPtr() + n * inp_sN; auto out_ptr_NCHW = output.DataPtr() + n * out_sN + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCHW += out_sC) { if (within_bounds_2d(iy_nearest, ix_nearest, inp_H, inp_W)) { *out_ptr_NCHW = inp_ptr_NC[iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCHW = static_cast<scalar_t>(0.0f); } } } } } template <typename scalar_t> LAUNCH_BOUNDS_1(1024) __global__ void grid_sampler_3d_kernel(const int nthreads, TensorInfo<scalar_t> input, TensorInfo<scalar_t> grid, TensorInfo<scalar_t> output, const GridSamplerInterpolation interpolation_mode, const GridSamplerPadding padding_mode, bool align_corners) { int C = input.Size(1); int inp_D = input.Size(2); int inp_H = input.Size(3); int inp_W = input.Size(4); int out_D = grid.Size(1); int out_H = grid.Size(2); int out_W = grid.Size(3); int inp_sN = input.Stride(0); int inp_sC = input.Stride(1); int inp_sD = input.Stride(2); int inp_sH = input.Stride(3); int inp_sW = input.Stride(4); int grid_sN = grid.Stride(0); int grid_sD = grid.Stride(1); int grid_sH = grid.Stride(2); int grid_sW = grid.Stride(3); int grid_sCoor = grid.Stride(4); int out_sN = output.Stride(0); int out_sC = output.Stride(1); int out_sD = output.Stride(2); int out_sH = output.Stride(3); int out_sW = output.Stride(4); int64_t thread_idx = blockIdx.x * blockDim.x + threadIdx.x; for (int index = thread_idx; thread_idx < nthreads; thread_idx += blockDim.x * gridDim.x, index = thread_idx) { const int w = index % out_W; const int h = (index / out_W) % out_H; const int d = (index / (out_H * out_W)) % out_D; const int n = index / (out_D * out_H * out_W); const int grid_offset = n * grid_sN + d * grid_sD + h * grid_sH + w * grid_sW; // get the corresponding input x, y, z co-ordinates from grid scalar_t ix = grid.DataPtr()[grid_offset]; scalar_t iy = grid.DataPtr()[grid_offset + grid_sCoor]; scalar_t iz = grid.DataPtr()[grid_offset + 2 * grid_sCoor]; ix = grid_sampler_compute_source_index(ix, inp_W, padding_mode, align_corners); iy = grid_sampler_compute_source_index(iy, inp_H, padding_mode, align_corners); iz = grid_sampler_compute_source_index(iz, inp_D, padding_mode, align_corners); if (interpolation_mode == GridSamplerInterpolation::Bilinear) { // get corner pixel values from (x, y, z) // 
for 4d, we used north-east-south-west // for 5d, we add top-bottom int ix_tnw = static_cast<int>(floor(ix)); int iy_tnw = static_cast<int>(floor(iy)); int iz_tnw = static_cast<int>(floor(iz)); int ix_tne = ix_tnw + 1; int iy_tne = iy_tnw; int iz_tne = iz_tnw; int ix_tsw = ix_tnw; int iy_tsw = iy_tnw + 1; int iz_tsw = iz_tnw; int ix_tse = ix_tnw + 1; int iy_tse = iy_tnw + 1; int iz_tse = iz_tnw; int ix_bnw = ix_tnw; int iy_bnw = iy_tnw; int iz_bnw = iz_tnw + 1; int ix_bne = ix_tnw + 1; int iy_bne = iy_tnw; int iz_bne = iz_tnw + 1; int ix_bsw = ix_tnw; int iy_bsw = iy_tnw + 1; int iz_bsw = iz_tnw + 1; int ix_bse = ix_tnw + 1; int iy_bse = iy_tnw + 1; int iz_bse = iz_tnw + 1; // get surfaces to each neighbor: scalar_t tnw = (ix_bse - ix) * (iy_bse - iy) * (iz_bse - iz); scalar_t tne = (ix - ix_bsw) * (iy_bsw - iy) * (iz_bsw - iz); scalar_t tsw = (ix_bne - ix) * (iy - iy_bne) * (iz_bne - iz); scalar_t tse = (ix - ix_bnw) * (iy - iy_bnw) * (iz_bnw - iz); scalar_t bnw = (ix_tse - ix) * (iy_tse - iy) * (iz - iz_tse); scalar_t bne = (ix - ix_tsw) * (iy_tsw - iy) * (iz - iz_tsw); scalar_t bsw = (ix_tne - ix) * (iy - iy_tne) * (iz - iz_tne); scalar_t bse = (ix - ix_tnw) * (iy - iy_tnw) * (iz - iz_tnw); auto inp_ptr_NC = input.DataPtr() + n * inp_sN; auto out_ptr_NCDHW = output.DataPtr() + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { // (c, iz_tnw, iy_tnw, ix_tnw) * tnw + (c, iz_tne, iy_tne, ix_tne) * // tne // + (c, iz_tsw, iy_tsw, ix_tsw) * tsw + (c, iz_tse, iy_tse, ix_tse) * // tse // + (c, iz_bnw, iy_bnw, ix_bnw) * bnw + (c, iz_bne, iy_bne, ix_bne) * // bne // + (c, iz_bsw, iy_bsw, ix_bsw) * bsw + (c, iz_bse, iy_bse, ix_bse) * // bse *out_ptr_NCDHW = static_cast<scalar_t>(0); if (within_bounds_3d(iz_tnw, iy_tnw, ix_tnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tnw * inp_sD + iy_tnw * inp_sH + ix_tnw * inp_sW] * tnw; } if (within_bounds_3d(iz_tne, iy_tne, ix_tne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tne * inp_sD + iy_tne * inp_sH + ix_tne * inp_sW] * tne; } if (within_bounds_3d(iz_tsw, iy_tsw, ix_tsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tsw * inp_sD + iy_tsw * inp_sH + ix_tsw * inp_sW] * tsw; } if (within_bounds_3d(iz_tse, iy_tse, ix_tse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_tse * inp_sD + iy_tse * inp_sH + ix_tse * inp_sW] * tse; } if (within_bounds_3d(iz_bnw, iy_bnw, ix_bnw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bnw * inp_sD + iy_bnw * inp_sH + ix_bnw * inp_sW] * bnw; } if (within_bounds_3d(iz_bne, iy_bne, ix_bne, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bne * inp_sD + iy_bne * inp_sH + ix_bne * inp_sW] * bne; } if (within_bounds_3d(iz_bsw, iy_bsw, ix_bsw, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bsw * inp_sD + iy_bsw * inp_sH + ix_bsw * inp_sW] * bsw; } if (within_bounds_3d(iz_bse, iy_bse, ix_bse, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW += inp_ptr_NC[iz_bse * inp_sD + iy_bse * inp_sH + ix_bse * inp_sW] * bse; } } } else if (interpolation_mode == GridSamplerInterpolation::Nearest) { int ix_nearest = static_cast<int>(round(ix)); int iy_nearest = static_cast<int>(round(iy)); int iz_nearest = static_cast<int>(round(iz)); // assign nearest neighor pixel value to output pixel auto inp_ptr_NC = input.DataPtr() + n * inp_sN; auto out_ptr_NCDHW = output.DataPtr() + n * out_sN + d * out_sD + h * out_sH + w * out_sW; for (int c = 0; c < C; ++c, inp_ptr_NC += inp_sC, out_ptr_NCDHW += out_sC) { if 
(within_bounds_3d(iz_nearest, iy_nearest, ix_nearest, inp_D, inp_H, inp_W)) { *out_ptr_NCDHW = inp_ptr_NC[iz_nearest * inp_sD + iy_nearest * inp_sH + ix_nearest * inp_sW]; } else { *out_ptr_NCDHW = static_cast<scalar_t>(0); } } } } } template <typename T> void GridSampler2DCuda(const TensorInfo<T>& input, const TensorInfo<T>& grid, TensorInfo<T>& output, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, cudaStream_t stream) { const int N = grid.Size(0); const int H = grid.Size(1); const int W = grid.Size(2); const int count = N * H * W; if (count > 0) { const int blockDim = 1024; const int gridDim = (count + blockDim - 1) / blockDim; grid_sampler_2d_kernel<T><<<gridDim, blockDim, 0, stream>>>( count, input, grid, output, static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); } } template <typename T> void GridSampler3DCuda(const TensorInfo<T>& input, const TensorInfo<T>& grid, TensorInfo<T>& output, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, cudaStream_t stream) { const int N = input.Size(0); const int D = grid.Size(1); const int H = grid.Size(2); const int W = grid.Size(3); const int count = N * D * H * W; if (count > 0) { const int blockDim = 1024; const int gridDim = (count + blockDim - 1) / blockDim; grid_sampler_3d_kernel<T><<<gridDim, blockDim, 0, stream>>>( count, input, grid, output, static_cast<GridSamplerInterpolation>(interpolation_mode), static_cast<GridSamplerPadding>(padding_mode), align_corners); } } template void GridSampler2DCuda<float>(const TensorInfo<float>& input, const TensorInfo<float>& grid, TensorInfo<float>& output, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, cudaStream_t stream); template void GridSampler2DCuda<__half>(const TensorInfo<__half>& input, const TensorInfo<__half>& grid, TensorInfo<__half>& output, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, cudaStream_t stream); template void GridSampler3DCuda<float>(const TensorInfo<float>& input, const TensorInfo<float>& grid, TensorInfo<float>& output, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, cudaStream_t stream); template void GridSampler3DCuda<__half>(const TensorInfo<__half>& input, const TensorInfo<__half>& grid, TensorInfo<__half>& output, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, cudaStream_t stream); FWD_TRT_NAMESPACE_END
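// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not part of the plugin): the coordinate
// mapping in grid_sampler_unnormalize above can be spot-checked on the CPU.
// With align_corners=true, -1/+1 map to the centers of the corner pixels
// (0 and size-1); with align_corners=false they map to the image edges
// (-0.5 and size-0.5).
#include <cstdio>

static float unnormalize(float coord, int size, bool align_corners) {
  if (align_corners) {
    return ((coord + 1.f) / 2.f) * (size - 1);  // [-1, 1] -> [0, size - 1]
  }
  return ((coord + 1.f) * size - 1.f) / 2.f;    // [-1, 1] -> [-0.5, size - 0.5]
}

int main() {
  const int size = 4;
  const float coords[3] = {-1.f, 0.f, 1.f};
  for (float c : coords) {
    printf("coord % .1f -> align_corners=true: %5.2f   align_corners=false: %5.2f\n",
           c, unnormalize(c, size, true), unnormalize(c, size, false));
  }
  // Expected: -1 -> 0.00 / -0.50, 0 -> 1.50 / 1.50, +1 -> 3.00 / 3.50
  return 0;
}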
#include "datamex.hpp" #if ENABLE_GPU #include "datacu.hpp" #endif #ifndef NDEBUG #include<iostream> #endif using namespace vl ; /* ---------------------------------------------------------------- */ /* MexContext */ /* ---------------------------------------------------------------- */ vl::MexContext::MexContext() : Context() #if ENABLE_GPU , gpuIsInitialized(false) , canary(NULL) #endif { } vl::MexContext::~MexContext() { #if ENABLE_GPU // so that ~Context does not crash if MATLAB reset the GPU in the mean time validateGpu() ; #endif } /* ---------------------------------------------------------------- */ /* GPU management */ /* ---------------------------------------------------------------- */ #if ENABLE_GPU // Do noting if the GPU is not initialized, otherwise invalidate it // if needed vl::Error MexContext::validateGpu() { if (!gpuIsInitialized) { return vl::vlSuccess ; } gpuIsInitialized = mxGPUIsValidGPUData(canary) ; if (!gpuIsInitialized) { #ifndef NDEBUG std::cout<<"MexContext:: GPU reset detected; invalidating the GPU state"<<std::endl ; #endif mxDestroyArray(canary) ; canary = NULL ; Context::invalidateGpu() ; } return vl::vlSuccess ; } // Initialize GPU; also make sure that it was not reset by MATLAB vl::Error vl::MexContext::initGpu() { validateGpu() ; if (!gpuIsInitialized) { mwSize dims = 1 ; mxInitGPU() ; // todo: can mxGPUCreateGPUArray return NULL ? mxGPUArray * gpuArray = mxGPUCreateGPUArray(1,&dims,mxINT8_CLASS,mxREAL,MX_GPU_DO_NOT_INITIALIZE) ; canary = mxGPUCreateMxArrayOnGPU(gpuArray) ; mexMakeArrayPersistent(canary) ; mxGPUDestroyGPUArray(gpuArray) ; gpuIsInitialized = true ; } return vl::vlSuccess ; } #endif /* ---------------------------------------------------------------- */ /* MexTensor */ /* ---------------------------------------------------------------- */ /* The MexTensor class helps handling MATLAB CPU and GPU arrays. The design is somewhat ackward to match MATLAB assumpitons. The class can either: - wrap an existing mxArray (or mxArray + mxGPUArray) - or create a new mxArray (or mxArray + mxGPUArray) In the last case, the array is released when the destructor is called. However, this would normally interfere with MATLAB automatic garbage collection upon raising an exception (which can happen using mexErrMsgTxt() or, implicitly, when an array creation function cannot complete, for example due to a memory error). Therefore the constructors make the allocated memory persistent. C++ guarantees that the arrays are freeed upon error in the destructors. Note that, upon cerating an array, errors such as running out of CPU/GPU memory can occurr. In this case, MATLAB throws an error and quits the MEX file (either implicitly or because we call mexErrMsgTxt()). Hence constructors always complete with a well defined object. 
*/ /* ---------------------------------------------------------------- */ /* Constructing, clearing, destroying */ /* ---------------------------------------------------------------- */ vl::MexTensor::MexTensor(MexContext & context) : context(context), Tensor(), array(NULL), isArrayOwner(false) #if ENABLE_GPU , gpuArray(NULL) #endif { } mxArray * vl::MexTensor::relinquish() { isArrayOwner = false ; return (mxArray*) array ; } void vl::MexTensor::clear() { #if ENABLE_GPU if (gpuArray) { mxGPUDestroyGPUArray(gpuArray) ; gpuArray = NULL ; } #endif if (isArrayOwner) { if (array) { mxDestroyArray((mxArray*)array) ; array = NULL ; } isArrayOwner = false ; } memory = NULL ; memorySize = 0 ; memoryType = vl::CPU ; width = 0 ; height = 0 ; depth = 0 ; size = 0 ; } vl::MexTensor::~MexTensor() { clear() ; } /* ---------------------------------------------------------------- */ /* init without filling */ /* ---------------------------------------------------------------- */ vl::Error vl::MexTensor::init(Device dev, TensorGeometry const & geom) { mwSize dimensions [4] = {(mwSize)geom.getHeight(), (mwSize)geom.getWidth(), (mwSize)geom.getDepth(), (mwSize)geom.getSize()} ; mwSize newMemorySize = geom.getNumElements() * sizeof(float) ; float * newMemory = NULL ; mxArray * newArray = NULL ; #if ENABLE_GPU mxGPUArray* newGpuArray = NULL ; #endif if (dev == vl::CPU) { mwSize dimensions_ [4] = {0} ; newMemory = (float*)mxMalloc(newMemorySize) ; newArray = mxCreateNumericArray(4, dimensions_, mxSINGLE_CLASS, mxREAL) ; mxSetData(newArray, newMemory) ; mxSetDimensions(newArray, dimensions, 4) ; } #ifdef ENABLE_GPU else { newGpuArray = mxGPUCreateGPUArray(4, dimensions, mxSINGLE_CLASS, mxREAL, MX_GPU_DO_NOT_INITIALIZE) ; newArray = mxGPUCreateMxArrayOnGPU(newGpuArray) ; newMemory = (float*) mxGPUGetData(newGpuArray) ; } #else else { abort() ; } #endif //mexMakeArrayPersistent(newArray) ; // avoid double free with MATALB garbage collector upon error TensorGeometry::operator=(geom) ; memoryType = dev ; memory = newMemory ; memorySize = newMemorySize ; array = newArray ; isArrayOwner = true ; #if ENABLE_GPU gpuArray = newGpuArray ; #endif return vl::vlSuccess ; } /* ---------------------------------------------------------------- */ /* init filling with zeros */ /* ---------------------------------------------------------------- */ vl::Error vl::MexTensor::initWithZeros(vl::Device dev, TensorGeometry const & geom) { clear() ; mwSize dimensions [4] = {(mwSize)geom.getHeight(), (mwSize)geom.getWidth(), (mwSize)geom.getDepth(), (mwSize)geom.getSize()} ; mwSize newMemorySize = geom.getNumElements() * sizeof(float) ; float * newMemory = NULL ; mxArray * newArray = NULL ; #if ENABLE_GPU mxGPUArray* newGpuArray = NULL ; #endif if (dev == vl::CPU) { newArray = mxCreateNumericArray(4, dimensions, mxSINGLE_CLASS, mxREAL) ; newMemory = (float*) mxGetData(newArray) ; } #ifdef ENABLE_GPU else { context.initGpu() ; newGpuArray = mxGPUCreateGPUArray(4, dimensions, mxSINGLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES) ; newArray = mxGPUCreateMxArrayOnGPU(newGpuArray) ; newMemory = (float*) mxGPUGetData((mxGPUArray*)newGpuArray) ; } #else else { abort() ; } #endif //mexMakeArrayPersistent(newArray) ; // avoid double free with MATALB garbage collector upon error TensorGeometry::operator=(geom) ; memoryType = dev ; memory = newMemory ; memorySize = newMemorySize ; array = newArray ; isArrayOwner = true ; #if ENABLE_GPU gpuArray = newGpuArray ; #endif return vl::vlSuccess ; } /* 
---------------------------------------------------------------- */ /* init with any fill */ /* ---------------------------------------------------------------- */ #if ENABLE_GPU template<typename type> __global__ void fill (type * data, type value, int size) { int index = threadIdx.x + blockIdx.x * blockDim.x ; if (index < size) data[index] = value ; } #endif vl::Error vl::MexTensor::init(vl::Device dev, vl::TensorGeometry const & geom, float value) { if (value == 0) { initWithZeros(dev, geom) ; } else { init(dev, geom) ; if (memoryType == vl::CPU) { int const n = getNumElements() ; for (int i = 0 ; i < n ; ++i) { memory[i] = value ; } } #ifdef ENABLE_GPU else { fill<float> <<<divideUpwards(getNumElements(), VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS>>> ((float*)getMemory(), value, getNumElements()) ; cudaError_t error = cudaGetLastError() ; if (error != cudaSuccess) { clear() ; mexErrMsgTxt((std::string("MexTensor: fill: CUDA error: ") + cudaGetErrorString(error)).c_str()) ; } } #endif } return vl::vlSuccess ; } /* ---------------------------------------------------------------- */ /* init with array */ /* ---------------------------------------------------------------- */ vl::Error vl::MexTensor::init(mxArray const * array_) { clear() ; if (array_ == NULL) { return vl::vlSuccess ; } // empty vl::Device dev ; float * newMemory = NULL ; mxArray * newArray = (mxArray*)array_ ; #if ENABLE_GPU mxGPUArray* newGpuArray = NULL ; #endif mwSize const * dimensions ; mwSize numDimensions ; mxClassID classID ; #ifdef ENABLE_GPU context.initGpu() ; if (mxIsGPUArray(array_)) { dev = GPU ; newGpuArray = (mxGPUArray*) mxGPUCreateFromMxArray(newArray) ; newMemory = (float*) mxGPUGetDataReadOnly(newGpuArray) ; classID = mxGPUGetClassID(newGpuArray) ; dimensions = mxGPUGetDimensions(newGpuArray) ; numDimensions = mxGPUGetNumberOfDimensions(newGpuArray) ; } else #endif { if (!mxIsNumeric(newArray)) { mexErrMsgTxt("An input is not a numeric array (or GPU support not compiled).") ; } dev = CPU ; newMemory = (float*) mxGetData(newArray) ; classID = mxGetClassID(newArray) ; dimensions = mxGetDimensions(newArray) ; numDimensions = mxGetNumberOfDimensions(newArray) ; } height = (numDimensions >= 1) ? dimensions[0] : 1 ; width = (numDimensions >= 2) ? dimensions[1] : 1 ; depth = (numDimensions >= 3) ? dimensions[2] : 1 ; size = (numDimensions >= 4) ? dimensions[3] : 1 ; memoryType = dev ; memory = newMemory ; memorySize = getNumElements() * sizeof(float) ; array = newArray ; isArrayOwner = false ; #if ENABLE_GPU gpuArray = newGpuArray ; #endif if (classID != mxSINGLE_CLASS && ! isEmpty()) { mexErrMsgTxt("An input is not a SINGLE array nor it is empty.") ; } return vl::vlSuccess ; } void vl::print(char const * str, vl::Tensor const & tensor) { size_t size = tensor.getNumElements() * sizeof(float) ; double scaled ; const char * units ; if (size < 1024) { scaled = size ; units = "B" ; } else if (size < 1024*1024) { scaled = size / 1024.0 ; units = "KB" ; } else if (size < 1024*1024*1024) { scaled = size / (1024.0 * 1024.0) ; units = "MB" ; } else { scaled = size / (1024.0 * 1024.0 * 1024.0) ; units = "GB" ; } const char * dev = "" ; switch (tensor.getMemoryType()) { case vl::CPU : dev = "CPU" ; break ; case vl::GPU : dev = "GPU" ; break ; } mexPrintf("%s[%d x %d x %d x %d | %.1f%s %s]\n", str, tensor.getHeight(), tensor.getWidth(), tensor.getDepth(), tensor.getSize(), scaled, units, dev); }
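// ---------------------------------------------------------------------------
// Illustrative CUDA sketch (standalone, outside the MatConvNet/MEX build):
// MexTensor::init(dev, geom, value) above fills GPU memory by launching the
// fill<> kernel with divideUpwards(n, VL_CUDA_NUM_THREADS) blocks. The
// program below reproduces that launch pattern; divideUpwards is re-declared
// here and 256 is used as a stand-in for VL_CUDA_NUM_THREADS (both are
// assumptions of this sketch, not values taken from the library).
#include <cstdio>
#include <cuda_runtime.h>

template <typename type>
__global__ void fill(type* data, type value, int size) {
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  if (index < size) data[index] = value;
}

static int divideUpwards(int a, int b) { return (a + b - 1) / b; }

int main() {
  const int n = 1000;
  const int numThreads = 256;  // stand-in for VL_CUDA_NUM_THREADS
  float* d = nullptr;
  cudaMalloc(&d, n * sizeof(float));
  fill<float><<<divideUpwards(n, numThreads), numThreads>>>(d, 3.5f, n);
  float h[4];
  cudaMemcpy(h, d, sizeof(h), cudaMemcpyDeviceToHost);
  printf("first values: %f %f %f %f\n", h[0], h[1], h[2], h[3]);
  cudaFree(d);
  return 0;
}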
#pragma once #include <cuda.h> #include <gunrock/util/cuda_properties.cuh> #include <gunrock/util/vector_types.cuh> namespace gunrock { namespace util { namespace io { /** * Enumeration of data movement cache modifiers. */ namespace st { enum CacheModifier { NONE, // default (currently wb) cg, // cache global wb, // write back all levels cs, // cache streaming LIMIT }; } // namespace st /** * Basic utility for performing modified stores through cache. */ template <st::CacheModifier CACHE_MODIFIER> struct ModifiedStore { /* * Store operation we will provide specializations for */ template <typename T> __device__ __forceinline__ static void St(T val, T* ptr); /* * Vec-4 stores for 64-bit types are implemented as two vec-2 stores */ __device__ __forceinline__ static void St(double4 val, double4* ptr) { ModifiedStore<CACHE_MODIFIER>::St(*reinterpret_cast<double2*>(&val.x), reinterpret_cast<double2*>(ptr)); ModifiedStore<CACHE_MODIFIER>::St(*reinterpret_cast<double2*>(&val.z), reinterpret_cast<double2*>(ptr) + 1); } __device__ __forceinline__ static void St(ulonglong4 val, ulonglong4* ptr) { ModifiedStore<CACHE_MODIFIER>::St(*reinterpret_cast<ulonglong2*>(&val.x), reinterpret_cast<ulonglong2*>(ptr)); ModifiedStore<CACHE_MODIFIER>::St(*reinterpret_cast<ulonglong2*>(&val.z), reinterpret_cast<ulonglong2*>(ptr) + 1); } __device__ __forceinline__ static void St(longlong4 val, longlong4* ptr) { ModifiedStore<CACHE_MODIFIER>::St(*reinterpret_cast<longlong2*>(&val.x), reinterpret_cast<longlong2*>(ptr)); ModifiedStore<CACHE_MODIFIER>::St(*reinterpret_cast<longlong2*>(&val.z), reinterpret_cast<longlong2*>(ptr) + 1); } }; #if __CUDA_ARCH__ >= 200 /** * Specialization for NONE modifier */ template <> template <typename T> __device__ __forceinline__ void ModifiedStore<st::NONE>::St(T val, T* ptr) { *ptr = val; } /** * Singleton store op */ #define GR_STORE(base_type, ptx_type, reg_mod, cast_type, modifier) \ template <> \ template <> \ void ModifiedStore<st::modifier>::St(base_type val, base_type* ptr) { \ asm volatile("st.global." #modifier "." #ptx_type " [%0], %1;" \ : \ : _GR_ASM_PTR_(ptr), \ #reg_mod(reinterpret_cast<cast_type&>(val))); \ } /** * Vector store ops */ #define GR_STORE_VEC1(component_type, base_type, ptx_type, reg_mod, cast_type, \ modifier) \ template <> \ template <> \ void ModifiedStore<st::modifier>::St(base_type val, base_type* ptr) { \ component_type c = val.x; \ asm volatile("st.global." #modifier "." #ptx_type " [%0], %1;" \ : \ : _GR_ASM_PTR_(ptr), \ #reg_mod(reinterpret_cast<cast_type&>(c))); \ } #define GR_STORE_VEC2(component_type, base_type, ptx_type, reg_mod, cast_type, \ modifier) \ template <> \ template <> \ void ModifiedStore<st::modifier>::St(base_type val, base_type* ptr) { \ component_type cx = val.x; \ component_type cy = val.y; \ asm volatile("st.global." #modifier ".v2." #ptx_type " [%0], {%1, %2};" \ : \ : _GR_ASM_PTR_(ptr), \ #reg_mod(reinterpret_cast<cast_type&>(cx)), \ #reg_mod(reinterpret_cast<cast_type&>(cy))); \ } #define GR_STORE_VEC4(component_type, base_type, ptx_type, reg_mod, cast_type, \ modifier) \ template <> \ template <> \ void ModifiedStore<st::modifier>::St(base_type val, base_type* ptr) { \ component_type cx = val.x; \ component_type cy = val.y; \ component_type cz = val.z; \ component_type cw = val.w; \ asm volatile( \ "st.global." #modifier ".v4." 
#ptx_type " [%0], {%1, %2, %3, %4};" \ : \ : _GR_ASM_PTR_(ptr), #reg_mod(reinterpret_cast<cast_type&>(cx)), \ #reg_mod(reinterpret_cast<cast_type&>(cy)), \ #reg_mod(reinterpret_cast<cast_type&>(cz)), \ #reg_mod(reinterpret_cast<cast_type&>(cw))); \ } /** * Defines specialized store ops for only the base type */ #define GR_STORE_BASE(base_type, ptx_type, reg_mod, cast_type) \ GR_STORE(base_type, ptx_type, reg_mod, cast_type, cg) \ GR_STORE(base_type, ptx_type, reg_mod, cast_type, wb) \ GR_STORE(base_type, ptx_type, reg_mod, cast_type, cs) /** * Defines specialized store ops for the base type and for its derivative vec1 * and vec2 types */ #define GR_STORE_BASE_ONE_TWO(base_type, dest_type, short_type, ptx_type, \ reg_mod, cast_type) \ GR_STORE_BASE(base_type, ptx_type, reg_mod, cast_type) \ \ GR_STORE_VEC1(base_type, short_type##1, ptx_type, reg_mod, cast_type, cg) \ GR_STORE_VEC1(base_type, short_type##1, ptx_type, reg_mod, cast_type, wb) \ GR_STORE_VEC1(base_type, short_type##1, ptx_type, reg_mod, cast_type, cs) \ \ GR_STORE_VEC2(base_type, short_type##2, ptx_type, reg_mod, cast_type, cg) \ GR_STORE_VEC2(base_type, short_type##2, ptx_type, reg_mod, cast_type, wb) \ GR_STORE_VEC2(base_type, short_type##2, ptx_type, reg_mod, cast_type, cs) /** * Defines specialized store ops for the base type and for its derivative vec1, * vec2, and vec4 types */ #define GR_STORE_BASE_ONE_TWO_FOUR(base_type, dest_type, short_type, ptx_type, \ reg_mod, cast_type) \ GR_STORE_BASE_ONE_TWO(base_type, dest_type, short_type, ptx_type, reg_mod, \ cast_type) \ \ GR_STORE_VEC4(base_type, short_type##4, ptx_type, reg_mod, cast_type, cg) \ GR_STORE_VEC4(base_type, short_type##4, ptx_type, reg_mod, cast_type, wb) \ GR_STORE_VEC4(base_type, short_type##4, ptx_type, reg_mod, cast_type, cs) #if CUDA_VERSION >= 4000 #define GR_REG8 h #define GR_REG16 h #define GR_CAST8 short #else #define GR_REG8 r #define GR_REG16 r #define GR_CAST8 char #endif /** * Define cache-modified stores for all 4-byte (and smaller) structures */ GR_STORE_BASE_ONE_TWO_FOUR(char, char, char, s8, GR_REG8, GR_CAST8) GR_STORE_BASE_ONE_TWO_FOUR(short, short, short, s16, GR_REG16, short) GR_STORE_BASE_ONE_TWO_FOUR(int, int, int, s32, r, int) GR_STORE_BASE_ONE_TWO_FOUR(unsigned char, unsigned char, uchar, u8, GR_REG8, unsigned GR_CAST8) GR_STORE_BASE_ONE_TWO_FOUR(unsigned short, unsigned short, ushort, u16, GR_REG16, unsigned short) GR_STORE_BASE_ONE_TWO_FOUR(unsigned int, unsigned int, uint, u32, r, unsigned int) GR_STORE_BASE_ONE_TWO_FOUR(float, float, float, f32, f, float) #if !defined(__LP64__) || (__LP64__ == 0) // longs are 64-bit on non-Windows 64-bit compilers GR_STORE_BASE_ONE_TWO_FOUR(long, long, long, s32, r, long) GR_STORE_BASE_ONE_TWO_FOUR(unsigned long, unsigned long, ulong, u32, r, unsigned long) #endif GR_STORE_BASE(signed char, s8, r, unsigned int) // Only need to define base: char2,char4, etc // already defined from char GR_STORE_BASE(bool, s8, r, unsigned int) /** * Define cache-modified stores for all 8-byte structures */ GR_STORE_BASE_ONE_TWO(unsigned long long, unsigned long long, ulonglong, u64, l, unsigned long long) GR_STORE_BASE_ONE_TWO(long long, long long, longlong, s64, l, long long) GR_STORE_BASE_ONE_TWO( double, double, double, s64, l, long long) // Cast to 64-bit long long a workaround for the fact that // the 3.x assembler has no register constraint for doubles #if defined(__LP64__) // longs are 64-bit on non-Windows 64-bit compilers GR_STORE_BASE_ONE_TWO(long, long, long, s64, l, long) GR_STORE_BASE_ONE_TWO(unsigned 
long, unsigned long, ulong, u64, l, unsigned long) #endif /** * Undefine macros */ #undef GR_STORE_VEC1 #undef GR_STORE_VEC2 #undef GR_STORE_VEC4 #undef GR_STORE_BASE #undef GR_STORE_BASE_ONE_TWO #undef GR_STORE_BASE_ONE_TWO_FOUR #undef GR_CAST8 #undef GR_REG8 #undef GR_REG16 #else //__CUDA_ARCH__ template <st::CacheModifier WRITE_MODIFIER> template <typename T> __device__ __forceinline__ void ModifiedStore<WRITE_MODIFIER>::St(T val, T* ptr) { *ptr = val; } #endif //__CUDA_ARCH__ } // namespace io } // namespace util } // namespace gunrock
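// ---------------------------------------------------------------------------
// Illustrative usage sketch (the include path below is an assumption, not
// taken from this file): a kernel that writes its result through
// ModifiedStore with the cache-global (cg) modifier. On sm_20+ this selects
// the st.global.cg.f32 specialization defined above; on older architectures
// the fallback template performs a plain store.
#include <cuda_runtime.h>
#include <gunrock/util/io/modified_store.cuh>  // assumed header path

__global__ void scale_and_store(const float* in, float* out, float alpha, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    // Bypass L1 on the write; useful for streamed output that is not re-read.
    gunrock::util::io::ModifiedStore<gunrock::util::io::st::cg>::St(in[i] * alpha, out + i);
  }
}

int main() {
  const int n = 1 << 20;
  float *d_in = nullptr, *d_out = nullptr;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  scale_and_store<<<(n + 255) / 256, 256>>>(d_in, d_out, 2.0f, n);
  cudaDeviceSynchronize();
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}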
/*
* Example showing the use of CUFFT for fast 1D-convolution using FFT. * This sample is the same as simpleCUFFT, except that it uses a callback * function to perform the pointwise multiply and scale, on input to the * inverse transform. * */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cuda_runtime.h> #include <cufft.h> #include <cufftXt.h> #include <helper_functions.h> #include <helper_cuda.h> // Complex data type typedef float2 Complex; static __device__ __host__ inline Complex ComplexAdd(Complex, Complex); static __device__ __host__ inline Complex ComplexScale(Complex, float); static __device__ __host__ inline Complex ComplexMul(Complex, Complex); // This is the callback routine prototype static __device__ cufftComplex ComplexPointwiseMulAndScale(void *a, size_t index, void *cb_info, void *sharedmem); typedef struct _cb_params { Complex *filter; float scale; } cb_params; // This is the callback routine. It does complex pointwise multiplication with // scaling. static __device__ cufftComplex ComplexPointwiseMulAndScale(void *a, size_t index, void *cb_info, void *sharedmem) { cb_params *my_params = (cb_params *)cb_info; return (cufftComplex)ComplexScale( ComplexMul(((Complex *)a)[index], (my_params->filter)[index]), my_params->scale); } // Define the device pointer to the callback routine. The host code will fetch // this and pass it to CUFFT __device__ cufftCallbackLoadC myOwnCallbackPtr = ComplexPointwiseMulAndScale; // Filtering functions void Convolve(const Complex *, int, const Complex *, int, Complex *); // Padding functions int PadData(const Complex *, Complex **, int, const Complex *, Complex **, int); //////////////////////////////////////////////////////////////////////////////// // declaration, forward int runTest(int argc, char **argv); // The filter size is assumed to be a number smaller than the signal size #define SIGNAL_SIZE 50 #define FILTER_KERNEL_SIZE 11 //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { struct cudaDeviceProp properties; int device; checkCudaErrors(cudaGetDevice(&device)); checkCudaErrors(cudaGetDeviceProperties(&properties, device)); if (!(properties.major >= 2)) { printf("simpleCUFFT_callback requires CUDA architecture SM2.0 or higher\n"); return EXIT_WAIVED; } return runTest(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! 
Run a simple test for CUFFT callbacks //////////////////////////////////////////////////////////////////////////////// int runTest(int argc, char **argv) { printf("[simpleCUFFT_callback] is starting...\n"); findCudaDevice(argc, (const char **)argv); // Allocate host memory for the signal Complex *h_signal = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE); // Initialize the memory for the signal for (unsigned int i = 0; i < SIGNAL_SIZE; ++i) { h_signal[i].x = rand() / (float)RAND_MAX; h_signal[i].y = 0; } // Allocate host memory for the filter Complex *h_filter_kernel = (Complex *)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE); // Initialize the memory for the filter for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i) { h_filter_kernel[i].x = rand() / (float)RAND_MAX; h_filter_kernel[i].y = 0; } // Pad signal and filter kernel Complex *h_padded_signal; Complex *h_padded_filter_kernel; int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE, h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE); int mem_size = sizeof(Complex) * new_size; // Allocate device memory for signal Complex *d_signal; checkCudaErrors(cudaMalloc((void **)&d_signal, mem_size)); // Copy host memory to device checkCudaErrors( cudaMemcpy(d_signal, h_padded_signal, mem_size, cudaMemcpyHostToDevice)); // Allocate device memory for filter kernel Complex *d_filter_kernel; checkCudaErrors(cudaMalloc((void **)&d_filter_kernel, mem_size)); // Copy host memory to device checkCudaErrors(cudaMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size, cudaMemcpyHostToDevice)); // Create one CUFFT plan for the forward transforms, and one for the reverse // transform with load callback. cufftHandle plan, cb_plan; size_t work_size; checkCudaErrors(cufftCreate(&plan)); checkCudaErrors(cufftCreate(&cb_plan)); checkCudaErrors(cufftMakePlan1d(plan, new_size, CUFFT_C2C, 1, &work_size)); checkCudaErrors(cufftMakePlan1d(cb_plan, new_size, CUFFT_C2C, 1, &work_size)); // Define a structure used to pass in the device address of the filter kernel, // and the scale factor cb_params h_params; h_params.filter = d_filter_kernel; h_params.scale = 1.0f / new_size; // Allocate device memory for parameters cb_params *d_params; checkCudaErrors(cudaMalloc((void **)&d_params, sizeof(cb_params))); // Copy host memory to device checkCudaErrors(cudaMemcpy(d_params, &h_params, sizeof(cb_params), cudaMemcpyHostToDevice)); // The host needs to get a copy of the device pointer to the callback cufftCallbackLoadC hostCopyOfCallbackPtr; checkCudaErrors(cudaMemcpyFromSymbol(&hostCopyOfCallbackPtr, myOwnCallbackPtr, sizeof(hostCopyOfCallbackPtr))); // Now associate the load callback with the plan. cufftResult status = cufftXtSetCallback(cb_plan, (void **)&hostCopyOfCallbackPtr, CUFFT_CB_LD_COMPLEX, (void **)&d_params); if (status == CUFFT_LICENSE_ERROR) { printf("This sample requires a valid license file.\n"); printf( "The file was either not found, out of date, or otherwise invalid.\n"); return EXIT_WAIVED; } checkCudaErrors(cufftXtSetCallback(cb_plan, (void **)&hostCopyOfCallbackPtr, CUFFT_CB_LD_COMPLEX, (void **)&d_params)); // Transform signal and kernel printf("Transforming signal cufftExecC2C\n"); checkCudaErrors(cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_FORWARD)); checkCudaErrors(cufftExecC2C(plan, (cufftComplex *)d_filter_kernel, (cufftComplex *)d_filter_kernel, CUFFT_FORWARD)); // Transform signal back, using the callback to do the pointwise multiply on // the way in. 
printf("Transforming signal back cufftExecC2C\n"); checkCudaErrors(cufftExecC2C(cb_plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_INVERSE)); // Copy device memory to host Complex *h_convolved_signal = h_padded_signal; checkCudaErrors(cudaMemcpy(h_convolved_signal, d_signal, mem_size, cudaMemcpyDeviceToHost)); // Allocate host memory for the convolution result Complex *h_convolved_signal_ref = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE); // Convolve on the host Convolve(h_signal, SIGNAL_SIZE, h_filter_kernel, FILTER_KERNEL_SIZE, h_convolved_signal_ref); // check result bool bTestResult = sdkCompareL2fe((float *)h_convolved_signal_ref, (float *)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f); // Destroy CUFFT context checkCudaErrors(cufftDestroy(plan)); checkCudaErrors(cufftDestroy(cb_plan)); // cleanup memory free(h_signal); free(h_filter_kernel); free(h_padded_signal); free(h_padded_filter_kernel); free(h_convolved_signal_ref); checkCudaErrors(cudaFree(d_signal)); checkCudaErrors(cudaFree(d_filter_kernel)); checkCudaErrors(cudaFree(d_params)); return bTestResult ? EXIT_SUCCESS : EXIT_FAILURE; } // Pad data int PadData(const Complex *signal, Complex **padded_signal, int signal_size, const Complex *filter_kernel, Complex **padded_filter_kernel, int filter_kernel_size) { int minRadius = filter_kernel_size / 2; int maxRadius = filter_kernel_size - minRadius; int new_size = signal_size + maxRadius; // Pad signal Complex *new_data = (Complex *)malloc(sizeof(Complex) * new_size); memcpy(new_data + 0, signal, signal_size * sizeof(Complex)); memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex)); *padded_signal = new_data; // Pad filter new_data = (Complex *)malloc(sizeof(Complex) * new_size); memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex)); memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex)); memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex)); *padded_filter_kernel = new_data; return new_size; } //////////////////////////////////////////////////////////////////////////////// // Filtering operations //////////////////////////////////////////////////////////////////////////////// // Computes convolution on the host void Convolve(const Complex *signal, int signal_size, const Complex *filter_kernel, int filter_kernel_size, Complex *filtered_signal) { int minRadius = filter_kernel_size / 2; int maxRadius = filter_kernel_size - minRadius; // Loop over output element indices for (int i = 0; i < signal_size; ++i) { filtered_signal[i].x = filtered_signal[i].y = 0; // Loop over convolution indices for (int j = -maxRadius + 1; j <= minRadius; ++j) { int k = i + j; if (k >= 0 && k < signal_size) { filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j])); } } } } //////////////////////////////////////////////////////////////////////////////// // Complex operations //////////////////////////////////////////////////////////////////////////////// // Complex addition static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b) { Complex c; c.x = a.x + b.x; c.y = a.y + b.y; return c; } // Complex scale static __device__ __host__ inline Complex ComplexScale(Complex a, float s) { Complex c; c.x = s * a.x; c.y = s * a.y; return c; } // Complex multiplication static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b) { Complex c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; }
#include <iostream> namespace chrono { // dot product of each column of a matrix with itself CUDA_HOST_DEVICE inline real3 DotMM(const real* M) { real3 result; result.x = M[0] * M[0] + M[1] * M[1] + M[2] * M[2]; result.y = M[4] * M[4] + M[5] * M[5] + M[6] * M[6]; result.z = M[8] * M[8] + M[9] * M[9] + M[10] * M[10]; return result; } // dot product of each column of a matrix with another matrix CUDA_HOST_DEVICE inline real3 DotMM(const real* M, const real* N) { real3 result; result.x = M[0] * N[0] + M[1] * N[1] + M[2] * N[2]; result.y = M[4] * N[4] + M[5] * N[5] + M[6] * N[6]; result.z = M[8] * N[8] + M[9] * N[9] + M[10] * N[10]; return result; } CUDA_HOST_DEVICE inline Mat33 MulMM(const real* M, const real* N) { Mat33 r; r[0] = M[0] * N[0] + M[4] * N[1] + M[8] * N[2]; r[1] = M[1] * N[0] + M[5] * N[1] + M[9] * N[2]; r[2] = M[2] * N[0] + M[6] * N[1] + M[10] * N[2]; r[4] = M[0] * N[4] + M[4] * N[5] + M[8] * N[6]; r[5] = M[1] * N[4] + M[5] * N[5] + M[9] * N[6]; r[6] = M[2] * N[4] + M[6] * N[5] + M[10] * N[6]; r[8] = M[0] * N[8] + M[4] * N[9] + M[8] * N[10]; r[9] = M[1] * N[8] + M[5] * N[9] + M[9] * N[10]; r[10] = M[2] * N[8] + M[6] * N[9] + M[10] * N[10]; return r; } CUDA_HOST_DEVICE inline Mat33 MulM_TM(const real* M, const real* N) { // c1 c2 c3 // c1 c2 c3 // 0 1 2 // 0 4 8 // 4 5 6 // 1 5 9 // 8 9 10 // 2 6 10 Mat33 r; r[0] = M[0] * N[0] + M[1] * N[1] + M[2] * N[2]; r[1] = M[4] * N[0] + M[5] * N[1] + M[6] * N[2]; r[2] = M[8] * N[0] + M[9] * N[1] + M[10] * N[2]; r[4] = M[0] * N[4] + M[1] * N[5] + M[2] * N[6]; r[5] = M[4] * N[4] + M[5] * N[5] + M[6] * N[6]; r[6] = M[8] * N[4] + M[9] * N[5] + M[10] * N[6]; r[8] = M[0] * N[8] + M[1] * N[9] + M[2] * N[10]; r[9] = M[4] * N[8] + M[5] * N[9] + M[6] * N[10]; r[10] = M[8] * N[8] + M[9] * N[9] + M[10] * N[10]; return r; } CUDA_HOST_DEVICE inline real3 MulMV(const real* M, const real* N) { real3 r; r[0] = M[0] * N[0] + M[4] * N[1] + M[8] * N[2]; r[1] = M[1] * N[0] + M[5] * N[1] + M[9] * N[2]; r[2] = M[2] * N[0] + M[6] * N[1] + M[10] * N[2]; return r; } CUDA_HOST_DEVICE inline Mat33 OuterProductVV(const real* A, const real* B) { return Mat33(A[0] * B[0], A[1] * B[0], A[2] * B[0], A[0] * B[1], A[1] * B[1], A[2] * B[1], A[0] * B[2], A[1] * B[2], A[2] * B[2]); } CUDA_HOST_DEVICE inline Mat33 ScaleMat(const real* M, const real b) { Mat33 r; r[0] = M[0] * b; r[1] = M[1] * b; r[2] = M[2] * b; r[4] = M[4] * b; r[5] = M[5] * b; r[6] = M[6] * b; r[8] = M[8] * b; r[9] = M[9] * b; r[10] = M[10] * b; return r; } CUDA_HOST_DEVICE inline SymMat33 NormalEquations(const real* A) { SymMat33 T; T.x11 = A[0] * A[0] + A[1] * A[1] + A[2] * A[2]; T.x21 = A[0] * A[4] + A[1] * A[5] + A[2] * A[6]; T.x31 = A[0] * A[8] + A[1] * A[9] + A[2] * A[10]; T.x22 = A[4] * A[4] + A[5] * A[5] + A[6] * A[6]; T.x32 = A[4] * A[8] + A[5] * A[9] + A[6] * A[10]; T.x33 = A[8] * A[8] + A[9] * A[9] + A[10] * A[10]; return T; } CUDA_HOST_DEVICE inline Mat33 MAbs(const real* M) { return Mat33(Abs(M[0]), Abs(M[1]), Abs(M[2]), Abs(M[4]), Abs(M[5]), Abs(M[6]), Abs(M[8]), Abs(M[9]), Abs(M[10])); } //[0,4,8 ] //[1,5,9 ] //[2,6,10] //[3,7,11] CUDA_HOST_DEVICE real3 operator*(const Mat33& M, const real3& v) { return MulMV(M.array, v.array); } CUDA_HOST_DEVICE Mat33 operator*(const Mat33& N, const real scale) { return ScaleMat(N.array, scale); } CUDA_HOST_DEVICE Mat33 operator*(const Mat33& M, const Mat33& N) { return MulMM(M.array, N.array); } CUDA_HOST_DEVICE Mat33 operator+(const Mat33& M, const Mat33& N) { return Mat33(M[0] + N[0], M[1] + N[1], M[2] + N[2], M[4] + N[4], M[5] + N[5], M[6] + N[6], M[8] + 
N[8], M[9] + N[9], M[10] + N[10]); } CUDA_HOST_DEVICE Mat33 operator-(const Mat33& M, const Mat33& N) { return Mat33(M[0] - N[0], M[1] - N[1], M[2] - N[2], M[4] - N[4], M[5] - N[5], M[6] - N[6], M[8] - N[8], M[9] - N[9], M[10] - N[10]); } CUDA_HOST_DEVICE Mat33 operator-(const Mat33& M) { return Mat33(-M[0], -M[1], -M[2], -M[4], -M[5], -M[6], -M[8], -M[9], -M[10]); } CUDA_HOST_DEVICE Mat33 Abs(const Mat33& m) { return MAbs(m.array); } CUDA_HOST_DEVICE Mat33 SkewSymmetric(const real3& r) { return Mat33(0, r[2], -r[1], -r[2], 0, r[0], r[1], -r[0], 0); } CUDA_HOST_DEVICE Mat33 SkewSymmetricAlt(const real3& r) { return Mat33(0, r[2], r[1], r[2], 0, r[0], r[1], r[0], 0); } CUDA_HOST_DEVICE Mat33 MultTranspose(const Mat33& M, const Mat33& N) { // Not a clean way to write this in AVX, might as well transpose first and then multiply return M * Transpose(N); } CUDA_HOST_DEVICE Mat33 TransposeMult(const Mat33& M, const Mat33& N) { return MulM_TM(M.array, N.array); } CUDA_HOST_DEVICE Mat33 Transpose(const Mat33& a) { return Mat33(a[0], a[4], a[8], a[1], a[5], a[9], a[2], a[6], a[10]); } CUDA_HOST_DEVICE real Trace(const Mat33& m) { return m[0] + m[5] + m[10]; } // Multiply a 3x1 by a 1x3 to get a 3x3 CUDA_HOST_DEVICE Mat33 OuterProduct(const real3& a, const real3& b) { return OuterProductVV(a.array, b.array); } CUDA_HOST_DEVICE real InnerProduct(const Mat33& A, const Mat33& B) { return simd::HorizontalAdd(DotMM(A.array, B.array)); } CUDA_HOST_DEVICE Mat33 Adjoint(const Mat33& A) { Mat33 T; T[0] = A[5] * A[10] - A[9] * A[6]; T[1] = -A[1] * A[10] + A[9] * A[2]; T[2] = A[1] * A[6] - A[5] * A[2]; T[4] = -A[4] * A[10] + A[8] * A[6]; T[5] = A[0] * A[10] - A[8] * A[2]; T[6] = -A[0] * A[6] + A[4] * A[2]; T[8] = A[4] * A[9] - A[8] * A[5]; T[9] = -A[0] * A[9] + A[8] * A[1]; T[10] = A[0] * A[5] - A[4] * A[1]; return T; } CUDA_HOST_DEVICE Mat33 AdjointTranspose(const Mat33& A) { Mat33 T; T[0] = A[5] * A[10] - A[9] * A[6]; T[1] = -A[4] * A[10] + A[8] * A[6]; T[2] = A[4] * A[9] - A[8] * A[5]; T[4] = -A[1] * A[10] + A[9] * A[2]; T[5] = A[0] * A[10] - A[8] * A[2]; T[6] = -A[0] * A[9] + A[8] * A[1]; T[8] = A[1] * A[6] - A[5] * A[2]; T[9] = -A[0] * A[6] + A[4] * A[2]; T[10] = A[0] * A[5] - A[4] * A[1]; return T; } CUDA_HOST_DEVICE real Determinant(const Mat33& m) { return m[0] * (m[5] * m[10] - m[9] * m[6]) - m[4] * (m[1] * m[10] - m[9] * m[2]) + m[8] * (m[1] * m[6] - m[5] * m[2]); } CUDA_HOST_DEVICE Mat33 Inverse(const Mat33& A) { real s = Determinant(A); if (s > 0.0) { return Adjoint(A) * real(1.0 / s); } else { return Mat33(0); } } // Same as inverse but we store it transposed CUDA_HOST_DEVICE Mat33 InverseTranspose(const Mat33& A) { real s = Determinant(A); if (s > 0.0) { return AdjointTranspose(A) * real(1.0 / s); } else { return Mat33(0); } } CUDA_HOST_DEVICE real Norm(const Mat33& A) { return Sqrt(Trace(A * Transpose(A))); } CUDA_HOST_DEVICE real NormSq(const Mat33& A) { return Trace(A * Transpose(A)); } CUDA_HOST_DEVICE real DoubleDot(const Mat33& A, const Mat33& B) { return A[0] * B[0] + A[1] * B[1] + A[2] * B[2] + A[4] * B[4] + A[5] * B[5] + A[6] * B[6] + A[8] * B[8] + A[9] * B[9] + A[10] * B[10]; } CUDA_HOST_DEVICE real3 LargestColumnNormalized(const Mat33& A) { real3 scale = DotMM(A.array); real3 sqrt_scale = simd::SquareRoot(scale); if (scale.x > scale.y) { if (scale.x > scale.z) { return A.col(0) / sqrt_scale.x; } } else if (scale.y > scale.z) { return A.col(1) / sqrt_scale.y; } return A.col(2) / sqrt_scale.z; } //// ======================================================================================== 
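// --------------------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): a small sanity check for the
// adjoint/inverse helpers defined above, using only the Mat33 operations from this file.
// Note that Inverse() returns Mat33(0) whenever Determinant(A) <= 0, so the check is only
// meaningful for matrices with positive determinant.
CUDA_HOST_DEVICE inline bool CheckInverse(const Mat33& A, const real tol) {
    if (Determinant(A) <= 0)
        return false;                                             // Inverse() gives the zero matrix here
    Mat33 E = A * Inverse(A) - Mat33(1, 0, 0, 0, 1, 0, 0, 0, 1);  // residual A * A^-1 - I
    return Norm(E) < tol;                                         // Frobenius norm of the residual
}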
CUDA_HOST_DEVICE Mat33 operator*(const DiagMat33& M, const Mat33& N) { return Mat33(M.x11 * N[0], M.x22 * N[1], M.x33 * N[2], M.x11 * N[4], M.x22 * N[5], M.x33 * N[6], M.x11 * N[8], M.x22 * N[9], M.x33 * N[10]); } CUDA_HOST_DEVICE real3 operator*(const DiagMat33& M, const real3& v) { real3 result; result.x = M.x11 * v.x; result.y = M.x22 * v.y; result.z = M.x33 * v.z; return result; } //// ======================================================================================== CUDA_HOST_DEVICE SymMat33 operator-(const SymMat33& M, const real& v) { return SymMat33(M.x11 - v, M.x21, M.x31, M.x22 - v, M.x32, M.x33 - v); // only subtract diagonal } CUDA_HOST_DEVICE SymMat33 CofactorMatrix(const SymMat33& A) { SymMat33 T; T.x11 = A.x22 * A.x33 - A.x32 * A.x32; // T.x21 = -A.x21 * A.x33 + A.x32 * A.x31; // T.x22 = A.x11 * A.x33 - A.x31 * A.x31; // T.x31 = A.x21 * A.x32 - A.x22 * A.x31; // T.x32 = -A.x11 * A.x32 + A.x21 * A.x31; // T.x33 = A.x11 * A.x22 - A.x21 * A.x21; // return T; } CUDA_HOST_DEVICE real3 LargestColumnNormalized(const SymMat33& A) { real scale1 = Length2(real3(A.x11, A.x21, A.x31)); real scale2 = Length2(real3(A.x21, A.x22, A.x32)); real scale3 = Length2(real3(A.x31, A.x32, A.x33)); if (scale1 > scale2) { if (scale1 > scale3) { return real3(A.x11, A.x21, A.x31) / sqrt(scale1); } } else if (scale2 > scale3) { return real3(A.x21, A.x22, A.x32) / sqrt(scale2); } if (scale3 > 0) return real3(A.x31, A.x32, A.x33) / sqrt(scale3); else { return (real3(1, 0, 0)); } } CUDA_HOST_DEVICE SymMat33 NormalEquationsMatrix(const Mat33& A) { return NormalEquations(A.array); } //// ======================================================================================== CUDA_HOST_DEVICE real3 operator*(const Mat32& M, const real2& v) { real3 result; result.x = M[0] * v.x + M[4] * v.y; result.y = M[1] * v.x + M[5] * v.y; result.z = M[2] * v.x + M[6] * v.y; return result; } CUDA_HOST_DEVICE Mat32 operator*(const SymMat33& M, const Mat32& N) { Mat32 result; // x11 x21 x31 c11 c12 // x21 x22 x32 c21 c22 // x31 x32 x33 c31 c32 result[0] = M.x11 * N[0] + M.x21 * N[1] + M.x31 * N[2]; result[1] = M.x21 * N[0] + M.x22 * N[1] + M.x32 * N[2]; result[2] = M.x31 * N[0] + M.x32 * N[1] + M.x33 * N[2]; result[4] = M.x11 * N[4] + M.x21 * N[5] + M.x31 * N[6]; result[5] = M.x21 * N[4] + M.x22 * N[5] + M.x32 * N[6]; result[6] = M.x31 * N[4] + M.x32 * N[5] + M.x33 * N[6]; return result; } //// ======================================================================================== CUDA_HOST_DEVICE SymMat22 operator-(const SymMat22& M, const real& v) { return SymMat22(M.x11 - v, M.x21, M.x22 - v); // } CUDA_HOST_DEVICE SymMat22 CofactorMatrix(const SymMat22& A) { SymMat22 T; T.x11 = A.x22; // T.x21 = -A.x21; // T.x22 = A.x11; // return T; } CUDA_HOST_DEVICE real2 LargestColumnNormalized(const SymMat22& A) { real scale1 = Length2(real2(A.x11, A.x21)); real scale2 = Length2(real2(A.x21, A.x22)); if (scale1 > scale2) { return real2(A.x11, A.x21) / sqrt(scale1); } else if (scale2 > 0) { return real2(A.x21, A.x22) / sqrt(scale2); } else { return real2(1, 0); } } // A^T*B CUDA_HOST_DEVICE SymMat22 TransposeTimesWithSymmetricResult(const Mat32& A, const Mat32& B) { SymMat22 T; T.x11 = A[0] * B[0] + A[1] * B[1] + A[2] * B[2]; T.x21 = A[4] * B[0] + A[5] * B[1] + A[6] * B[2]; T.x22 = A[4] * B[4] + A[5] * B[5] + A[6] * B[6]; return T; } CUDA_HOST_DEVICE SymMat22 ConjugateWithTranspose(const Mat32& A, const SymMat33& B) { return TransposeTimesWithSymmetricResult(B * A, A); } CUDA_HOST_DEVICE void Print(const Mat33& A, const char* 
name) { printf("%s\n", name); printf("%f %f %f\n", A[0], A[4], A[8]); printf("%f %f %f\n", A[1], A[5], A[9]); printf("%f %f %f\n", A[2], A[6], A[10]); } CUDA_HOST_DEVICE void Print(const Mat32& A, const char* name) { printf("%s\n", name); printf("%f %f\n", A[0], A[4]); printf("%f %f\n", A[1], A[5]); printf("%f %f\n", A[2], A[6]); } CUDA_HOST_DEVICE void Print(const SymMat33& A, const char* name) { printf("%s\n", name); printf("%f %f %f\n", A.x11, A.x21, A.x31); printf("%f %f %f\n", A.x21, A.x22, A.x32); printf("%f %f %f\n", A.x31, A.x32, A.x33); } CUDA_HOST_DEVICE void Print(const SymMat22& A, const char* name) { printf("%s\n", name); printf("%f %f\n", A.x11, A.x21); printf("%f %f\n", A.x21, A.x22); } CUDA_HOST_DEVICE void PrintLine(const Mat33& A, const char* name) { printf("%s: [%f,%f,%f,%f,%f,%f,%f,%f,%f]\n", name, A[0], A[1], A[2], A[4], A[5], A[6], A[8], A[9], A[10]); } CUDA_HOST_DEVICE void PrintLine(const Mat32& A, const char* name) { printf("%s: [%f,%f,%f,%f,%f,%f]\n", name, A[0], A[1], A[2], A[4], A[5], A[6]); } CUDA_HOST_DEVICE void PrintLine(const SymMat33& A, const char* name) { printf("%s: [%f,%f,%f,%f,%f,%f,%f,%f,%f]\n", name, A.x11, A.x21, A.x31, A.x21, A.x22, A.x32, A.x31, A.x32, A.x33); } CUDA_HOST_DEVICE void PrintLine(const SymMat22& A, const char* name) { printf("%s: [%f,%f,%f,%f]\n", name, A.x11, A.x21, A.x21, A.x22); } } #endif
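// --------------------------------------------------------------------------------------
// Hypothetical usage sketch appended after the file above (not part of the original):
// the SymMat33 helpers are commonly combined as follows.  For the symmetric matrix A^T*A
// with a known eigenvalue lambda, (A^T*A - lambda*I) is singular, and the columns of its
// cofactor matrix are multiples of the corresponding eigenvector, so the best-conditioned
// column (largest, normalized) gives that eigen-direction.  Assumes the chrono types and
// functions defined above are visible at this point.
CUDA_HOST_DEVICE inline chrono::real3 EigenvectorFromEigenvalue(const chrono::Mat33& A,
                                                                const chrono::real lambda) {
    chrono::SymMat33 AtA = chrono::NormalEquationsMatrix(A);  // A^T * A in symmetric storage
    chrono::SymMat33 shifted = AtA - lambda;                  // subtracts lambda on the diagonal only
    chrono::SymMat33 cof = chrono::CofactorMatrix(shifted);   // columns span the eigenvector direction
    return chrono::LargestColumnNormalized(cof);              // pick the largest column, normalized
}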
#define CUB_STDERR #include <iterator> #include <cub/warp/warp_store.cuh> #include <cub/iterator/cache_modified_output_iterator.cuh> #include <cub/iterator/discard_output_iterator.cuh> #include <cub/util_allocator.cuh> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/sequence.h> #include "test_util.h" using namespace cub; const int MAX_ITERATIONS = 30; template <int BlockThreads, int WarpThreads, int ItemsPerThread, WarpStoreAlgorithm StoreAlgorithm, typename OutputIteratorT, typename OutputT> __global__ void kernel(OutputIteratorT output) { using WarpStoreT = WarpStore<OutputT, ItemsPerThread, StoreAlgorithm, WarpThreads>; constexpr int warps_in_block = BlockThreads / WarpThreads; constexpr int tile_size = ItemsPerThread * WarpThreads; const int warp_id = static_cast<int>(threadIdx.x) / WarpThreads; __shared__ typename WarpStoreT::TempStorage temp_storage[warps_in_block]; OutputT reg[ItemsPerThread]; for (int item = 0; item < ItemsPerThread; item++) { reg[item] = static_cast<OutputT>(threadIdx.x * ItemsPerThread + item); } WarpStoreT(temp_storage[warp_id]).Store(output + warp_id * tile_size, reg); } template <int BlockThreads, int WarpThreads, int ItemsPerThread, WarpStoreAlgorithm StoreAlgorithm, typename OutputIteratorT, typename OutputT> __global__ void kernel(int valid_items, OutputIteratorT output) { using WarpStoreT = WarpStore<OutputT, ItemsPerThread, StoreAlgorithm, WarpThreads>; constexpr int warps_in_block = BlockThreads / WarpThreads; constexpr int tile_size = ItemsPerThread * WarpThreads; const int tid = static_cast<int>(threadIdx.x); const int warp_id = tid / WarpThreads; __shared__ typename WarpStoreT::TempStorage temp_storage[warps_in_block]; OutputT reg[ItemsPerThread]; for (int item = 0; item < ItemsPerThread; item++) { reg[item] = static_cast<OutputT>(threadIdx.x * ItemsPerThread + item); } WarpStoreT(temp_storage[warp_id]) .Store(output + warp_id * tile_size, reg, valid_items); } template <typename T, int BlockThreads, int WarpThreads, int ItemsPerThread, WarpStoreAlgorithm StoreAlgorithm> thrust::device_vector<T> GenExpectedOutput(int valid_items) { const int tile_size = WarpThreads * ItemsPerThread; const int total_warps = BlockThreads / WarpThreads; const int elements = total_warps * tile_size; thrust::device_vector<T> input(elements); if (StoreAlgorithm == WarpStoreAlgorithm::WARP_STORE_STRIPED) { thrust::host_vector<T> h_input(elements); // In this case we need different stripe pattern, so the // items/threads parameters are swapped constexpr int fake_block_size = ItemsPerThread * (BlockThreads / WarpThreads); FillStriped<ItemsPerThread, WarpThreads, fake_block_size>(h_input.begin()); input = h_input; } else { thrust::sequence(input.begin(), input.end()); } if (valid_items != elements) { for (int warp_id = 0; warp_id < total_warps; warp_id++) { thrust::fill(input.begin() + warp_id * tile_size + valid_items, input.begin() + (warp_id + 1) * tile_size, T{}); } } return input; } template < typename T, int BlockThreads, int WarpThreads, int ItemsPerThread, WarpStoreAlgorithm StoreAlgorithm> void CheckResults(int valid_items, const thrust::device_vector<T> &output) { const thrust::device_vector<T> expected_output = GenExpectedOutput<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm>(valid_items); AssertEquals(expected_output, output); } template <typename T, int BlockThreads, int WarpThreads, int ItemsPerThread, WarpStoreAlgorithm StoreAlgorithm, typename OutputIteratorT> void TestImplementation(OutputIteratorT output) { 
kernel<BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm, OutputIteratorT, T><<<1, BlockThreads>>>(output); CubDebugExit(cudaPeekAtLastError()); CubDebugExit(cudaDeviceSynchronize()); } template <typename T, int BlockThreads, int WarpThreads, int ItemsPerThread, WarpStoreAlgorithm StoreAlgorithm, typename OutputIteratorT> void TestImplementation(int valid_items, OutputIteratorT output) { kernel<BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm, OutputIteratorT, T><<<1, BlockThreads>>>(valid_items, output); CubDebugExit(cudaPeekAtLastError()); CubDebugExit(cudaDeviceSynchronize()); } template <typename T, int BlockThreads, int WarpThreads, int ItemsPerThread, WarpStoreAlgorithm StoreAlgorithm> void TestPointer() { const int tile_size = WarpThreads * ItemsPerThread; const int total_warps = BlockThreads / WarpThreads; const int elements = total_warps * tile_size; thrust::device_vector<T> output(elements); thrust::fill(output.begin(), output.end(), T{}); TestImplementation<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm>( thrust::raw_pointer_cast(output.data())); CheckResults<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm>( elements, output); const unsigned int max_valid_items = WarpThreads * ItemsPerThread; for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++) { const int valid_items = static_cast<int>(RandomValue(max_valid_items)); thrust::fill(output.begin(), output.end(), T{}); TestImplementation<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm>( valid_items, thrust::raw_pointer_cast(output.data())); CheckResults<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm>( valid_items, output); } } template <typename T, int BlockThreads, int WarpThreads, int ItemsPerThread, WarpStoreAlgorithm StoreAlgorithm, CacheStoreModifier StoreModifier> void TestIterator() { const int tile_size = WarpThreads * ItemsPerThread; const int total_warps = BlockThreads / WarpThreads; const int elements = total_warps * tile_size; thrust::device_vector<T> output(elements); thrust::fill(output.begin(), output.end(), T{}); TestImplementation<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm>( CacheModifiedOutputIterator<StoreModifier, T>( thrust::raw_pointer_cast(output.data()))); CheckResults<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm>( elements, output); const int max_valid_items = WarpThreads * ItemsPerThread; for (int iteration = 0; iteration < MAX_ITERATIONS; iteration++) { const int valid_items = RandomValue(max_valid_items); thrust::fill(output.begin(), output.end(), T{}); TestImplementation<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm>( valid_items, CacheModifiedOutputIterator<StoreModifier, T>( thrust::raw_pointer_cast(output.data()))); CheckResults<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm>( valid_items, output); } } template <typename T, int BlockThreads, int WarpThreads, int ItemsPerThread, WarpStoreAlgorithm StoreAlgorithm> void TestIterator() { TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm, CacheStoreModifier::STORE_DEFAULT>(); TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm, CacheStoreModifier::STORE_WB>(); TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm, CacheStoreModifier::STORE_CG>(); TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm, CacheStoreModifier::STORE_CS>(); TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm, 
CacheStoreModifier::STORE_WT>(); TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm, CacheStoreModifier::STORE_VOLATILE>(); } template <typename T, int BlockThreads, int WarpThreads, int ItemsPerThread, WarpStoreAlgorithm StoreAlgorithm> void Test() { TestPointer<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm>(); TestIterator<T, BlockThreads, WarpThreads, ItemsPerThread, StoreAlgorithm>(); } template <typename T, int BlockThreads, int WarpThreads, int ItemsPerThread> void Test() { Test<T, BlockThreads, WarpThreads, ItemsPerThread, WarpStoreAlgorithm::WARP_STORE_DIRECT>(); Test<T, BlockThreads, WarpThreads, ItemsPerThread, WarpStoreAlgorithm::WARP_STORE_STRIPED>(); Test<T, BlockThreads, WarpThreads, ItemsPerThread, WarpStoreAlgorithm::WARP_STORE_TRANSPOSE>(); Test<T, BlockThreads, WarpThreads, ItemsPerThread, WarpStoreAlgorithm::WARP_STORE_VECTORIZE>(); } template <typename T, int BlockThreads, int WarpThreads> void Test() { Test<T, BlockThreads, WarpThreads, 1>(); Test<T, BlockThreads, WarpThreads, 4>(); Test<T, BlockThreads, WarpThreads, 7>(); } template <typename T, int BlockThreads> void Test() { Test<T, BlockThreads, 4>(); Test<T, BlockThreads, 16>(); Test<T, BlockThreads, 32>(); } template <int BlockThreads> void Test() { Test<std::uint16_t, BlockThreads>(); Test<std::uint32_t, BlockThreads>(); Test<std::uint64_t, BlockThreads>(); } int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); // Initialize device CubDebugExit(args.DeviceInit()); Test<256>(); }
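// ----------------------------------------------------------------------------
// Minimal, concrete sketch (not part of the test above): the cub::WarpStore
// pattern the kernels exercise, for one fixed configuration -- a 128-thread
// block split into 4 logical warps of 32 threads, 4 items per thread, using
// WARP_STORE_TRANSPOSE.  Relies only on the CUB/Thrust headers and the
// test_util.h helpers already included above.
__global__ void warp_store_demo(int* d_out)
{
  constexpr int warp_threads     = 32;
  constexpr int items_per_thread = 4;
  constexpr int tile_size        = warp_threads * items_per_thread;

  using WarpStoreT =
    cub::WarpStore<int, items_per_thread, cub::WARP_STORE_TRANSPOSE, warp_threads>;

  __shared__ typename WarpStoreT::TempStorage temp_storage[4];  // one per warp
  const int warp_id = static_cast<int>(threadIdx.x) / warp_threads;

  // Blocked arrangement: thread t owns items t*4 .. t*4+3.
  int items[items_per_thread];
  for (int i = 0; i < items_per_thread; i++)
  {
    items[i] = static_cast<int>(threadIdx.x) * items_per_thread + i;
  }

  // Cooperatively write this warp's tile to global memory (coalesced via smem).
  WarpStoreT(temp_storage[warp_id]).Store(d_out + warp_id * tile_size, items);
}

// Host driver: with the blocked arrangement above, the output should simply be
// the sequence 0 .. 511 regardless of the store algorithm chosen.
void RunWarpStoreDemo()
{
  thrust::device_vector<int> output(128 * 4, -1);
  warp_store_demo<<<1, 128>>>(thrust::raw_pointer_cast(output.data()));
  CubDebugExit(cudaPeekAtLastError());
  CubDebugExit(cudaDeviceSynchronize());

  thrust::device_vector<int> expected(output.size());
  thrust::sequence(expected.begin(), expected.end());
  AssertEquals(expected, output);
}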
#include "ew_op_gpu.h" #include "gpu_hmma.h" #include <stdio.h> #if __CUDA_ARCH__ >= 700 template <bool N64> __device__ __noinline__ void stg_64x64x64_nx(ehalf* Y, uint offsetY, uint loadY, uint N, uint K, uint n, uint i) { for (uint j = 0; j < 2; j++) if (N64 || n + i*16 + j*32 < N) store_half4(Y + (offsetY + (i*16 + j*32)*K), to_half4( ew_add( ld_shared_float4(loadY + j*64 + 0), ld_shared_float4(loadY + j*64 + 128)) )); } template <bool N64> __device__ __noinline__ void red_64x64x64_nx(ehalf* Y, uint offsetY, uint loadY, uint N, uint K, uint n, uint i, uint stdC) { for (uint j = 0; j < 2; j++) for (uint k = 0; k < 2; k++) { uint sum2 = to_half2( ew_add( ld_shared_float2(loadY + k*8*stdC + j*64 + 0), ld_shared_float2(loadY + k*8*stdC + j*64 + 128) ) ); uint offset = offsetY + K*(j*32 + k*8 + i*16); if (N64 || n + j*32 + k*8 < N) reduce_half2(Y + offset, sum2); } } template <uint OP_B, bool N64, bool GATED> __global__ void __launch_bounds__(256) hgemm_blocksparse_64x64x64_nx_dsd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ X, const ehalf* __restrict__ W, ehalf* Y, uint* Lock, uint locks, uint N, uint C, uint K, uint blk_a, uint blk_b, uint blk_N) { const uint stdA = 64 + 8; const uint stdB = 64 + (OP_B == OP_N ? 16 : 8); const uint stdC = 256 + 4; __shared__ ehalf hShare[(stdA + stdB)*64]; float* fShare = (float*)hShare; uint2* LutOffsets = (uint2*)&hShare[(stdA + stdB)*64]; uint tid = threadIdx.x; uint idx_ab = blockIdx.x; uint idx_B = blockIdx.y; uint idx_A = blockIdx.z; uint idx_L = idx_A * blk_a + idx_ab / blk_b; uint idx_N = idx_B * blk_b + idx_ab % blk_b; uint4 lut_head = ((const uint4*)Lut)[idx_L]; uint lut_offset = lut_head.x; uint lut_size = lut_head.y; uint idx_K = lut_head.z; uint idx_Lock = lut_head.w; uint tx = tid % 8; uint ty = tid / 8; if (lut_size > 0) { uint* Gates = (uint*)&LutOffsets[lut_size]; // prefetch the lut and gate data into shared Lut += lut_offset; #pragma unroll 1 for (uint i = tid; i < lut_size; i += 256) { uint2 entry = Lut[i]; if (GATED) { float gate = Gate[entry.y]; uint gate2; asm("{ \n\t" ".reg .f16 gate; \n\t" "cvt.rn.f16.f32 gate, %1; \n\t" "mov.b32 %0, {gate, gate}; \n\t" "}" : "=r"(gate2) : "f"(gate)); Gates[i] = gate2; } else Gates[i] = 1; entry.x *= 64; entry.y *= 64*64; LutOffsets[i] = entry; } __syncthreads(); uint storA = ty*stdA + tx*8; uint storB = ty*stdB + tx*8 + stdA*64; uint loadA = fragmentA<OP_N,M16N16K16>::get_idx(tid, stdA, (tid & 128)*32/128 + (tid & 64)*stdA*32/64); uint loadB = fragmentB<OP_B,M16N16K16>::get_idx(tid, stdB, (tid & 128)*(OP_B == OP_N ? stdB : 1)*32/128 + (tid & 32)*(OP_B == OP_N ? 
1 : stdB) + stdA*64); uint n = idx_N*64 + ty; uint offsetA = n*C + tx*8; if (!N64) { asm(".reg .pred pn00, pn32;\n\t"::); asm("setp.lt.u32 pn00, %0, %1;\n\t" :: "r"(n + 0*32), "r"(N)); asm("setp.lt.u32 pn32, %0, %1;\n\t" :: "r"(n + 1*32), "r"(N)); } asm("mov.b32 %0, %0;" : "+r"(idx_N) : ); asm("mov.b32 %0, %0;" : "+r"(loadA) : ); asm("mov.b32 %0, %0;" : "+r"(loadB) : ); asm("mov.b32 %0, %0;" : "+r"(offsetA) : ); fragmentC<OP_N,OP_B,M16N16K16> fragC[2][2]; uint idx_lut = 0; #pragma unroll 1 do { uint gate = Gates[idx_lut]; if (gate != 0) { uint2 entry = LutOffsets[idx_lut]; const ehalf* pW = W + (entry.y + tid*8); uint4 b00 = load_half8(pW + 0*32*64); uint4 b32 = load_half8(pW + 1*32*64); uint4 a00, a32; entry.x += offsetA; if (N64) { a00 = load_half8(X + (entry.x + 0*C)); a32 = load_half8(X + (entry.x + 32*C)); } else { asm("mov.b64 {%0, %1}, 0; \n\t" "mov.b64 {%2, %3}, 0; \n\t" "mov.b64 {%4, %5}, 0; \n\t" "mov.b64 {%6, %7}, 0; \n\t" "@pn00 ld.global.nc.v4.u32 { %0, %1, %2, %3}, [%8];\n\t" "@pn32 ld.global.nc.v4.u32 { %4, %5, %6, %7}, [%9];\n\t" : "=r"(a00.x), "=r"(a00.y), "=r"(a00.z), "=r"(a00.w), "=r"(a32.x), "=r"(a32.y), "=r"(a32.z), "=r"(a32.w) : "l"(X + (entry.x + 0*C)), "l"(X + (entry.x + 32*C))); } if (GATED) { asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(b00.x) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(b00.y) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(b00.z) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(b00.w) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(b32.x) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(b32.y) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(b32.z) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(b32.w) : "r"(gate)); } __syncthreads(); *(uint4*)&hShare[storB + 0*32*stdB] = b00; *(uint4*)&hShare[storB + 1*32*stdB] = b32; *(uint4*)&hShare[storA + 0*32*stdA] = a00; *(uint4*)&hShare[storA + 1*32*stdA] = a32; __syncthreads(); fragmentA<OP_N,M16N16K16> fragA[2]; fragmentB<OP_B,M16N16K16> fragB[2]; for (uint k = 0; k < 2; k++) { for (uint i = 0; i < 2; i++) { fragA[i].load(hShare, loadA + k*16 + i*16*stdA, stdA); fragB[i].load(hShare, loadB + (OP_B == OP_N ? stdB : 1)*k*16 + (OP_B == OP_N ? 
1 : stdB)*i*16, stdB); } for (uint i = 0; i < 2; i++) for (uint j = 0; j < 2; j++) fragC[i][j].mma_sync(fragA[i], fragB[j]); } } } while (++idx_lut < lut_size); uint txc = tid % 16; uint tyc = tid / 16; n = idx_N*64 + tyc; uint loadY = tyc*stdC + txc*4; uint storY = fragmentC<OP_N,OP_B,M16N16K16>::get_idx(tid, stdC, tid & 224); uint offsetY = n*K + idx_K*64 + txc*4; if (idx_Lock == 0) { for (uint i = 0; i < 2; i++) { __syncthreads(); for (uint j = 0; j < 2; j++) fragC[i][j].store(fShare, storY + j*16, stdC); __syncthreads(); stg_64x64x64_nx<N64>(Y, offsetY, loadY, N, K, n, i); } } else { Lock += idx_N*locks + idx_Lock - 1; // Critial Section if (tid == 0) while (atomicCAS(Lock, 0, 1) != 0); __syncthreads(); uint* Count = Lock + locks * blk_N; uint count = *Count; __syncthreads(); if (count == 0) { if (tid == 0) *Count = 1; // first block to get here just writes out to init the memory for (uint i = 0; i < 2; i++) { __syncthreads(); for (uint j = 0; j < 2; j++) fragC[i][j].store(fShare, storY + j*16, stdC); __syncthreads(); stg_64x64x64_nx<N64>(Y, offsetY, loadY, N, K, n, i); } } else { txc = tid % 32; tyc = tid / 32; n = idx_N*64 + tyc; loadY = tyc*stdC + txc*2; offsetY = n*K + idx_K*64 + txc*2; // subsequent blocks must accumulate for (uint i = 0; i < 2; i++) { __syncthreads(); for (uint j = 0; j < 2; j++) fragC[i][j].store(fShare, storY + j*16, stdC); __syncthreads(); red_64x64x64_nx<N64>(Y, offsetY, loadY, N, K, n, i, stdC); } } __threadfence(); __syncthreads(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } } else { uint n = idx_N*64 + ty; uint offsetY = n*K + idx_K*64 + tx*8; if (N64 || n + 0 < N) zero_half8(Y + (offsetY + 0*K)); if (N64 || n + 32 < N) zero_half8(Y + (offsetY + 32*K)); } } template <bool N64> __device__ __noinline__ void stg_64x32x32_nx(ehalf* Y, uint offsetY, uint loadY, uint N, uint K, uint n, uint i) { for (uint j = 0; j < 2; j++) if (N64 || n + i*16 + j*32 < N) store_half4(Y + (offsetY + (j*32 + i*16)*K), to_half4(ew_add( ld_shared_float4(loadY + j*32 + 0), ld_shared_float4(loadY + j*32 + 64)))); } template <bool N64> __device__ __noinline__ void red_64x32x32_nx(ehalf* Y, uint offsetY, uint loadY, uint N, uint K, uint n, uint i, uint stdC) { for (uint j = 0; j < 2; j++) for (uint k = 0; k < 2; k++) if (N64 || n + j*32 + k*8 < N) reduce_half2(Y + (offsetY + K*(j*32 + k*8 + i*16)), to_half2(ew_add( ld_shared_float2(loadY + k*8*stdC + j*32 + 0), ld_shared_float2(loadY + k*8*stdC + j*32 + 64)))); } template <uint OP_B, bool N64, bool GATED> __global__ void __launch_bounds__(128) hgemm_blocksparse_64x32x32_nx_dsd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ X, const ehalf* __restrict__ W, ehalf* Y, uint* Lock, uint locks, uint N, uint C, uint K, uint blk_a, uint blk_b, uint blk_N) { const uint stdA = 32+8; const uint stdB = 32 + (OP_B == OP_N ? 
16 : 8); const uint stdC = 132; __shared__ float fShare[stdC*16]; // stdC*16*4 > (stdA*64 + stdB*32)*2 ehalf* hShare = (ehalf*)fShare; uint2* LutOffsets = (uint2*)&hShare[stdA*64 + stdB*32]; uint tid = threadIdx.x; uint idx_ab = blockIdx.x; uint idx_B = blockIdx.y; uint idx_A = blockIdx.z; uint idx_L = idx_A * blk_a + idx_ab / blk_b; uint idx_N = idx_B * blk_b + idx_ab % blk_b; uint4 lut_head = ((const uint4*)Lut)[idx_L]; uint lut_offset = lut_head.x; uint lut_size = lut_head.y; uint idx_K = lut_head.z; uint idx_Lock = lut_head.w; uint tx = tid % 8; uint ty = tid / 8; if (lut_size > 0) { uint* Gates = (uint*)&LutOffsets[lut_size]; // prefetch the lut and gate data into shared Lut += lut_offset; #pragma unroll 1 for (uint i = tid; i < lut_size; i += 128) { uint2 entry = Lut[i]; if (GATED) { float gate = Gate[entry.y]; uint gate2; asm("{ \n\t" ".reg .f16 gate; \n\t" "cvt.rn.f16.f32 gate, %1; \n\t" "mov.b32 %0, {gate, gate}; \n\t" "}" : "=r"(gate2) : "f"(gate)); Gates[i] = gate2; } else Gates[i] = 1; entry.x *= 32; entry.y *= 32*32; LutOffsets[i] = entry; } __syncthreads(); uint storA = ty*stdA + tx*4; uint storB = ty*stdB + tx*4 + stdA*64; uint loadA = fragmentA<OP_N,M16N16K16>::get_idx(tid, stdA, (tid & 64)*16/64 + (tid & 32)*stdA); uint loadB = fragmentB<OP_B,M16N16K16>::get_idx(tid, stdB, (tid & 64)*(OP_B == OP_N ? stdB : 1)*16/64 + stdA*64); uint n = idx_N*64 + ty; uint offsetA = n*C + tx*4; if (!N64) { asm(".reg .pred pn<4>;\n\t"::); asm("setp.lt.u32 pn0, %0, %1;\n\t" :: "r"(n + 0*16), "r"(N)); asm("setp.lt.u32 pn1, %0, %1;\n\t" :: "r"(n + 1*16), "r"(N)); asm("setp.lt.u32 pn2, %0, %1;\n\t" :: "r"(n + 2*16), "r"(N)); asm("setp.lt.u32 pn3, %0, %1;\n\t" :: "r"(n + 3*16), "r"(N)); } asm("mov.b32 %0, %0;" : "+r"(loadA) : ); asm("mov.b32 %0, %0;" : "+r"(loadB) : ); asm("mov.b32 %0, %0;" : "+r"(offsetA) : ); fragmentC<OP_N,OP_B,M16N16K16> fragC[2][2]; uint idx_lut = 0; #pragma unroll 1 do { uint gate = Gates[idx_lut]; if (gate != 0) { uint2 entry = LutOffsets[idx_lut]; const ehalf* pW = W + (entry.y + tid*4); uint2 b00 = load_half4(pW + 0*16*32); uint2 b16 = load_half4(pW + 1*16*32); uint2 a00, a16, a32, a48; entry.x += offsetA; if (N64) { a00 = load_half4(X + (entry.x + 0*16*C)); a16 = load_half4(X + (entry.x + 1*16*C)); a32 = load_half4(X + (entry.x + 2*16*C)); a48 = load_half4(X + (entry.x + 3*16*C)); } else { asm("mov.b64 {%0, %1}, 0; \n\t" "mov.b64 {%2, %3}, 0; \n\t" "mov.b64 {%4, %5}, 0; \n\t" "mov.b64 {%6, %7}, 0; \n\t" "@pn0 ld.global.nc.v2.u32 {%0, %1}, [ %8];\n\t" "@pn1 ld.global.nc.v2.u32 {%2, %3}, [ %9];\n\t" "@pn2 ld.global.nc.v2.u32 {%4, %5}, [%10];\n\t" "@pn3 ld.global.nc.v2.u32 {%6, %7}, [%11];\n\t" : "=r"(a00.x), "=r"(a00.y), "=r"(a16.x), "=r"(a16.y), "=r"(a32.x), "=r"(a32.y), "=r"(a48.x), "=r"(a48.y) : "l"(X + (entry.x + 0*16*C)), "l"(X + (entry.x + 1*16*C)), "l"(X + (entry.x + 2*16*C)), "l"(X + (entry.x + 3*16*C))); } if (GATED) { asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(b00.x) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(b00.y) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(b16.x) : "r"(gate)); asm("mul.rn.f16x2 %0, %0, %1;" : "+r"(b16.y) : "r"(gate)); } __syncthreads(); *(uint2*)&hShare[storB + 0*16*stdB] = b00; *(uint2*)&hShare[storB + 1*16*stdB] = b16; *(uint2*)&hShare[storA + 0*16*stdA] = a00; *(uint2*)&hShare[storA + 1*16*stdA] = a16; *(uint2*)&hShare[storA + 2*16*stdA] = a32; *(uint2*)&hShare[storA + 3*16*stdA] = a48; __syncthreads(); fragmentA<OP_N,M16N16K16> fragA[2]; fragmentB<OP_B,M16N16K16> fragB[2]; for (uint i = 0; i < 2; i++) { fragA[i].load(hShare, 
loadA + i*16*stdA, stdA); fragB[i].load(hShare, loadB + (OP_B == OP_N ? 1 : stdB)*i*16, stdB); } for (uint i = 0; i < 2; i++) for (uint j = 0; j < 2; j++) fragC[i][j].mma_sync(fragA[i], fragB[j]); } } while (++idx_lut < lut_size); uint txc = tid % 8; uint tyc = tid / 8; n = idx_N*64 + tyc; uint loadY = tyc*stdC + txc*4; uint storY = fragmentC<OP_N,OP_B,M16N16K16>::get_idx(tid, stdC, tid & 96); uint offsetY = n*K + idx_K*32 + txc*4; if (idx_Lock == 0) { for (uint i = 0; i < 2; i++) { __syncthreads(); for (uint j = 0; j < 2; j++) fragC[i][j].store(fShare, storY + j*16, stdC); __syncthreads(); stg_64x32x32_nx<N64>(Y, offsetY, loadY, N, K, n, i); } } else { Lock += idx_N*locks + idx_Lock - 1; // Critial Section if (tid == 0) while (atomicCAS(Lock, 0, 1) != 0); __syncthreads(); uint* Count = Lock + locks * blk_N; uint count = *Count; __syncthreads(); if (count == 0) { if (tid == 0) *Count = 1; // first block to get here just writes out to init the memory for (uint i = 0; i < 2; i++) { __syncthreads(); for (uint j = 0; j < 2; j++) fragC[i][j].store(fShare, storY + j*16, stdC); __syncthreads(); stg_64x32x32_nx<N64>(Y, offsetY, loadY, N, K, n, i); } } else { txc = tid % 16; tyc = tid / 16; n = idx_N*64 + tyc; loadY = tyc*stdC + txc*2; offsetY = n*K + idx_K*32 + txc*2; // subsequent blocks must accumulate for (uint i = 0; i < 2; i++) { __syncthreads(); for (uint j = 0; j < 2; j++) fragC[i][j].store(fShare, storY + j*16, stdC); __syncthreads(); red_64x32x32_nx<N64>(Y, offsetY, loadY, N, K, n, i, stdC); } } __threadfence(); __syncthreads(); if (tid == 0) atomicExch(Lock, 0); // End Critial Section } } else { uint txc = tid % 4; uint tyc = tid / 4; uint n = idx_N*64 + tyc; uint offsetY = n*K + idx_K*32 + txc*8; if (N64 || n + 0 < N) zero_half8(Y + (offsetY + 0*K)); if (N64 || n + 32 < N) zero_half8(Y + (offsetY + 32*K)); } } template <bool N64, bool GATED> __global__ void __launch_bounds__(256,3) hgemm_blocksparse_64x64x64_tn_dds( struct Plist<ehalf,8> X, struct Plist<ehalf,8> DY, ehalf* DW, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint C, uint K, uint loops, uint accumulate) { const uint stdAB = 64 + 16; const uint stdC = 256 + 4; __shared__ ehalf hShare[stdAB*2*64]; float* fShare = (float*)hShare; uint tid = threadIdx.x; uint bid = blockIdx.x; float gate = GATED ? 
Gate[bid] : 1.0f; if (gate != 0.0f) { uint2 lut_head = Lut[bid]; uint tx = tid % 8; uint ty = tid / 8; uint n0 = ty; uint idx_A = lut_head.x; uint idx_B = lut_head.y; uint offsetA0 = ty*C + idx_A*64 + tx*8; uint offsetB0 = ty*K + idx_B*64 + tx*8; uint storAB = ty*stdAB + tx*8; uint loadA = fragmentA<OP_T,M16N16K16>::get_idx(tid, stdAB, (tid & 128)*stdAB*32/128 + (tid & 64)/2); uint loadB = fragmentB<OP_N,M16N16K16>::get_idx(tid, stdAB, (tid & 128)*stdAB*32/128 + (tid & 32) + stdAB*64); fragmentC<OP_T,OP_N,M16N16K16> fragC[2][2]; uint p8 = 0; #pragma unroll 1 do { const ehalf* A0; const ehalf* B0; asm("ld.param.u64 %0, [%2 + 0x160];\n\t" "ld.param.u64 %1, [%2 + 0x1a0];" : "=l"(A0), "=l"(B0) : "r"(p8)); p8 += 8; uint offsetA = offsetA0; uint offsetB = offsetB0; uint n = n0; uint loop = 0; #pragma unroll 1 do { asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever uint4 a00 = {0}, a32 = {0}; uint4 b00 = {0}, b32 = {0}; if (N64 || n + 0 < N) { a00 = load_half8(A0 + (offsetA + 0*C)); b00 = load_half8(B0 + (offsetB + 0*K)); } if (N64 || n + 32 < N) { a32 = load_half8(A0 + (offsetA + 32*C)); b32 = load_half8(B0 + (offsetB + 32*K)); } offsetA += 64*C; offsetB += 64*K; if (!N64) n += 64; __syncthreads(); *(uint4*)&hShare[storAB + 0*stdAB + 0*stdAB] = a00; *(uint4*)&hShare[storAB + 32*stdAB + 0*stdAB] = a32; *(uint4*)&hShare[storAB + 0*stdAB + 64*stdAB] = b00; *(uint4*)&hShare[storAB + 32*stdAB + 64*stdAB] = b32; __syncthreads(); fragmentA<OP_T,M16N16K16> fragA[2]; fragmentB<OP_N,M16N16K16> fragB[2]; for (uint k = 0; k < 2; k++) { for (uint i = 0; i < 2; i++) { fragA[i].load(hShare, loadA + k*16*stdAB + i*16, stdAB); fragB[i].load(hShare, loadB + k*16*stdAB + i*16, stdAB); } for (uint i = 0; i < 2; i++) for (uint j = 0; j < 2; j++) fragC[i][j].mma_sync(fragA[i], fragB[j]); } } while (++loop < loops); } while (p8 < params8); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :); asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :); uint storC = fragmentC<OP_T,OP_N,M16N16K16>::get_idx(tid, stdC, (tid & 224)); if (accumulate) { tx = tid % 32; ty = tid / 32; uint loadC = ty*stdC + tx *2; uint offsetC = bid*64*64 + tid*2; for (uint i = 0; i < 2; i++) { __syncthreads(); for (uint j = 0; j < 2; j++) fragC[i][j].store(fShare, storC + j*16, stdC); __syncthreads(); for (uint j = 0; j < 2; j++) for (uint k = 0; k < 2; k++) { uint sum2 = to_half2( ew_add( ld_shared_float2(loadC + k*8*stdC + j*64 + 0), ld_shared_float2(loadC + k*8*stdC + j*64 + 128) ) ); reduce_half2(DW + offsetC + (i*16 + j*32 + k*8)*64, sum2); } } } else { tx = tid % 16; ty = tid / 16; uint loadC = ty*stdC + tx *4; uint offsetC = bid*64*64 + tid*4; for (uint i = 0; i < 2; i++) { __syncthreads(); for (uint j = 0; j < 2; j++) fragC[i][j].store(fShare, storC + j*16, stdC); __syncthreads(); for (uint j = 0; j < 2; j++) { uint2 sum4 = to_half4( ew_add( ld_shared_float4(loadC + j*64 + 0), ld_shared_float4(loadC + j*64 + 128) ) ); store_half4(DW + offsetC + (i*16 + j*32)*64, sum4); } } } } else if (!accumulate) // gate == 0 { DW += bid*64*64 + tid*8; zero_half8(DW + 0*64); zero_half8(DW + 32*64); } } template <bool N64, bool GATED> __global__ void __launch_bounds__(128,6) hgemm_blocksparse_32x32x64_tn_dds( struct Plist<ehalf,8> X, struct Plist<ehalf,8> DY, ehalf* DW, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint C, uint K, uint loops, uint accumulate) { const uint stdAB = 32+16; const uint stdC = 132; __shared__ ehalf hShare[stdAB*2*64]; float* fShare = (float*)hShare; uint tid = threadIdx.x; 
uint bid = blockIdx.x; float gate = GATED ? Gate[bid] : 1.0f; if (gate != 0.0f) { uint2 lut_head = Lut[bid]; uint tx = tid % 8; uint ty = tid / 8; uint n0 = ty; uint idx_A = lut_head.x; uint idx_B = lut_head.y; uint offsetA0 = ty*C + idx_A*32 + tx*4; uint offsetB0 = ty*K + idx_B*32 + tx*4; uint storAB = ty*stdAB + tx*4; uint loadA = fragmentA<OP_T,M16N16K16>::get_idx(tid, stdAB, (tid & 96)*stdAB/2); uint loadB = fragmentB<OP_N,M16N16K16>::get_idx(tid, stdAB, (tid & 96)*stdAB/2 + stdAB*64); fragmentC<OP_T,OP_N,M16N16K16> fragC[2][2]; uint p8 = 0; #pragma unroll 1 do { const ehalf* A0; const ehalf* B0; asm("ld.param.u64 %0, [%2 + 0x160];\n\t" "ld.param.u64 %1, [%2 + 0x1a0];" : "=l"(A0), "=l"(B0) : "r"(p8)); p8 += 8; uint offsetA = offsetA0; uint offsetB = offsetB0; uint n = n0; uint loop = 0; #pragma unroll 1 do { asm volatile (".pragma \"nounroll\";"::); // ptxas, don't get clever uint2 a00 = {0}, a16 = {0}, a32 = {0}, a48 = {0}; uint2 b00 = {0}, b16 = {0}, b32 = {0}, b48 = {0}; if (N64 || n + 0*16 < N) { a00 = load_half4(A0 + (offsetA + 0*16*C)); b00 = load_half4(B0 + (offsetB + 0*16*K)); } if (N64 || n + 1*16 < N) { a16 = load_half4(A0 + (offsetA + 1*16*C)); b16 = load_half4(B0 + (offsetB + 1*16*K)); } if (N64 || n + 2*16 < N) { a32 = load_half4(A0 + (offsetA + 2*16*C)); b32 = load_half4(B0 + (offsetB + 2*16*K)); } if (N64 || n + 3*16 < N) { a48 = load_half4(A0 + (offsetA + 3*16*C)); b48 = load_half4(B0 + (offsetB + 3*16*K)); } offsetA += 64*C; offsetB += 64*K; if (!N64) n += 64; __syncthreads(); *(uint2*)&hShare[storAB + 0*16*stdAB + 0*stdAB] = a00; *(uint2*)&hShare[storAB + 1*16*stdAB + 0*stdAB] = a16; *(uint2*)&hShare[storAB + 2*16*stdAB + 0*stdAB] = a32; *(uint2*)&hShare[storAB + 3*16*stdAB + 0*stdAB] = a48; *(uint2*)&hShare[storAB + 0*16*stdAB + 64*stdAB] = b00; *(uint2*)&hShare[storAB + 1*16*stdAB + 64*stdAB] = b16; *(uint2*)&hShare[storAB + 2*16*stdAB + 64*stdAB] = b32; *(uint2*)&hShare[storAB + 3*16*stdAB + 64*stdAB] = b48; __syncthreads(); fragmentA<OP_T,M16N16K16> fragA[2]; fragmentB<OP_N,M16N16K16> fragB[2]; for (uint i = 0; i < 2; i++) { fragA[i].load(hShare, loadA + i*16, stdAB); fragB[i].load(hShare, loadB + i*16, stdAB); } for (uint i = 0; i < 2; i++) for (uint j = 0; j < 2; j++) fragC[i][j].mma_sync(fragA[i], fragB[j]); } while (++loop < loops); } while (p8 < params8); asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :); asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(bid) :); uint storC = fragmentC<OP_T,OP_N,M16N16K16>::get_idx(tid, stdC, (tid & 96)); if (accumulate) { tx = tid % 16; ty = tid / 16; uint loadC = ty*stdC + tx*2; uint offsetC = bid*32*32 + tid*2; for (uint i = 0; i < 2; i++) { __syncthreads(); for (uint j = 0; j < 2; j++) fragC[i][j].store(fShare, storC + j*16, stdC); __syncthreads(); for (uint j = 0; j < 2; j++) { float2 sum2 = ew_add( ew_add( *(float2*)&fShare[loadC + j*8*stdC + 0*32], *(float2*)&fShare[loadC + j*8*stdC + 1*32]), ew_add( *(float2*)&fShare[loadC + j*8*stdC + 2*32], *(float2*)&fShare[loadC + j*8*stdC + 3*32])); reduce_half2(DW + offsetC + i*16*32 + j*8*32, to_half2(sum2)); } } } else { tx = tid % 8; ty = tid / 8; uint loadC = ty*stdC + tx*4; uint offsetC = bid*32*32 + tid*4; for (uint i = 0; i < 2; i++) { __syncthreads(); for (uint j = 0; j < 2; j++) fragC[i][j].store(fShare, storC + j*16, stdC); __syncthreads(); float4 sum4 = ew_add( ew_add( *(float4*)&fShare[loadC + 0*32], *(float4*)&fShare[loadC + 1*32]), ew_add( *(float4*)&fShare[loadC + 2*32], *(float4*)&fShare[loadC + 3*32])); store_half4(DW + offsetC + i*16*32, to_half4(sum4)); } } } else 
if (!accumulate) // gate == 0 zero_half8(DW + (bid*32*32 + tid*8)); } #else // __CUDA_ARCH__ >= 700 template <uint OP_B, bool N64, bool GATED> __global__ void __launch_bounds__(256) hgemm_blocksparse_64x64x64_nx_dsd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ X, const ehalf* __restrict__ W, ehalf* Y, uint* Lock, uint locks, uint N, uint C, uint K, uint blk_a, uint blk_b, uint blk_N) { *Y = 0; } template <uint OP_A, bool N64, bool GATED> __global__ void __launch_bounds__(128) hgemm_blocksparse_64x32x32_nx_dsd( const uint2* __restrict__ Lut, const float* __restrict__ Gate, const ehalf* __restrict__ X, const ehalf* __restrict__ W, ehalf* Y, uint* Lock, uint locks, uint N, uint C, uint K, uint blk_a, uint blk_b, uint blk_N) { *Y = 0; } template <bool N64, bool GATED> __global__ void __launch_bounds__(256) hgemm_blocksparse_64x64x64_tn_dds( struct Plist<ehalf,8> X, struct Plist<ehalf,8> DY, ehalf* DW, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint C, uint K, uint loops, uint accumulate) { *DW = 0; } template <bool N64, bool GATED> __global__ void __launch_bounds__(128) hgemm_blocksparse_32x32x64_tn_dds( struct Plist<ehalf,8> X, struct Plist<ehalf,8> DY, ehalf* DW, const uint2* __restrict__ Lut, const float* __restrict__ Gate, uint params8, uint N, uint C, uint K, uint loops, uint accumulate) { *DW = 0; } #endif // __CUDA_ARCH__ >= 700 cudaError_t hgemm_blocksparse_nx_dsd(const ehalf* X, const ehalf* W, ehalf* Y, bsmm_params* params, uint op) { dim3 grid(params->blk_a*params->blk_b, params->blk_B, params->blk_A); uint blk_N = params->blk_b * params->blk_B; // cuMemsetD16Async((CUdeviceptr)Y, 0, params->K * params->N, params->stream); if (params->locks > 0) cuMemsetD32Async((CUdeviceptr)params->Lock, 0, blk_N * params->locks * 2, params->stream); const uint2* Lut = (const uint2*)params->Lut; uint* Lock = (uint*)params->Lock; bool N64 = (params->N & 63) == 0; int shared = params->shared + params->shared/2; if (params->bsize == 32) { // 132*16*4 - ((32+8)*64 + (32+16)*32)*2 shared -= op == OP_N ? 
256 : 768; if (shared < 0) shared = 0; if (params->Gate == 0) { if (op == OP_N) if (N64) hgemm_blocksparse_64x32x32_nx_dsd<OP_N, true,false><<<grid,128,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_64x32x32_nx_dsd<OP_N,false,false><<<grid,128,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); else if (N64) hgemm_blocksparse_64x32x32_nx_dsd<OP_T, true,false><<<grid,128,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_64x32x32_nx_dsd<OP_T,false,false><<<grid,128,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); } else { if (op == OP_N) if (N64) hgemm_blocksparse_64x32x32_nx_dsd<OP_N, true, true><<<grid,128,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_64x32x32_nx_dsd<OP_N,false, true><<<grid,128,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); else if (N64) hgemm_blocksparse_64x32x32_nx_dsd<OP_T, true, true><<<grid,128,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_64x32x32_nx_dsd<OP_T,false, true><<<grid,128,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); } } else if (params->bsize == 64) { if (params->Gate == 0) { if (op == OP_N) if (N64) hgemm_blocksparse_64x64x64_nx_dsd<OP_N, true,false><<<grid,256,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_64x64x64_nx_dsd<OP_N,false,false><<<grid,256,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); else if (N64) hgemm_blocksparse_64x64x64_nx_dsd<OP_T, true,false><<<grid,256,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_64x64x64_nx_dsd<OP_T,false,false><<<grid,256,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); } else { if (op == OP_N) if (N64) hgemm_blocksparse_64x64x64_nx_dsd<OP_N, true, true><<<grid,256,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_64x64x64_nx_dsd<OP_N,false, true><<<grid,256,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); else if (N64) hgemm_blocksparse_64x64x64_nx_dsd<OP_T, true, true><<<grid,256,shared,params->stream>>>(Lut, params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); else hgemm_blocksparse_64x64x64_nx_dsd<OP_T,false, true><<<grid,256,shared,params->stream>>>(Lut, 
params->Gate, X, W, Y, Lock, params->locks, params->N, params->C, params->K, params->blk_a, params->blk_b, blk_N); } } return cudaPeekAtLastError(); } cudaError_t hgemm_blocksparse_nx_dsd(const bhalf* X, const bhalf* W, bhalf* Y, bsmm_params* params, uint op) { return cudaSuccess; } cudaError_t hgemm_blocksparse_nx_dsd(const float* X, const float* W, float* Y, bsmm_params* params, uint op) { return cudaSuccess; } cudaError_t hgemm_blocksparse_tn_dds(const ehalf* X, const ehalf* E, ehalf* U, bsmm_params* params) { struct Plist<ehalf,8>* X8 = (struct Plist<ehalf,8>*)X; struct Plist<ehalf,8>* E8 = (struct Plist<ehalf,8>*)E; const uint2* Lut = (const uint2*)params->Lut; uint accumulate = params->beta == 1.0f; uint pcount8 = params->pcount * 8; uint N = params->N; uint C = params->C; uint K = params->K; uint loops = CEIL_DIV(N, 64); bool N64 = (N & 63) == 0; dim3 grid(params->blocks, 1, 1); if (params->bsize == 32) { if (params->Gate == 0) { if (N64) hgemm_blocksparse_32x32x64_tn_dds< true,false><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, C, K, loops, accumulate); else hgemm_blocksparse_32x32x64_tn_dds<false,false><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, C, K, loops, accumulate); } else { if (N64) hgemm_blocksparse_32x32x64_tn_dds< true, true><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, C, K, loops, accumulate); else hgemm_blocksparse_32x32x64_tn_dds<false, true><<<grid,128,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, C, K, loops, accumulate); } } else if (params->bsize == 64) { if (params->Gate == 0) { if (N64) hgemm_blocksparse_64x64x64_tn_dds< true,false><<<grid,256,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, C, K, loops, accumulate); else hgemm_blocksparse_64x64x64_tn_dds<false,false><<<grid,256,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, C, K, loops, accumulate); } else { if (N64) hgemm_blocksparse_64x64x64_tn_dds< true, true><<<grid,256,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, C, K, loops, accumulate); else hgemm_blocksparse_64x64x64_tn_dds<false, true><<<grid,256,0,params->stream>>>(*X8, *E8, U, Lut, params->Gate, pcount8, N, C, K, loops, accumulate); } } return cudaPeekAtLastError(); } cudaError_t hgemm_blocksparse_tn_dds(const bhalf* X, const bhalf* E, bhalf* U, bsmm_params* params) { return cudaSuccess; } cudaError_t hgemm_blocksparse_tn_dds(const float* X, const float* E, float* U, bsmm_params* params) { return cudaSuccess; } #define MAX_NORM 0 #define L2_NORM 1 #if __CUDA_ARCH__ >= 700 template <uint BSIZE, uint NSIZE, uint NORM> __global__ void __launch_bounds__(256) blocksparse_feature_reduce_nc( const struct Plist<ehalf,8> X8, ehalf* Y, uint N, uint C) { const ehalf* X; const uint ROW_THDS = BSIZE/8; const uint WARP_ROWS = 32/ROW_THDS; const uint C_reduced = C / BSIZE; uint tid = threadIdx.x; uint bc = blockIdx.x; uint bn = blockIdx.y; // Each warp works on a Plist entry uint p = tid / 32; // index into Plist uint tp = tid % 32; asm("ld.param.u64 %0, [%1 + 0x160];" : "=l"(X) : "r"(p * 8)); uint tc = tp % ROW_THDS; uint tn = tp / ROW_THDS; uint n0 = bn*NSIZE + tn; uint offsetX = n0*C + bc*BSIZE + tc*8; uint offsetY = (p*N + n0)*C_reduced + bc; #pragma unroll 1 for (uint n = 0; n < NSIZE && bn*NSIZE + n < N; n += WARP_ROWS) { bool n_valid = n + n0 < N; float8 x = load((const ehalf8*)(X + offsetX), 0, n_valid); float norm; if (NORM == MAX_NORM) norm = ew_max(ew_abs(x)); else norm = ew_sum(ew_sqr(x)); if 
(NORM == MAX_NORM) { #pragma unroll for (int i = ROW_THDS/2; i > 0; i >>= 1) norm = ew_warp_max(norm, i); } else { #pragma unroll for (int i = ROW_THDS/2; i > 0; i >>= 1) norm = ew_warp_sum(norm, i); norm = ew_sqrt(norm); } store(Y + offsetY, norm, 0, n_valid && tc == 0); offsetX += WARP_ROWS*C; offsetY += WARP_ROWS*C_reduced; } } template <bool M64, bool ACCUMULATE> __device__ __noinline__ void store_64x64x32_tn(float* C, uint loadC, uint M, uint N, uint cy, uint cx, uint i, uint stdC, float scale) { for (uint j = 0; j < 2; j++) for (uint k = 0; k < 4; k++) if (M64 || cy + i*16 + j*32 + k*4 < M) { float out = ew_zero_nan_inf(ew_mul( ew_add( ld_shared_float1(loadC + j*64 + k*4*stdC + 0), ld_shared_float1(loadC + j*64 + k*4*stdC + 128)), scale)); uint offsetC = (cy + i*16 + j*32 + k*4)*N + cx; if (ACCUMULATE) atomicRed(C + offsetC, out); else store(C + offsetC, out); } } template <bool M64, bool ACCUMULATE> __global__ void __launch_bounds__(256) hgemm_64x64x32_tn( const ehalf* A, const ehalf* B, float* C, uint M, uint N, uint K, uint blk_a, uint blk_b, float scale) { const uint stdAB = 64 + 16; const uint stdC = 256 + 4; __shared__ float fShare[stdC*16]; ehalf* hShare = (ehalf*)fShare; uint tid = threadIdx.x; uint idx_ab = blockIdx.x; uint idx_B = blockIdx.y; uint idx_A = blockIdx.z; idx_A = idx_A * blk_a + idx_ab / blk_b; idx_B = idx_B * blk_b + idx_ab % blk_b; uint tx = tid % 32; uint ty = tid / 32; uint ta = idx_A*64 + tx*2; uint tb = idx_B*64 + tx*2; uint offsetA = ty*M + ta; uint offsetB = ty*N + tb; uint storAB = ty*stdAB + tx*2; uint loadA = fragmentA<OP_T,M16N16K16>::get_idx(tid, stdAB, (tid & 128)*stdAB*16/128 + (tid & 64)/2); uint loadB = fragmentB<OP_N,M16N16K16>::get_idx(tid, stdAB, (tid & 128)*stdAB*16/128 + (tid & 32) + stdAB*32); asm(".reg .pred pred_a, pred_b; \n\t" "setp.lt.u32 pred_a, %0, %1; \n\t" "setp.lt.u32 pred_b, %2, %3; \n\t" :: "r"(ta), "r"(M), "r"(tb), "r"(N) ); fragmentC<OP_T,OP_N,M16N16K16> fragC[2][2]; #pragma unroll 1 for (uint k = 0, tk = ty; k < K; k += 32) { uint a00, a08, a16, a24; uint b00, b08, b16, b24; asm volatile("{\n\t" ".reg .pred ka00, ka08, ka16, ka24; \n\t" ".reg .pred kb00, kb08, kb16, kb24; \n\t" "setp.lt.and.u32 ka00, %16, %20, pred_a; \n\t" "setp.lt.and.u32 ka08, %17, %20, pred_a; \n\t" "setp.lt.and.u32 ka16, %18, %20, pred_a; \n\t" "setp.lt.and.u32 ka24, %19, %20, pred_a; \n\t" "setp.lt.and.u32 kb00, %16, %20, pred_b; \n\t" "setp.lt.and.u32 kb08, %17, %20, pred_b; \n\t" "setp.lt.and.u32 kb16, %18, %20, pred_b; \n\t" "setp.lt.and.u32 kb24, %19, %20, pred_b; \n\t" "mov.b64 { %0, %1}, 0; \n\t" "mov.b64 { %2, %3}, 0; \n\t" "mov.b64 { %4, %5}, 0; \n\t" "mov.b64 { %6, %7}, 0; \n\t" "@ka00 ld.global.nc.u32 %0, [ %8]; \n\t" "@ka08 ld.global.nc.u32 %1, [ %9]; \n\t" "@ka16 ld.global.nc.u32 %2, [%10]; \n\t" "@ka24 ld.global.nc.u32 %3, [%11]; \n\t" "@kb00 ld.global.nc.u32 %4, [%12]; \n\t" "@kb08 ld.global.nc.u32 %5, [%13]; \n\t" "@kb16 ld.global.nc.u32 %6, [%14]; \n\t" "@kb24 ld.global.nc.u32 %7, [%15]; \n\t" "}" : "=r"(a00), "=r"(a08), "=r"(a16), "=r"(a24), "=r"(b00), "=r"(b08), "=r"(b16), "=r"(b24) : "l"(A + (offsetA + 0*M)), "l"(A + (offsetA + 8*M)), "l"(A + (offsetA + 16*M)), "l"(A + (offsetA + 24*M)), "l"(B + (offsetB + 0*N)), "l"(B + (offsetB + 8*N)), "l"(B + (offsetB + 16*N)), "l"(B + (offsetB + 24*N)), "r"(tk), "r"(tk+8), "r"(tk+16), "r"(tk+24), "r"(K) ); offsetA += 32*M; offsetB += 32*N; tk += 32; __syncthreads(); *(uint*)&hShare[storAB + 0*stdAB + 0*stdAB] = a00; *(uint*)&hShare[storAB + 8*stdAB + 0*stdAB] = a08; *(uint*)&hShare[storAB + 
16*stdAB + 0*stdAB] = a16; *(uint*)&hShare[storAB + 24*stdAB + 0*stdAB] = a24; *(uint*)&hShare[storAB + 0*stdAB + 32*stdAB] = b00; *(uint*)&hShare[storAB + 8*stdAB + 32*stdAB] = b08; *(uint*)&hShare[storAB + 16*stdAB + 32*stdAB] = b16; *(uint*)&hShare[storAB + 24*stdAB + 32*stdAB] = b24; __syncthreads(); fragmentA<OP_T,M16N16K16> fragA[2]; fragmentB<OP_N,M16N16K16> fragB[2]; for (int i = 0; i < 2; i++) { fragA[i].load(hShare, loadA + i*16, stdAB); fragB[i].load(hShare, loadB + i*16, stdAB); } for (int i = 0; i < 2; i++) for (int j = 0; j < 2; j++) fragC[i][j].mma_sync(fragA[i], fragB[j]); } asm volatile ("mov.u32 %0, %tid.x;" : "=r"(tid) :); asm volatile ("mov.u32 %0, %ctaid.x;" : "=r"(idx_ab) :); asm volatile ("mov.u32 %0, %ctaid.y;" : "=r"(idx_B) :); asm volatile ("mov.u32 %0, %ctaid.z;" : "=r"(idx_A) :); idx_A = idx_A * blk_a + idx_ab / blk_b; idx_B = idx_B * blk_b + idx_ab % blk_b; tx = tid % 64; ty = tid / 64; uint cx = idx_B*64 + tx; uint cy = idx_A*64 + ty; uint loadC = ty*stdC + tx; uint storC = fragmentC<OP_T,OP_N,M16N16K16>::get_idx(tid, stdC, (tid & 224)); bool cx_valid = cx < N; for (int i = 0; i < 2; i++) { __syncthreads(); for (int j = 0; j < 2; j++) fragC[i][j].store(fShare, storC + j*16, stdC); __syncthreads(); if (cx_valid) store_64x64x32_tn<M64,ACCUMULATE>(C, loadC, M, N, cy, cx, i, stdC, scale); } } #else // __CUDA_ARCH__ >= 700 template <uint BSIZE, uint NSIZE, uint NORM> __global__ void __launch_bounds__(256) blocksparse_feature_reduce_nc( const struct Plist<ehalf,8> X8, ehalf* Y, uint N, uint C) { *Y = 0; } template <bool M64, bool ACCUMULATE> __global__ void __launch_bounds__(256) hgemm_64x64x32_tn( const ehalf* A, const ehalf* B, float* C, uint M, uint N, uint K, uint blk_a, uint blk_b, float scale) { *C = 0; } #endif // __CUDA_ARCH__ >= 700 bool BlocksparseFeatureReduceNC(CUstream stream, ehalf* Y, const struct Plist<ehalf,8>* X8, uint params, uint C, uint N, uint bshift, uint norm_type) { uint gridC = C >> bshift; uint threads = params * 32; if (bshift == 5) { dim3 grid(gridC, CEIL_DIV(N, 32), 1); if (norm_type == MAX_NORM) blocksparse_feature_reduce_nc<32,32,MAX_NORM><<<grid,threads,0,stream>>>(*X8, Y, N, C); else blocksparse_feature_reduce_nc<32,32, L2_NORM><<<grid,threads,0,stream>>>(*X8, Y, N, C); } else if (bshift == 6) { dim3 grid(gridC, CEIL_DIV(N, 16), 1); if (norm_type == MAX_NORM) blocksparse_feature_reduce_nc<64,16,MAX_NORM><<<grid,threads,0,stream>>>(*X8, Y, N, C); else blocksparse_feature_reduce_nc<64,16, L2_NORM><<<grid,threads,0,stream>>>(*X8, Y, N, C); } return true; } bool hGemmTN(CUstream stream, const ehalf* A, const ehalf* B, float* C, uint M, uint N, uint K, uint blk_a, uint blk_b, uint blk_A, uint blk_B, uint accumulate, float scale) { if (scale != 0.0f) { dim3 grid(blk_a*blk_b, blk_B, blk_A); if (M & 63) if (accumulate) hgemm_64x64x32_tn<false, true><<<grid,256,0,stream>>>(A, B, C, M, N, K, blk_a, blk_b, scale); else hgemm_64x64x32_tn<false,false><<<grid,256,0,stream>>>(A, B, C, M, N, K, blk_a, blk_b, scale); else if (accumulate) hgemm_64x64x32_tn< true, true><<<grid,256,0,stream>>>(A, B, C, M, N, K, blk_a, blk_b, scale); else hgemm_64x64x32_tn< true,false><<<grid,256,0,stream>>>(A, B, C, M, N, K, blk_a, blk_b, scale); } else if (accumulate == 0) cuMemsetD32Async((CUdeviceptr)C, 0, M*N, stream); return true; } #endif // GOOGLE_CUDA // if (OP_B == OP_N) // printf("%d %d %3d %08x %08x %08x %08x\n", // idx_K, idx_Lock, tid, // b00.x, b00.y, b16.x, b16.y); // if (OP_B == OP_N) // printf("%d %d %3d %08x %08x %08x %08x %08x %08x %08x %08x %08x 
%08x %08x %08x %08x %08x %08x %08x\n", // idx_K, idx_Lock, tid, // fragA[0].x[0], fragA[0].x[1], fragA[0].x[2], fragA[0].x[3], fragA[0].x[4], fragA[0].x[5], fragA[0].x[6], fragA[0].x[7], // fragA[1].x[0], fragA[1].x[1], fragA[1].x[2], fragA[1].x[3], fragA[1].x[4], fragA[1].x[5], fragA[1].x[6], fragA[1].x[7]); // // fragB[0].x[0], fragB[0].x[1], fragB[0].x[2], fragB[0].x[3], fragB[0].x[4], fragB[0].x[5], fragB[0].x[6], fragB[0].x[7], // // fragB[1].x[0], fragB[1].x[1], fragB[1].x[2], fragB[1].x[3], fragB[1].x[4], fragB[1].x[5], fragB[1].x[6], fragB[1].x[7]); // if (OP_B == OP_N) // printf("%d %d %3d %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f\n", // idx_K, idx_Lock, tid, // fragC[1][0].x[0], fragC[1][0].x[1], fragC[1][0].x[2], fragC[1][0].x[3], fragC[1][0].x[4], fragC[1][0].x[5], fragC[1][0].x[6], fragC[1][0].x[7], // fragC[1][1].x[0], fragC[1][1].x[1], fragC[1][1].x[2], fragC[1][1].x[3], fragC[1][1].x[4], fragC[1][1].x[5], fragC[1][1].x[6], fragC[1][1].x[7]); // printf("%d %d %3d %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f\n", // idx_K, idx_Lock, tid, // fragC[0][0].x[0], fragC[0][0].x[1], fragC[0][0].x[2], fragC[0][0].x[3], fragC[0][0].x[4], fragC[0][0].x[5], fragC[0][0].x[6], fragC[0][0].x[7], // fragC[0][1].x[0], fragC[0][1].x[1], fragC[0][1].x[2], fragC[0][1].x[3], fragC[0][1].x[4], fragC[0][1].x[5], fragC[0][1].x[6], fragC[0][1].x[7], // fragC[1][0].x[0], fragC[1][0].x[1], fragC[1][0].x[2], fragC[1][0].x[3], fragC[1][0].x[4], fragC[1][0].x[5], fragC[1][0].x[6], fragC[1][0].x[7], // fragC[1][1].x[0], fragC[1][1].x[1], fragC[1][1].x[2], fragC[1][1].x[3], fragC[1][1].x[4], fragC[1][1].x[5], fragC[1][1].x[6], fragC[1][1].x[7]); //if (OP_B == OP_N) // for (uint j = 0; j < 2; j++) // { // float4 a = *(float4*)&fShare[loadC + j*32 + 0]; // float4 b = *(float4*)&fShare[loadC + j*32 + 64]; // printf("%d %d %d %3d %5d %.0f %.0f %.0f %.0f %.0f %.0f %.0f %.0f\n", // j, idx_K, idx_Lock, tid, offsetC + K*(j*32 + i*16), a.x, a.y, a.z, a.w, b.x, b.y, b.z, b.w // ); // // uint2 a = to_half4(ew_add(*(float4*)&fShare[loadC + j*32 + 0], *(float4*)&fShare[loadC + j*32 + 64])); // // printf("%d %d %d %3d %5d %08x %08x\n", // // j, idx_K, idx_Lock, tid, offsetC + K*(j*32 + i*16), a.x, a.y // // ); // //store_half4(C + (offsetC + (j*32 + i*16)*K), a); // }
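// --------------------------------------------------------------------------------------
// Reconstruction sketch (not part of the original source): the look-up-table layout the
// *_nx_dsd kernels above consume, inferred from the uint4/uint2 loads in
// hgemm_blocksparse_64x64x64_nx_dsd.  The field names are hypothetical; only the slot
// positions and the bsize scaling match the kernel code.
struct BlocksparseLutHeader     // one uint4 per output tile, indexed by idx_L in the kernel
{
    unsigned int offset;        // .x: index of the first BlocksparseLutEntry for this tile
    unsigned int size;          // .y: number of entries; 0 means the output tile is zero-filled
    unsigned int idx_K;         // .z: output block column (multiplied by bsize inside the kernel)
    unsigned int idx_lock;      // .w: 0 = sole writer, otherwise lock slot index + 1
};
struct BlocksparseLutEntry      // one uint2 per nonzero block feeding this output tile
{
    unsigned int block_c;       // .x: block index along C; kernel scales it by bsize
    unsigned int block_id;      // .y: index of the bsize*bsize weight block (also indexes Gate)
};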
namespace xlib { namespace gpu { template<typename T> void printArray(const T* d_array, size_t size, const std::string& title, const std::string& sep) noexcept { auto h_array = new T[size]; cuMemcpyToHost(d_array, size, h_array); xlib::printArray(h_array, size, title, sep); delete[] h_array; } template<typename T, int SIZE> void printArray(const T (&d_array)[SIZE], const std::string& title, const std::string& sep) noexcept { auto h_array = new T[SIZE]; cuMemcpyFromSymbol(d_array, h_array); xlib::printArray(h_array, SIZE, title, sep); delete[] h_array; } template<typename T> void printSymbol(const T& d_symbol, const std::string& title) noexcept { T h_data; cuMemcpyFromSymbol(d_symbol, h_data); std::cout << title << h_data << std::endl; } //============================================================================== template<typename T> void printMatrix(const T* d_matrix, size_t rows, size_t cols, const std::string& title) noexcept { auto h_matrix = new T[rows * cols]; cuMemcpyToHost(d_matrix, rows * cols, h_matrix); xlib::printMatrix(h_matrix, rows, cols, cols, title); delete[] h_matrix; } template<typename T> void printMatrix(const T* d_matrix, size_t rows, size_t cols, size_t ld_cols, const std::string& title) noexcept { auto h_matrix = new T[rows * ld_cols]; cuMemcpyToHost(d_matrix, rows * ld_cols, h_matrix); xlib::printMatrix(h_matrix, rows, cols, ld_cols, title); delete[] h_matrix; } template<typename T> void printMatrixCM(const T* d_matrix, size_t rows, size_t cols, const std::string& title) noexcept { auto h_matrix = new T[rows * cols]; cuMemcpyToHost(d_matrix, rows * cols, h_matrix); xlib::printMatrixCM(h_matrix, rows, cols, rows, title); delete[] h_matrix; } template<typename T> void printMatrixCM(const T* d_matrix, size_t rows, size_t cols, size_t ld_rows, const std::string& title) noexcept { auto h_matrix = new T[ld_rows * cols]; cuMemcpyToHost(d_matrix, ld_rows * cols, h_matrix); xlib::printMatrixCM(h_matrix, rows, cols, ld_rows, title); delete[] h_matrix; } //------------------------------------------------------------------------------ template<typename T> void printMatrixRowsCM(const T* d_matrix, size_t rows, size_t cols, size_t first_row, size_t last_row, const std::string& title) noexcept { auto h_matrix = new T[rows * cols]; cuMemcpyToHost(d_matrix, rows * cols, h_matrix); last_row = (last_row == 0) ? 
//------------------------------------------------------------------------------

template<typename T>
void printMatrixRowsCM(const T* d_matrix, size_t rows, size_t cols,
                       size_t first_row, size_t last_row,
                       const std::string& title) noexcept {
    auto h_matrix = new T[rows * cols];
    cuMemcpyToHost(d_matrix, rows * cols, h_matrix);
    last_row = (last_row == 0) ? rows : last_row;
    xlib::printMatrixCM(h_matrix + first_row, last_row - first_row, cols, rows,
                        title);
    delete[] h_matrix;
}

template<typename T>
void printMatrixRowsCM(const T* d_matrix, size_t rows, size_t cols,
                       size_t first_row, const std::string& title) noexcept {
    printMatrixRowsCM(d_matrix, rows, cols, first_row, 0, title);
}

template<typename T>
void printMatrixColumnsCM(const T* d_matrix, size_t rows, size_t cols,
                          size_t first_col, size_t last_col,
                          const std::string& title) noexcept {
    auto h_matrix = new T[rows * cols];
    cuMemcpyToHost(d_matrix, rows * cols, h_matrix);
    xlib::printMatrixCM(h_matrix + first_col * rows, rows,
                        last_col - first_col, rows, title);
    delete[] h_matrix;
}

//------------------------------------------------------------------------------

__device__ __forceinline__
const Cout& operator<<(const Cout& obj, const char* string) noexcept {
    printf("%s", string);
    return obj;
}

__device__ __forceinline__
const Cout& operator<<(const Cout& obj, uint64_t value) noexcept {
    printf("%llu", value);
    return obj;
}

__device__ __forceinline__
const Cout& operator<<(const Cout& obj, int64_t value) noexcept {
    printf("%lld", value);
    return obj;
}

__device__ __forceinline__
const Cout& operator<<(const Cout& obj, long long int value) noexcept {
    printf("%lld", value);
    return obj;
}

__device__ __forceinline__
const Cout& operator<<(const Cout& obj, long long unsigned value) noexcept {
    printf("%llu", value);
    return obj;
}

__device__ __forceinline__
const Cout& operator<<(const Cout& obj, int value) noexcept {
    printf("%d", value);
    return obj;
}

__device__ __forceinline__
const Cout& operator<<(const Cout& obj, unsigned value) noexcept {
    printf("%u", value);
    return obj;
}

__device__ __forceinline__
const Cout& operator<<(const Cout& obj, short value) noexcept {
    printf("%hd", value);
    return obj;
}

__device__ __forceinline__
const Cout& operator<<(const Cout& obj, unsigned short value) noexcept {
    printf("%hu", value);
    return obj;
}

__device__ __forceinline__
const Cout& operator<<(const Cout& obj, char value) noexcept {
    printf("%c", value);
    return obj;
}

__device__ __forceinline__
const Cout& operator<<(const Cout& obj, unsigned char value) noexcept {
    printf("%hhu", value);
    return obj;
}

__device__ __forceinline__
const Cout& operator<<(const Cout& obj, float value) noexcept {
    printf("%f", value);
    return obj;
}

__device__ __forceinline__
const Cout& operator<<(const Cout& obj, double value) noexcept {
    printf("%f", value);
    return obj;
}

template<typename T>
__device__ __forceinline__
typename std::enable_if<std::is_pointer<T>::value, const Cout&>::type
operator<<(const Cout& obj, const T pointer) noexcept {
    printf("0x%llX", pointer);
    return obj;
}

} // namespace gpu
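// Illustrative sketch (not part of the original header): a small device helper
// that exercises the stream-style gpu::Cout operators above. The function name
// is hypothetical, and it takes the Cout object as a parameter so it makes no
// assumption about how gpu::Cout is constructed (its definition is not shown
// in this file).
__device__ __forceinline__
void example_cout_print(const gpu::Cout& out, int iteration, float value) {
    // Prints, e.g., "iter 3: 0.500000\n" from the calling thread; each
    // operator<< forwards to printf and returns the stream for chaining.
    out << "iter " << iteration << ": " << value << "\n";
}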
//==============================================================================

namespace detail {

template<typename T>
__device__ __forceinline__
typename std::enable_if<std::is_floating_point<T>::value>::type
printfArrayAux(T* array, int size, char sep = ' ') {
    for (int i = 0; i < size; i++)
        printf("%f%c", array[i], sep);
}

template<typename T>
__device__ __forceinline__
typename std::enable_if<std::is_integral<T>::value &&
                        std::is_unsigned<T>::value>::type
printfArrayAux(T* array, int size, char sep = ' ') {
    for (int i = 0; i < size; i++)
        printf("%llu%c", static_cast<uint64_t>(array[i]), sep);
}

template<typename T>
__device__ __forceinline__
typename std::enable_if<std::is_integral<T>::value &&
                        std::is_signed<T>::value>::type
printfArrayAux(T* array, int size, char sep = ' ') {
    for (int i = 0; i < size; i++)
        printf("%lld%c", static_cast<int64_t>(array[i]), sep);
}

template<>
__device__ __forceinline__
void printfArrayAux<char>(char* array, int size, char sep) {
    for (int i = 0; i < size; i++)
        printf("%c%c", array[i], sep);
}

/*
template<typename T, int SIZE>
__device__ __forceinline__ void printfArrayAux(T (&array)[SIZE]) {
    printfArrayAux(array, SIZE, "\t");
    printf("\n");
}*/

//------------------------------------------------------------------------------
/*
template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_floating_point<T>::value>::type
printfArrayAux(T (&array)[1]) { printf("%f\n", array[0]); }

template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_floating_point<T>::value>::type
printfArrayAux(T (&array)[2]) { printf("%f\t%f\n", array[0], array[1]); }

template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_floating_point<T>::value>::type
printfArrayAux(T (&array)[3]) {
    printf("%f\t%f\t%f\n", array[0], array[1], array[2]);
}

template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_floating_point<T>::value>::type
printfArrayAux(T (&array)[4]) {
    printf("%f\t%f\t%f\t%f\n", array[0], array[1], array[2], array[3]);
}

template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_floating_point<T>::value>::type
printfArrayAux(T (&array)[5]) {
    printf("%f\t%f\t%f\t%f\t%f\n",
           array[0], array[1], array[2], array[3], array[4]);
}

//------------------------------------------------------------------------------

template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_integral<T>::value && std::is_unsigned<T>::value>::type
printfArrayAux(T (&array)[1]) { printf("%llu\n", static_cast<uint64_t>(array[0])); }

template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_integral<T>::value && std::is_unsigned<T>::value>::type
printfArrayAux(T (&array)[2]) {
    printf("%llu\t%llu\n",
           static_cast<uint64_t>(array[0]), static_cast<uint64_t>(array[1]));
}

template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_integral<T>::value && std::is_unsigned<T>::value>::type
printfArrayAux(T (&array)[3]) {
    printf("%llu\t%llu\t%llu\n",
           static_cast<uint64_t>(array[0]), static_cast<uint64_t>(array[1]),
           static_cast<uint64_t>(array[2]));
}

template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_integral<T>::value && std::is_unsigned<T>::value>::type
printfArrayAux(T (&array)[4]) {
    printf("%llu\t%llu\t%llu\t%llu\n",
           static_cast<uint64_t>(array[0]), static_cast<uint64_t>(array[1]),
           static_cast<uint64_t>(array[2]), static_cast<uint64_t>(array[3]));
}

template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_integral<T>::value && std::is_unsigned<T>::value>::type
printfArrayAux(T (&array)[5]) {
    printf("%llu\t%llu\t%llu\t%llu\t%llu\n",
           static_cast<uint64_t>(array[0]), static_cast<uint64_t>(array[1]),
           static_cast<uint64_t>(array[2]), static_cast<uint64_t>(array[3]),
           static_cast<uint64_t>(array[4]));
}

//------------------------------------------------------------------------------
template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value>::type
printfArrayAux(T (&array)[1]) { printf("%lld\n", static_cast<int64_t>(array[0])); }

template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value>::type
printfArrayAux(T (&array)[2]) {
    printf("%lld\t%lld\n",
           static_cast<int64_t>(array[0]), static_cast<int64_t>(array[1]));
}

template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value>::type
printfArrayAux(T (&array)[3]) {
    printf("%lld\t%lld\t%lld\n",
           static_cast<int64_t>(array[0]), static_cast<int64_t>(array[1]),
           static_cast<int64_t>(array[2]));
}

template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value>::type
printfArrayAux(T (&array)[4]) {
    printf("%lld\t%lld\t%lld\t%lld\n",
           static_cast<int64_t>(array[0]), static_cast<int64_t>(array[1]),
           static_cast<int64_t>(array[2]), static_cast<int64_t>(array[3]));
}

template<typename T> __device__ __forceinline__
typename std::enable_if<std::is_integral<T>::value && std::is_signed<T>::value>::type
printfArrayAux(T (&array)[5]) {
    printf("%lld\t%lld\t%lld\t%lld\t%lld\n",
           static_cast<int64_t>(array[0]), static_cast<int64_t>(array[1]),
           static_cast<int64_t>(array[2]), static_cast<int64_t>(array[3]),
           static_cast<int64_t>(array[4]));
}

//------------------------------------------------------------------------------

template<> __device__ __forceinline__
void printfArrayAux<char, 1>(char (&array)[1]) { printf("%c\n", array[0]); }

template<> __device__ __forceinline__
void printfArrayAux<char, 2>(char (&array)[2]) {
    printf("%c\t%c\n", array[0], array[1]);
}

template<> __device__ __forceinline__
void printfArrayAux<char, 3>(char (&array)[3]) {
    printf("%c\t%c\t%c\n", array[0], array[1], array[2]);
}

template<> __device__ __forceinline__
void printfArrayAux<char, 4>(char (&array)[4]) {
    printf("%c\t%c\t%c\t%c\n", array[0], array[1], array[2], array[3]);
}

template<> __device__ __forceinline__
void printfArrayAux<char, 5>(char (&array)[5]) {
    printf("%c\t%c\t%c\t%c\t%c\n",
           array[0], array[1], array[2], array[3], array[4]);
}
*/

} // namespace detail

//------------------------------------------------------------------------------

template<typename T>
__device__ __forceinline__ void printfArray(T* array, int size) {
    detail::printfArrayAux(array, size);
    printf("\n");
}

template<typename T, int SIZE>
__device__ __forceinline__ void printfArray(T (&array)[SIZE]) {
    // Fixed-size overload: forward the compile-time size explicitly so the
    // call matches the (pointer, size, separator) helpers above, then
    // terminate the line.
    detail::printfArrayAux(array, SIZE);
    printf("\n");
}

namespace warp {

template<typename T, int SIZE>
__device__ __forceinline__ void printfArray(T (&array)[SIZE]) {
    for (int i = 0; i < xlib::WARP_SIZE; i++) {
        if (i == xlib::lane_id()) {
            xlib::detail::printfArrayAux(array, SIZE, '\t');
            printf("\n");
        }
    }
}

} // namespace warp
} // namespace xlib
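// Illustrative usage sketch (not part of the original header): a minimal
// kernel that exercises the device-side array printers above. The kernel name
// and the generated values are hypothetical; the only assumption is a launch
// with a single full warp, e.g.
//     example_printf_array_kernel<int, 4><<<1, 32>>>();
//     cudaDeviceSynchronize();
template<typename T, int SIZE>
__global__ void example_printf_array_kernel() {
    T local[SIZE];
    for (int i = 0; i < SIZE; i++)            // fill a per-thread register array
        local[i] = static_cast<T>(threadIdx.x * SIZE + i);

    if (threadIdx.x == 0)                     // one thread: space-separated dump
        xlib::printfArray(local, SIZE);

    xlib::warp::printfArray(local);           // every lane prints, in lane order
}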