Column          Type    Lengths (min – max)
hip_filename    string  5 – 84
hip_content     string  79 – 9.69M
cuda_filename   string  4 – 83
cuda_content    string  19 – 9.69M
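Each row pairs a HIP translation (hip_filename, hip_content) with its original CUDA source (cuda_filename, cuda_content). The HIP files are the output of the hipify tool, which rewrites the cuda* runtime API to hip* and the <<<...>>> launch syntax to hipLaunchKernelGGL. As a minimal sketch (not drawn from the dataset), the CUDA program below marks in comments how hipify rewrites each call, matching the rewrites visible in the rows that follow:

#include <cuda_runtime.h>

__global__ void scale(float* data, int n, float s) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= s;
}

int main() {
    const int n = 1 << 20;
    float* d = nullptr;
    cudaMalloc(&d, n * sizeof(float));    // hipify: hipMalloc(&d, n * sizeof(float));
    cudaMemset(d, 0, n * sizeof(float));  // hipify: hipMemset(d, 0, n * sizeof(float));

    scale<<<(n + 255) / 256, 256>>>(d, n, 2.0f);
    // hipify: hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0,
    //                            d, n, 2.0f);

    cudaDeviceSynchronize();              // hipify: hipDeviceSynchronize();
    cudaFree(d);                          // hipify: hipFree(d);
    return 0;
}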
5ff64ff0569aa9fd0253e0345261df78b5c226c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorRandom.cu" #else #define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(uniform)(THCState* state, THCTensor *self_, double a, double b) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_uniform), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, a, b); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, double stdv) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_normal), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(normal_means)(THCState *state, THCTensor *self, THCTensor *means, double stddev) { THCTensor_(resizeAs)(state, self, means); THCTensor_(normal)(state, self, 0, stddev); THCTensor_(cadd)(state, self, self, ScalarConvert<int, scalar_t>::to(1), means); } void THCTensor_(normal_stddevs)(THCState *state, THCTensor *self, double mean, THCTensor *stddevs) { THCTensor_(resizeAs)(state, self, stddevs); THCTensor_(normal)(state, self, 0, 1); THCTensor_(cmul)(state, self, self, stddevs); THCTensor_(add)(state, self, self, ScalarConvert<double, scalar_t>::to(mean)); } void THCTensor_(normal_means_stddevs)(THCState *state, THCTensor *self, THCTensor *means, THCTensor *stddevs) { THCTensor_(resizeAs)(state, self, means); THCTensor_(normal)(state, self, 0, 1); THCTensor_(cmul)(state, self, self, stddevs); THCTensor_(cadd)(state, self, self, ScalarConvert<int, scalar_t>::to(1), means); } void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generateLogNormal<scalar_t>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_exponential), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, lambda); 
THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median, double sigma) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_cauchy), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, median, sigma); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(renormRows)(struct THCState* state, THCTensor* t) { THAssert(THCTensor_(nDimensionLegacyAll)(state, t) == 2); int64_t rows = THCTensor_(size)(state, t, 0); int64_t cols = THCTensor_(size)(state, t, 1); hipDeviceProp_t* props = THCState_getCurrentDeviceProperties(state); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; dim3 grid(rows < numSM * 4 ? rows : numSM * 4); dim3 block(cols < maxThreads ? cols : maxThreads); hipLaunchKernelGGL(( renormRowsL1<scalar_t>) , dim3(grid), dim3(block), block.x * sizeof(scalar_t), THCState_getCurrentStream(state), THCTensor_(data)(state, t), rows, cols); } void THCTensor_(multinomial)(struct THCState *state, THCudaLongTensor *self, THCTensor *prob_dist, int n_sample, int with_replacement) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, prob_dist)); THCGenerator* gen = THCRandom_getGenerator(state); int inputSize = THCTensor_(nDimensionLegacyAll)(state, prob_dist); THArgCheck(inputSize > 0 && inputSize <= 2, 2, "prob_dist must be 1 or 2 dim"); // Categories are in the innermost dimension int64_t numDist = inputSize == 1 ? 1 : THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0); int64_t numCategoriesLong = inputSize == 1 ? THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0) : THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 1); // Since the index tensor is float, numCategories cannot exceed max // float integer precision THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2, "number of categories cannot exceed 2^24"); int numCategories = (int) numCategoriesLong; THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples"); if (!with_replacement) { THArgCheck(n_sample <= numCategories, 2, "cannot sample n_sample > prob_dist:size(1) samples without " "replacement"); } int free_prob_dist = 0; // Restructure data for 2d if (inputSize == 1) { THCTensor *temp = THCTensor_(new)(state); THCTensor_(unsqueeze1d)(state, temp, prob_dist, 0); prob_dist = temp; free_prob_dist = 1; } THCudaLongTensor_resize2d(state, self, numDist, n_sample); // get current device properties hipDeviceProp_t* props = THCState_getCurrentDeviceProperties(state); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; int maxShared = props->sharedMemPerBlock; int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads) * (sizeof(scalar_t) + sizeof(accreal)); if (n_sample == 1 && maxShared >= requiredShared) { // Optimized allocation-free implementation // To exploit greater parallelism for the sampling, generate the // Uniform random samples in a separate kernel launch, into // temporarily allocated memory. The device RNG is thread-limited THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample); THCTensor_(uniform)(state, sampled, 0.0, 1.0); dim3 block(numCategories < maxThreads ? 
numCategories : maxThreads); dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4); hipLaunchKernelGGL(( sampleMultinomialOnce<scalar_t, accreal>) , dim3(grid), dim3(block), requiredShared, THCState_getCurrentStream(state), THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, sampled), THCTensor_(data)(state, prob_dist), THCTensor_(stride)(state, prob_dist, 0), THCTensor_(stride)(state, prob_dist, 1) ); THCTensor_(free)(state, sampled); } else { // Generic, slow implementation with memory allocations // For sampling without replacement, we modify the distribution // for subsequent samples in this space THCTensor* origDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, origDist, prob_dist); THCTensor_(copy)(state, origDist, prob_dist); THCTensor* normDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, normDist, prob_dist); THCTensor* prefixSum = THCTensor_(new)(state); // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); if (with_replacement) { // Sample with replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from one // distribution concurrently. dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS); hipLaunchKernelGGL(( sampleMultinomialWithReplacement) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), gen->state.gen_states, n_sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, prefixSum)); } else { // Sample without replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from a different // distribution concurrently. ptrdiff_t numBlocks = THCCeilDiv(numDist, (int64_t) 4); dim3 grid(numBlocks < MAX_NUM_BLOCKS ? 
numBlocks : MAX_NUM_BLOCKS); for (int sample = 0; sample < n_sample; ++sample) { if (sample > 0) { // Update probabilities // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); } // The kernel can only draw one sample before we have to // recalculate our distribution hipLaunchKernelGGL(( sampleMultinomialWithoutReplacement) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), gen->state.gen_states, n_sample, sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, origDist), THCTensor_(data)(state, prefixSum)); } } THCTensor_(free)(state, prefixSum); THCTensor_(free)(state, normDist); THCTensor_(free)(state, origDist); } // Revert data restructuring based on input sizes if (inputSize == 1) { THCudaLongTensor_resize1d(state, self, n_sample); } if (free_prob_dist) { THCTensor_(free)(state, prob_dist); } } void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){ THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THAssert(THCTensor_(isContiguous)(state, _probs)); int64_t inputsize = THCTensor_(nElement)(state, _probs); THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor_resize1d(state, _J, inputsize); THCTensor_(resize1d)(state, _q, inputsize); scalar_t one = ScalarConvert<int64_t, scalar_t>::to(1); int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE); hipLaunchKernelGGL(( aliasMultinomialFilter) , dim3(inputBlockDim), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state) , THCTensor_(data)(state, _q), THCTensor_(data)(state, _probs), THCudaLongTensor_data(state, smaller), THCudaLongTensor_data(state, larger), THCudaLongTensor_data(state, _J), THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), one, inputsize ); THCudaLongTensor_nonzero(state, smaller_short, smaller); THCudaLongTensor_nonzero(state, larger_short, larger); int h_large_c = THCudaLongTensor_nElement(state, larger_short); THCudaLongTensor_resize1d(state, smaller_short, inputsize); THCudaLongTensor_resize1d(state, larger_short, inputsize); hipLaunchKernelGGL(( aliasMultinomialSetup) , dim3(1), dim3(1), 0, THCState_getCurrentStream(state), THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), inputsize, THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), inputsize - h_large_c, h_large_c ); scalar_t q_max = THCTensor_(maxall)(state, _q); hipLaunchKernelGGL(( condDiv), dim3(inputBlockDim), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, _q), THCudaLongTensor_data(state, _J), inputsize, q_max ); THCudaLongTensor_free(state, smaller); THCudaLongTensor_free(state, larger); THCudaLongTensor_free(state, smaller_short); THCudaLongTensor_free(state, larger_short); } void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCudaLongTensor *_J, THCTensor *_q){ THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THCGenerator* gen = THCRandom_getGenerator(state); int64_t K = 
THCudaLongTensor_nElement(state, _J); int64_t output_nelem = THCudaLongTensor_nElement(state, self); ptrdiff_t size = THCudaLongTensor_nElement(state, self); THCTensor *uniform = THCTensor_(newWithSize1d)(state, output_nelem); THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, output_nelem); THCTensor_(uniform)(state, uniform, 0, K); THCTensor_(uniform)(state, bernoulli, 0, 1); hipLaunchKernelGGL(( multinomialAliasDrawKernel) , dim3(THCCeilDiv((int)output_nelem+BLOCK_SIZE-1, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), size, THCudaLongTensor_data(state, self), THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), K, THCTensor_(data)(state, uniform), THCTensor_(data)(state, bernoulli) ); } #endif #if defined(THC_REAL_IS_DOUBLE) GENERATE_KERNEL1(generate_geometric, double, double p, double, hiprand_uniform_double, ceil(log(x) / log(1-p))) #else GENERATE_KERNEL1(generate_geometric, scalar_t, double p, float, hiprand_uniform, (ScalarConvert<float, scalar_t>::to(ceilf(logf(x) / log(1-p))))) #endif #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) #define CURAND64(STATE) (((uint64_t)hiprand(STATE)) << 32) | (uint64_t)hiprand(STATE) GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, hiprand, \ static_cast<scalar_t>(static_cast<int32_t>((x % range) + base))) GENERATE_KERNEL2(generate_random_64, scalar_t, int64_t base, uint64_t range, uint64_t, CURAND64, \ static_cast<scalar_t>(static_cast<int64_t>((x % range) + base))) #elif defined(THC_REAL_IS_HALF) GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, hiprand, (ScalarConvert<int32_t, scalar_t>::to(static_cast<int32_t>(x % range + base)))) #else GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, hiprand, static_cast<scalar_t>(static_cast<int32_t>(x % range + base))) #endif void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_geometric), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, p); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(clampedRandom)(THCState* state, THCTensor *self_, int64_t min_val, int64_t max_val) { THArgCheck(min_val < max_val, 2, "max must be greater than min, but got: min = %lld, max = %lld", min_val, max_val); THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); uint64_t range = max_val - min_val; #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) if (range > 1ULL << 32) { hipLaunchKernelGGL(( generate_random_64), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, static_cast<int>(size), data, min_val, range); } else { #endif hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(min_val), 
static_cast<uint32_t>(range)); #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) } #endif THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(cappedRandom)(THCState* state, THCTensor *self_, int64_t max_val) { THCTensor_(clampedRandom)(state, self_, 0LL, max_val); }; #define HLF_MANT_DIG 11 void THCTensor_(random)(THCState* state, THCTensor *self_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); #if defined(THC_REAL_IS_HALF) hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>((1UL << HLF_MANT_DIG) + 1)); #elif defined(THC_REAL_IS_FLOAT) hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>((1UL << FLT_MANT_DIG) + 1)); #elif defined(THC_REAL_IS_DOUBLE) hipLaunchKernelGGL(( generate_random_64), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, static_cast<int>(size), data, static_cast<int64_t>(0ULL), static_cast<uint64_t>((1ULL << DBL_MANT_DIG) + 1)); #elif defined(THC_REAL_IS_LONG) hipLaunchKernelGGL(( generate_random_64), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, static_cast<int>(size), data, static_cast<int64_t>(0ULL), static_cast<uint64_t>(std::numeric_limits<scalar_t>::max()) + 1); #else hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>(std::numeric_limits<scalar_t>::max()) + 1); #endif THCTensor_(freeCopyTo)(state, self, self_); }; #undef HLF_MANT_DIG #undef CURAND64 #undef NUM_BLOCKS #endif
5ff64ff0569aa9fd0253e0345261df78b5c226c7.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorRandom.cu" #else #define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(uniform)(THCState* state, THCTensor *self_, double a, double b) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); generate_uniform<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, a, b); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, double stdv) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); generate_normal<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(normal_means)(THCState *state, THCTensor *self, THCTensor *means, double stddev) { THCTensor_(resizeAs)(state, self, means); THCTensor_(normal)(state, self, 0, stddev); THCTensor_(cadd)(state, self, self, ScalarConvert<int, scalar_t>::to(1), means); } void THCTensor_(normal_stddevs)(THCState *state, THCTensor *self, double mean, THCTensor *stddevs) { THCTensor_(resizeAs)(state, self, stddevs); THCTensor_(normal)(state, self, 0, 1); THCTensor_(cmul)(state, self, self, stddevs); THCTensor_(add)(state, self, self, ScalarConvert<double, scalar_t>::to(mean)); } void THCTensor_(normal_means_stddevs)(THCState *state, THCTensor *self, THCTensor *means, THCTensor *stddevs) { THCTensor_(resizeAs)(state, self, means); THCTensor_(normal)(state, self, 0, 1); THCTensor_(cmul)(state, self, self, stddevs); THCTensor_(cadd)(state, self, self, ScalarConvert<int, scalar_t>::to(1), means); } void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); generateLogNormal<scalar_t><<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); generate_exponential<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, lambda); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median, double sigma) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = 
THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); generate_cauchy<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, median, sigma); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(renormRows)(struct THCState* state, THCTensor* t) { THAssert(THCTensor_(nDimensionLegacyAll)(state, t) == 2); int64_t rows = THCTensor_(size)(state, t, 0); int64_t cols = THCTensor_(size)(state, t, 1); cudaDeviceProp* props = THCState_getCurrentDeviceProperties(state); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; dim3 grid(rows < numSM * 4 ? rows : numSM * 4); dim3 block(cols < maxThreads ? cols : maxThreads); renormRowsL1<scalar_t> <<<grid, block, block.x * sizeof(scalar_t), THCState_getCurrentStream(state)>>>(THCTensor_(data)(state, t), rows, cols); } void THCTensor_(multinomial)(struct THCState *state, THCudaLongTensor *self, THCTensor *prob_dist, int n_sample, int with_replacement) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, prob_dist)); THCGenerator* gen = THCRandom_getGenerator(state); int inputSize = THCTensor_(nDimensionLegacyAll)(state, prob_dist); THArgCheck(inputSize > 0 && inputSize <= 2, 2, "prob_dist must be 1 or 2 dim"); // Categories are in the innermost dimension int64_t numDist = inputSize == 1 ? 1 : THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0); int64_t numCategoriesLong = inputSize == 1 ? THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 0) : THCTensor_(sizeLegacyNoScalars)(state, prob_dist, 1); // Since the index tensor is float, numCategories cannot exceed max // float integer precision THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2, "number of categories cannot exceed 2^24"); int numCategories = (int) numCategoriesLong; THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples"); if (!with_replacement) { THArgCheck(n_sample <= numCategories, 2, "cannot sample n_sample > prob_dist:size(1) samples without " "replacement"); } int free_prob_dist = 0; // Restructure data for 2d if (inputSize == 1) { THCTensor *temp = THCTensor_(new)(state); THCTensor_(unsqueeze1d)(state, temp, prob_dist, 0); prob_dist = temp; free_prob_dist = 1; } THCudaLongTensor_resize2d(state, self, numDist, n_sample); // get current device properties cudaDeviceProp* props = THCState_getCurrentDeviceProperties(state); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; int maxShared = props->sharedMemPerBlock; int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads) * (sizeof(scalar_t) + sizeof(accreal)); if (n_sample == 1 && maxShared >= requiredShared) { // Optimized allocation-free implementation // To exploit greater parallelism for the sampling, generate the // Uniform random samples in a separate kernel launch, into // temporarily allocated memory. The device RNG is thread-limited THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample); THCTensor_(uniform)(state, sampled, 0.0, 1.0); dim3 block(numCategories < maxThreads ? numCategories : maxThreads); dim3 grid(numDist < numSM * 4 ? 
numDist : numSM * 4); sampleMultinomialOnce<scalar_t, accreal> <<<grid, block, requiredShared, THCState_getCurrentStream(state)>>>( THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, sampled), THCTensor_(data)(state, prob_dist), THCTensor_(stride)(state, prob_dist, 0), THCTensor_(stride)(state, prob_dist, 1) ); THCTensor_(free)(state, sampled); } else { // Generic, slow implementation with memory allocations // For sampling without replacement, we modify the distribution // for subsequent samples in this space THCTensor* origDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, origDist, prob_dist); THCTensor_(copy)(state, origDist, prob_dist); THCTensor* normDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, normDist, prob_dist); THCTensor* prefixSum = THCTensor_(new)(state); // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); if (with_replacement) { // Sample with replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from one // distribution concurrently. dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS); sampleMultinomialWithReplacement <<<grid, block, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, n_sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, prefixSum)); } else { // Sample without replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from a different // distribution concurrently. ptrdiff_t numBlocks = THCCeilDiv(numDist, (int64_t) 4); dim3 grid(numBlocks < MAX_NUM_BLOCKS ? 
numBlocks : MAX_NUM_BLOCKS); for (int sample = 0; sample < n_sample; ++sample) { if (sample > 0) { // Update probabilities // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); } // The kernel can only draw one sample before we have to // recalculate our distribution sampleMultinomialWithoutReplacement <<<grid, block, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, n_sample, sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, origDist), THCTensor_(data)(state, prefixSum)); } } THCTensor_(free)(state, prefixSum); THCTensor_(free)(state, normDist); THCTensor_(free)(state, origDist); } // Revert data restructuring based on input sizes if (inputSize == 1) { THCudaLongTensor_resize1d(state, self, n_sample); } if (free_prob_dist) { THCTensor_(free)(state, prob_dist); } } void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){ THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THAssert(THCTensor_(isContiguous)(state, _probs)); int64_t inputsize = THCTensor_(nElement)(state, _probs); THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor_resize1d(state, _J, inputsize); THCTensor_(resize1d)(state, _q, inputsize); scalar_t one = ScalarConvert<int64_t, scalar_t>::to(1); int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE); aliasMultinomialFilter <<<inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state) >>>( THCTensor_(data)(state, _q), THCTensor_(data)(state, _probs), THCudaLongTensor_data(state, smaller), THCudaLongTensor_data(state, larger), THCudaLongTensor_data(state, _J), THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), one, inputsize ); THCudaLongTensor_nonzero(state, smaller_short, smaller); THCudaLongTensor_nonzero(state, larger_short, larger); int h_large_c = THCudaLongTensor_nElement(state, larger_short); THCudaLongTensor_resize1d(state, smaller_short, inputsize); THCudaLongTensor_resize1d(state, larger_short, inputsize); aliasMultinomialSetup <<<1, 1, 0, THCState_getCurrentStream(state)>>>( THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), inputsize, THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), inputsize - h_large_c, h_large_c ); scalar_t q_max = THCTensor_(maxall)(state, _q); condDiv<<< inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, _q), THCudaLongTensor_data(state, _J), inputsize, q_max ); THCudaLongTensor_free(state, smaller); THCudaLongTensor_free(state, larger); THCudaLongTensor_free(state, smaller_short); THCudaLongTensor_free(state, larger_short); } void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCudaLongTensor *_J, THCTensor *_q){ THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THCGenerator* gen = THCRandom_getGenerator(state); int64_t K = THCudaLongTensor_nElement(state, _J); int64_t output_nelem = THCudaLongTensor_nElement(state, self); ptrdiff_t size = 
THCudaLongTensor_nElement(state, self); THCTensor *uniform = THCTensor_(newWithSize1d)(state, output_nelem); THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, output_nelem); THCTensor_(uniform)(state, uniform, 0, K); THCTensor_(uniform)(state, bernoulli, 0, 1); multinomialAliasDrawKernel <<<THCCeilDiv((int)output_nelem+BLOCK_SIZE-1, BLOCK_SIZE), BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( size, THCudaLongTensor_data(state, self), THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), K, THCTensor_(data)(state, uniform), THCTensor_(data)(state, bernoulli) ); } #endif #if defined(THC_REAL_IS_DOUBLE) GENERATE_KERNEL1(generate_geometric, double, double p, double, curand_uniform_double, ceil(log(x) / log(1-p))) #else GENERATE_KERNEL1(generate_geometric, scalar_t, double p, float, curand_uniform, (ScalarConvert<float, scalar_t>::to(ceilf(logf(x) / log(1-p))))) #endif #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) #define CURAND64(STATE) (((uint64_t)curand(STATE)) << 32) | (uint64_t)curand(STATE) GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, curand, \ static_cast<scalar_t>(static_cast<int32_t>((x % range) + base))) GENERATE_KERNEL2(generate_random_64, scalar_t, int64_t base, uint64_t range, uint64_t, CURAND64, \ static_cast<scalar_t>(static_cast<int64_t>((x % range) + base))) #elif defined(THC_REAL_IS_HALF) GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, curand, (ScalarConvert<int32_t, scalar_t>::to(static_cast<int32_t>(x % range + base)))) #else GENERATE_KERNEL2(generate_random, scalar_t, int32_t base, uint32_t range, uint32_t, curand, static_cast<scalar_t>(static_cast<int32_t>(x % range + base))) #endif void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); generate_geometric<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, p); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(clampedRandom)(THCState* state, THCTensor *self_, int64_t min_val, int64_t max_val) { THArgCheck(min_val < max_val, 2, "max must be greater than min, but got: min = %lld, max = %lld", min_val, max_val); THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); uint64_t range = max_val - min_val; #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) if (range > 1ULL << 32) { generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, static_cast<int>(size), data, min_val, range); } else { #endif generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(min_val), static_cast<uint32_t>(range)); #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) } #endif THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(cappedRandom)(THCState* state, THCTensor *self_, int64_t 
max_val) { THCTensor_(clampedRandom)(state, self_, 0LL, max_val); }; #define HLF_MANT_DIG 11 void THCTensor_(random)(THCState* state, THCTensor *self_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); scalar_t *data = THCTensor_(data)(state, self); #if defined(THC_REAL_IS_HALF) generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>((1UL << HLF_MANT_DIG) + 1)); #elif defined(THC_REAL_IS_FLOAT) generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>((1UL << FLT_MANT_DIG) + 1)); #elif defined(THC_REAL_IS_DOUBLE) generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, static_cast<int>(size), data, static_cast<int64_t>(0ULL), static_cast<uint64_t>((1ULL << DBL_MANT_DIG) + 1)); #elif defined(THC_REAL_IS_LONG) generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, static_cast<int>(size), data, static_cast<int64_t>(0ULL), static_cast<uint64_t>(std::numeric_limits<scalar_t>::max()) + 1); #else generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, static_cast<int>(size), data, static_cast<int32_t>(0UL), static_cast<uint32_t>(std::numeric_limits<scalar_t>::max()) + 1); #endif THCTensor_(freeCopyTo)(state, self, self_); }; #undef HLF_MANT_DIG #undef CURAND64 #undef NUM_BLOCKS #endif
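The kernels launched in the pair above (generate_uniform, generate_normal, generate_exponential, generate_cauchy, ...) share one shape: take the device RNG states in gen->state.gen_states, walk the flattened tensor, and transform hiprand/curand draws into the target distribution. The following is a minimal sketch of that shape under simplifying assumptions of the sketch itself (one Philox state per thread, a plain grid-stride loop); the actual state type and indexing of the THC kernels are not shown in this excerpt:

#include <cstddef>
#include <curand_kernel.h>

// Sketch of the per-state sampling pattern behind generate_uniform: one RNG
// state per thread (an assumption here), a grid-stride loop over the output,
// and the (0,1] draw rescaled to [a, b). Assumes `states` was seeded earlier.
__global__ void generate_uniform_sketch(curandStatePhilox4_32_10_t* states,
                                        ptrdiff_t size, float* out,
                                        float a, float b) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    curandStatePhilox4_32_10_t local = states[tid];   // work on a register copy
    for (ptrdiff_t i = tid; i < size;
         i += (ptrdiff_t)blockDim.x * gridDim.x) {
        float u = curand_uniform(&local);             // uniform in (0, 1]
        out[i] = a + u * (b - a);
    }
    states[tid] = local;                              // persist the advanced state
}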
a240f7f29a48ec78b3b3dfeca119d266dc163122.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * * globalCopy.cu * * Microbenchmark for copy bandwidth of global memory. * * Build with: nvcc -I ../chLib <options> globalCopy.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include <chError.h> #include <chCommandLine.h> template<class T, const int n> __global__ void GlobalCopy( T *out, const T *in, size_t N ) { T temp[n]; size_t i; for ( i = n*blockIdx.x*blockDim.x+threadIdx.x; i < N-n*blockDim.x*gridDim.x; i += n*blockDim.x*gridDim.x ) { for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; temp[j] = in[index]; } for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; out[index] = temp[j]; } } // to avoid the (index<N) conditional in the inner loop, // we left off some work at the end for ( int j = 0; j < n; j++ ) { for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; if ( index<N ) temp[j] = in[index]; } for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; if ( index<N ) out[index] = temp[j]; } } } template<class T, const int n, bool bOffsetDst, bool bOffsetSrc> double BandwidthCopy( T *deviceOut, T *deviceIn, T *hostOut, T *hostIn, size_t N, hipEvent_t evStart, hipEvent_t evStop, int cBlocks, int cThreads ) { double ret = 0.0; double elapsedTime; float ms; int cIterations; hipError_t status; for ( int i = 0; i < N; i++ ) { int r = rand(); hostIn[i] = *(T *)(&r); // for small ints, LSBs; for int2 and int4, some stack cruft } memset( hostOut, 0, N*sizeof(T) ); cuda(Memcpy( deviceIn, hostIn, N*sizeof(T), hipMemcpyHostToDevice ) ); { // confirm that kernel launch with this configuration writes correct result hipLaunchKernelGGL(( GlobalCopy<T,n>), dim3(cBlocks),dim3(cThreads), 0, 0, deviceOut+bOffsetDst, deviceIn+bOffsetSrc, N-bOffsetDst-bOffsetSrc ); cuda(Memcpy( hostOut, deviceOut, N*sizeof(T), hipMemcpyDeviceToHost ) ); cuda(GetLastError() ); if ( memcmp( hostOut+bOffsetDst, hostIn+bOffsetSrc, (N-bOffsetDst-bOffsetSrc)*sizeof(T) ) ) { printf( "Incorrect copy performed!\n" ); goto Error; } } cIterations = 10; hipEventRecord( evStart ); for ( int i = 0; i < cIterations; i++ ) { hipLaunchKernelGGL(( GlobalCopy<T,n>), 
dim3(cBlocks),dim3(cThreads), 0, 0, deviceOut+bOffsetDst, deviceIn+bOffsetSrc, N-bOffsetDst-bOffsetSrc ); } hipEventRecord( evStop ); cuda(ThreadSynchronize() ); // make configurations that cannot launch error-out with 0 bandwidth cuda(GetLastError() ); cuda(EventElapsedTime( &ms, evStart, evStop ) ); elapsedTime = ms/1000.0f; // bytes per second ret = ((double)2*N*cIterations*sizeof(T)) / elapsedTime; // gigabytes per second ret /= 1024.0*1048576.0; Error: return ret; } template<class T, const int n, bool bOffsetDst, bool bOffsetSrc> double ReportRow( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks ) { T *deviceIn = 0; T *deviceOut = 0; T *hostIn = 0; T *hostOut = 0; hipEvent_t evStart = 0; hipEvent_t evStop = 0; hipError_t status; int maxThreads = 0; double maxBW = 0.0; cuda(Malloc( &deviceIn, N*sizeof(T) ) ); cuda(Malloc( &deviceOut, N*sizeof(T) ) ); cuda(Memset( deviceOut, 0, N*sizeof(T) ) ); hostIn = new T[N]; if ( ! hostIn ) goto Error; hostOut = new T[N]; if ( ! hostOut ) goto Error; cuda(EventCreate( &evStart ) ); cuda(EventCreate( &evStop ) ); printf( "%d\t", n ); for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) { double bw = BandwidthCopy<T,n,bOffsetDst,bOffsetSrc>( deviceOut, deviceIn, hostOut, hostIn, N, evStart, evStop, cBlocks, cThreads ); if ( bw > maxBW ) { maxBW = bw; maxThreads = cThreads; } printf( "%.2f\t", bw ); } printf( "%.2f\t%d\n", maxBW, maxThreads ); Error: if ( hostIn ) delete[] hostIn; if ( hostOut ) delete[] hostOut; hipEventDestroy( evStart ); hipEventDestroy( evStop ); hipFree( deviceIn ); hipFree( deviceOut ); return maxBW; } template<class T, bool bOffsetDst, bool bOffsetSrc> void Shmoo( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks ) { printf( "Operand size: %d byte%c\n", (int) sizeof(T), sizeof(T)==1 ? 
'\0' : 's' ); printf( "Input size: %dM operands\n", (int) (N>>20) ); printf( " Block Size\n" ); printf( "Unroll\t" ); for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) { printf( "%d\t", cThreads ); } printf( "maxBW\tmaxThreads\n" ); ReportRow<T, 1, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 2, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 3, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 4, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 5, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 6, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 7, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 8, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 9, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,10, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,11, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,12, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,13, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,14, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,15, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,16, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); } int main( int argc, char *argv[] ) { int device = 0; int size = 16; if ( chCommandLineGet( &device, "device", argc, argv ) ) { printf( "Using device %d...\n", device ); } hipSetDevice(device); if ( chCommandLineGet( &size, "size", argc, argv ) ) { printf( "Using %dM operands ...\n", size ); } if ( chCommandLineGetBool( "uncoalesced_read", argc, argv ) ) { if ( chCommandLineGetBool( "uncoalesced_write", argc, argv ) ) { printf( "Using uncoalesced reads and writes\n" ); Shmoo< char, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4, true, true>( (size_t) size*1048576, 32, 512, 150 ); } else { printf( "Using coalesced writes and uncoalesced reads\n" ); Shmoo< char,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4,false, true>( (size_t) size*1048576, 32, 512, 150 ); } } else { if ( chCommandLineGetBool( "uncoalesced_write", argc, argv ) ) { printf( "Using uncoalesced writes and coalesced reads\n" ); Shmoo< char, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4, true,false>( (size_t) size*1048576, 32, 512, 150 ); } else { printf( "Using coalesced reads and writes\n" ); Shmoo< char,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4,false,false>( 
(size_t) size*1048576, 32, 512, 150 ); } } return 0; }
a240f7f29a48ec78b3b3dfeca119d266dc163122.cu
/* * * globalCopy.cu * * Microbenchmark for copy bandwidth of global memory. * * Build with: nvcc -I ../chLib <options> globalCopy.cu * Requires: No minimum SM requirement. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> #include <chError.h> #include <chCommandLine.h> template<class T, const int n> __global__ void GlobalCopy( T *out, const T *in, size_t N ) { T temp[n]; size_t i; for ( i = n*blockIdx.x*blockDim.x+threadIdx.x; i < N-n*blockDim.x*gridDim.x; i += n*blockDim.x*gridDim.x ) { for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; temp[j] = in[index]; } for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; out[index] = temp[j]; } } // to avoid the (index<N) conditional in the inner loop, // we left off some work at the end for ( int j = 0; j < n; j++ ) { for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; if ( index<N ) temp[j] = in[index]; } for ( int j = 0; j < n; j++ ) { size_t index = i+j*blockDim.x; if ( index<N ) out[index] = temp[j]; } } } template<class T, const int n, bool bOffsetDst, bool bOffsetSrc> double BandwidthCopy( T *deviceOut, T *deviceIn, T *hostOut, T *hostIn, size_t N, cudaEvent_t evStart, cudaEvent_t evStop, int cBlocks, int cThreads ) { double ret = 0.0; double elapsedTime; float ms; int cIterations; cudaError_t status; for ( int i = 0; i < N; i++ ) { int r = rand(); hostIn[i] = *(T *)(&r); // for small ints, LSBs; for int2 and int4, some stack cruft } memset( hostOut, 0, N*sizeof(T) ); cuda(Memcpy( deviceIn, hostIn, N*sizeof(T), cudaMemcpyHostToDevice ) ); { // confirm that kernel launch with this configuration writes correct result GlobalCopy<T,n><<<cBlocks,cThreads>>>( deviceOut+bOffsetDst, deviceIn+bOffsetSrc, N-bOffsetDst-bOffsetSrc ); cuda(Memcpy( hostOut, deviceOut, N*sizeof(T), cudaMemcpyDeviceToHost ) ); cuda(GetLastError() ); if ( memcmp( hostOut+bOffsetDst, hostIn+bOffsetSrc, (N-bOffsetDst-bOffsetSrc)*sizeof(T) ) ) { printf( "Incorrect copy performed!\n" ); goto Error; } } cIterations = 10; cudaEventRecord( evStart ); for ( int i = 0; i < cIterations; i++ ) { GlobalCopy<T,n><<<cBlocks,cThreads>>>( deviceOut+bOffsetDst, deviceIn+bOffsetSrc, N-bOffsetDst-bOffsetSrc ); } cudaEventRecord( evStop ); 
cuda(ThreadSynchronize() ); // make configurations that cannot launch error-out with 0 bandwidth cuda(GetLastError() ); cuda(EventElapsedTime( &ms, evStart, evStop ) ); elapsedTime = ms/1000.0f; // bytes per second ret = ((double)2*N*cIterations*sizeof(T)) / elapsedTime; // gigabytes per second ret /= 1024.0*1048576.0; Error: return ret; } template<class T, const int n, bool bOffsetDst, bool bOffsetSrc> double ReportRow( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks ) { T *deviceIn = 0; T *deviceOut = 0; T *hostIn = 0; T *hostOut = 0; cudaEvent_t evStart = 0; cudaEvent_t evStop = 0; cudaError_t status; int maxThreads = 0; double maxBW = 0.0; cuda(Malloc( &deviceIn, N*sizeof(T) ) ); cuda(Malloc( &deviceOut, N*sizeof(T) ) ); cuda(Memset( deviceOut, 0, N*sizeof(T) ) ); hostIn = new T[N]; if ( ! hostIn ) goto Error; hostOut = new T[N]; if ( ! hostOut ) goto Error; cuda(EventCreate( &evStart ) ); cuda(EventCreate( &evStop ) ); printf( "%d\t", n ); for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) { double bw = BandwidthCopy<T,n,bOffsetDst,bOffsetSrc>( deviceOut, deviceIn, hostOut, hostIn, N, evStart, evStop, cBlocks, cThreads ); if ( bw > maxBW ) { maxBW = bw; maxThreads = cThreads; } printf( "%.2f\t", bw ); } printf( "%.2f\t%d\n", maxBW, maxThreads ); Error: if ( hostIn ) delete[] hostIn; if ( hostOut ) delete[] hostOut; cudaEventDestroy( evStart ); cudaEventDestroy( evStop ); cudaFree( deviceIn ); cudaFree( deviceOut ); return maxBW; } template<class T, bool bOffsetDst, bool bOffsetSrc> void Shmoo( size_t N, size_t threadStart, size_t threadStop, size_t cBlocks ) { printf( "Operand size: %d byte%c\n", (int) sizeof(T), sizeof(T)==1 ? '\0' : 's' ); printf( "Input size: %dM operands\n", (int) (N>>20) ); printf( " Block Size\n" ); printf( "Unroll\t" ); for ( int cThreads = threadStart; cThreads <= threadStop; cThreads *= 2 ) { printf( "%d\t", cThreads ); } printf( "maxBW\tmaxThreads\n" ); ReportRow<T, 1, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 2, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 3, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 4, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 5, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 6, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 7, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 8, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T, 9, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,10, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,11, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,12, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,13, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,14, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,15, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); ReportRow<T,16, bOffsetDst, bOffsetSrc >( N, threadStart, threadStop, cBlocks ); } int main( int argc, char *argv[] ) { int device = 0; int size = 16; if ( chCommandLineGet( &device, "device", argc, argv ) ) { printf( "Using device %d...\n", device ); } cudaSetDevice(device); if ( chCommandLineGet( &size, "size", argc, argv ) ) { printf( "Using %dM operands ...\n", size ); } if ( 
chCommandLineGetBool( "uncoalesced_read", argc, argv ) ) { if ( chCommandLineGetBool( "uncoalesced_write", argc, argv ) ) { printf( "Using uncoalesced reads and writes\n" ); Shmoo< char, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2, true, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4, true, true>( (size_t) size*1048576, 32, 512, 150 ); } else { printf( "Using coalesced writes and uncoalesced reads\n" ); Shmoo< char,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2,false, true>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4,false, true>( (size_t) size*1048576, 32, 512, 150 ); } } else { if ( chCommandLineGetBool( "uncoalesced_write", argc, argv ) ) { printf( "Using uncoalesced writes and coalesced reads\n" ); Shmoo< char, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2, true,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4, true,false>( (size_t) size*1048576, 32, 512, 150 ); } else { printf( "Using coalesced reads and writes\n" ); Shmoo< char,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo<short,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int2,false,false>( (size_t) size*1048576, 32, 512, 150 ); Shmoo< int4,false,false>( (size_t) size*1048576, 32, 512, 150 ); } } return 0; }
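GlobalCopy in the pair above is templated on an unroll factor n: each thread loads n elements into a local array (registers) before storing them, so several memory transactions are in flight per thread, and the shmoo prints bandwidth for n = 1..16 across block sizes of 32 to 512. For reference, a minimal un-unrolled version of the same grid-stride copy (a sketch, not part of the benchmark) looks like this:

// Un-unrolled grid-stride copy for comparison with the templated GlobalCopy
// above; each thread copies every (blockDim.x * gridDim.x)-th element.
template<class T>
__global__ void GlobalCopy1(T* out, const T* in, size_t N) {
    for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
         i < N;
         i += (size_t)blockDim.x * gridDim.x) {
        out[i] = in[i];
    }
}

The unrolled variant trades registers for memory-level parallelism; the maxBW/maxThreads columns printed by ReportRow show which (unroll, block size) pair wins on a given GPU.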
969f6651775702a16c777582e6784f3c08fde724.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Reference: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index. html#ixzz4CtH09yed */ #include <cstdlib> #include <ctime> #include <cstdio> #include <iostream> using namespace std; // Generate random floats between 0 and UP_BOUND #define UP_BOUND 100; // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.stride + col) typedef struct { int width; int height; int stride; float* elements; } Matrix; // Thread block size #define BLOCK_SIZE 20 // Forward declaration of the matrix multiplication kernel __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); // Get a matrix element __device__ float GetElement(const Matrix A, int row, int col) { return A.elements[row * A.stride + col]; } // Set a matrix element __device__ void SetElement(Matrix A, int row, int col, float value) { A.elements[row * A.stride + col] = value; } // Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is // located col sub-matrices to the right and row sub-matrices down // from the upper-left corner of A __device__ Matrix GetSubMatrix(Matrix A, int row, int col) { Matrix Asub; Asub.width = BLOCK_SIZE; Asub.height = BLOCK_SIZE; Asub.stride = A.stride; Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col]; return Asub; } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A; d_A.width = d_A.stride = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); hipError_t err = hipMalloc(&d_A.elements, size); cout << "CUDA malloc A: " << hipGetErrorString(err) << endl; err = hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice); cout << "Copy A to device: " << hipGetErrorString(err) << "\n" << endl; Matrix d_B; d_B.width = d_B.stride = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); err = hipMalloc(&d_B.elements, size); cout << "CUDA malloc B: " << hipGetErrorString(err) << endl; err = hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice); cout << "Copy B to device: " << hipGetErrorString(err) << "\n" << endl; // Allocate C in device memory Matrix d_C; d_C.width = d_C.stride = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); err = hipMalloc(&d_C.elements, size); cout << "CUDA malloc C: " << hipGetErrorString(err) << endl; // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y); hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C); err = hipDeviceSynchronize(); cout << "Run kernel: " << hipGetErrorString(err) << endl; // Read C from device memory err = hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost); cout << "Copy C off of device: " << hipGetErrorString(err) << "\n" << endl; // Free device memory hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_C.elements); } // Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Shared memory used to store Asub and Bsub respectively __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; int row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int col = blockIdx.x * BLOCK_SIZE + threadIdx.x; // Each thread computes one element of Csub // by accumulating results into Cvalue float Cvalue = 0.0; for 
(int i = 0; i < (A.width - 1)/BLOCK_SIZE + 1; ++i) { int temp = i * BLOCK_SIZE + threadIdx.x; if (row < A.height && temp < A.width) As[threadIdx.y][threadIdx.x] = A.elements[row * A.width + temp]; else As[threadIdx.y][threadIdx.x] = 0.0; temp = i * BLOCK_SIZE + threadIdx.y; if (col < B.width && temp < B.height) Bs[threadIdx.y][threadIdx.x] = B.elements[temp * B.width + col]; else Bs[threadIdx.y][threadIdx.x] = 0.0; __syncthreads(); for (int j = 0; j < BLOCK_SIZE; ++j) Cvalue += As[threadIdx.y][j] * Bs[j][threadIdx.x]; __syncthreads(); } if (row < C.height && col < C.width) C.elements[row * C.width + col] = Cvalue; /*---Original code from CUDA C Programming Guide---*/ /* // Block row and column int blockRow = blockIdx.y; int blockCol = blockIdx.x; // Each thread block computes one sub-matrix Csub of C Matrix Csub = GetSubMatrix(C, blockRow, blockCol); // Each thread computes one element of Csub // by accumulating results into Cvalue float Cvalue = 0; // Thread row and column within Csub int row = threadIdx.y; int col = threadIdx.x; // Loop over all the sub-matrices of A and B that are // required to compute Csub // Multiply each pair of sub-matrices together // and accumulate the results for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) { // Get sub-matrix Asub of A Matrix Asub = GetSubMatrix(A, blockRow, m); // Get sub-matrix Bsub of B Matrix Bsub = GetSubMatrix(B, m, blockCol); // Shared memory used to store Asub and Bsub respectively __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load Asub and Bsub from device memory to shared memory // Each thread loads one element of each sub-matrix As[row][col] = GetElement(Asub, row, col); Bs[row][col] = GetElement(Bsub, row, col); // Synchronize to make sure the sub-matrices are loaded // before starting the computation __syncthreads(); // Multiply Asub and Bsub together for (int e = 0; e < BLOCK_SIZE; ++e) Cvalue += As[row][e] * Bs[e][col]; // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write Csub to device memory // Each thread writes one element SetElement(Csub, row, col, Cvalue); */ } int main(int argc, char const *argv[]) { clock_t t; Matrix A, B, C; int a1, a2, b1, b2; int i, j; srand(time(NULL)); if (argc < 4) cout << "Usage: ./accuracy.o A.height A.width B.width" << endl; // Get dimensions of A and B // Run $ ./matrixMul 1 1000000 400 a1 = atoi(argv[1]); // A's height a2 = atoi(argv[2]); // A's width b1 = a2; // B's height b2 = atoi(argv[3]); // B's width A.height = a1; A.width = A.stride = a2; A.elements = new float[A.width * A.height]; B.height = b1; B.width = B.stride = b2; B.elements = new float[B.width * B. 
height]; C.height = A.height; C.width = C.stride = B.width; C.elements = new float[C.width * C.height]; // Fill A and B with random floats for (i = 0; i < A.height; ++i) for (j = 0; j < A.width; ++j) A.elements[i * A.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND; for (i = 0; i < B.height; ++i) for (j = 0; j < B.width; ++j) B.elements[i * B.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND; // Call MatMul(), and therefore MatMulKernel() t = clock(); MatMul(A, B, C); // Print time multiplication took t = clock() - t; cout << "It took me " << fixed << ((float)t)/CLOCKS_PER_SEC; cout << " seconds.\n" << endl; // Print A, B, and C for (i = 0; i < min(10, A.height); ++i) { for (j = 0; j < min(10, A.width); ++j) cout << fixed << A.elements[i * A.width + j] << "\t"; cout << endl; } cout << endl; for (i = 0; i < min(10, B.height); ++i) { for (j = 0; j < min(10, B.width); ++j) cout << fixed << B.elements[i * B.width + j] << "\t"; cout << endl; } cout << endl; for (i = 0; i < min(10, C.height); ++i) { for (j = 0; j < min(10, C.width); ++j) cout << fixed << C.elements[i * C.width + j] << "\t"; cout << endl; } cout << endl; delete[] A.elements; delete[] B.elements; delete[] C.elements; return 0; }
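The hipified file above and the CUDA original that follows differ mainly in mechanical renames (cudaMalloc to hipMalloc, cudaMemcpy to hipMemcpy, the deprecated cudaThreadSynchronize to hipDeviceSynchronize) and in the kernel launch, which hipify rewrites from the triple-chevron syntax into the hipLaunchKernelGGL macro. A minimal CUDA sketch of that mapping, with the converted form shown as a comment; the kernel, sizes, and names here are illustrative and are not taken from either file.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale(float *x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;                       // trivial per-element work
}

int main() {
    const int n = 1 << 10;
    float *d_x;
    cudaMalloc(&d_x, n * sizeof(float));        // hipify: hipMalloc(&d_x, ...)
    cudaMemset(d_x, 0, n * sizeof(float));      // hipify: hipMemset(...)
    dim3 block(256), grid((n + block.x - 1) / block.x);

    // CUDA launch as written in a .cu file:
    scale<<<grid, block, 0, 0>>>(d_x, 2.0f, n);
    // hipify rewrites the launch as:
    //   hipLaunchKernelGGL(( scale), dim3(grid), dim3(block), 0, 0, d_x, 2.0f, n);

    cudaDeviceSynchronize();                    // hipify: hipDeviceSynchronize()
    cudaFree(d_x);                              // hipify: hipFree(d_x)
    printf("done\n");
    return 0;
}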
969f6651775702a16c777582e6784f3c08fde724.cu
/* Reference: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index. html#ixzz4CtH09yed */ #include <cstdlib> #include <ctime> #include <cstdio> #include <iostream> using namespace std; // Generate random floats between 0 and UP_BOUND #define UP_BOUND 100; // Matrices are stored in row-major order: // M(row, col) = *(M.elements + row * M.stride + col) typedef struct { int width; int height; int stride; float* elements; } Matrix; // Thread block size #define BLOCK_SIZE 20 // Forward declaration of the matrix multiplication kernel __global__ void MatMulKernel(const Matrix, const Matrix, Matrix); // Get a matrix element __device__ float GetElement(const Matrix A, int row, int col) { return A.elements[row * A.stride + col]; } // Set a matrix element __device__ void SetElement(Matrix A, int row, int col, float value) { A.elements[row * A.stride + col] = value; } // Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is // located col sub-matrices to the right and row sub-matrices down // from the upper-left corner of A __device__ Matrix GetSubMatrix(Matrix A, int row, int col) { Matrix Asub; Asub.width = BLOCK_SIZE; Asub.height = BLOCK_SIZE; Asub.stride = A.stride; Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col]; return Asub; } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void MatMul(const Matrix A, const Matrix B, Matrix C) { // Load A and B to device memory Matrix d_A; d_A.width = d_A.stride = A.width; d_A.height = A.height; size_t size = A.width * A.height * sizeof(float); cudaError_t err = cudaMalloc(&d_A.elements, size); cout << "CUDA malloc A: " << cudaGetErrorString(err) << endl; err = cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice); cout << "Copy A to device: " << cudaGetErrorString(err) << "\n" << endl; Matrix d_B; d_B.width = d_B.stride = B.width; d_B.height = B.height; size = B.width * B.height * sizeof(float); err = cudaMalloc(&d_B.elements, size); cout << "CUDA malloc B: " << cudaGetErrorString(err) << endl; err = cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice); cout << "Copy B to device: " << cudaGetErrorString(err) << "\n" << endl; // Allocate C in device memory Matrix d_C; d_C.width = d_C.stride = C.width; d_C.height = C.height; size = C.width * C.height * sizeof(float); err = cudaMalloc(&d_C.elements, size); cout << "CUDA malloc C: " << cudaGetErrorString(err) << endl; // Invoke kernel dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y); MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C); err = cudaThreadSynchronize(); cout << "Run kernel: " << cudaGetErrorString(err) << endl; // Read C from device memory err = cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost); cout << "Copy C off of device: " << cudaGetErrorString(err) << "\n" << endl; // Free device memory cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); } // Matrix multiplication kernel called by MatMul() __global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) { // Shared memory used to store Asub and Bsub respectively __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; int row = blockIdx.y * BLOCK_SIZE + threadIdx.y; int col = blockIdx.x * BLOCK_SIZE + threadIdx.x; // Each thread computes one element of Csub // by accumulating results into Cvalue float Cvalue = 0.0; for (int i = 0; i < (A.width - 1)/BLOCK_SIZE + 1; ++i) { int temp = i * BLOCK_SIZE + threadIdx.x; if (row < 
A.height && temp < A.width) As[threadIdx.y][threadIdx.x] = A.elements[row * A.width + temp]; else As[threadIdx.y][threadIdx.x] = 0.0; temp = i * BLOCK_SIZE + threadIdx.y; if (col < B.width && temp < B.height) Bs[threadIdx.y][threadIdx.x] = B.elements[temp * B.width + col]; else Bs[threadIdx.y][threadIdx.x] = 0.0; __syncthreads(); for (int j = 0; j < BLOCK_SIZE; ++j) Cvalue += As[threadIdx.y][j] * Bs[j][threadIdx.x]; __syncthreads(); } if (row < C.height && col < C.width) C.elements[row * C.width + col] = Cvalue; /*---Original code from CUDA C Programming Guide---*/ /* // Block row and column int blockRow = blockIdx.y; int blockCol = blockIdx.x; // Each thread block computes one sub-matrix Csub of C Matrix Csub = GetSubMatrix(C, blockRow, blockCol); // Each thread computes one element of Csub // by accumulating results into Cvalue float Cvalue = 0; // Thread row and column within Csub int row = threadIdx.y; int col = threadIdx.x; // Loop over all the sub-matrices of A and B that are // required to compute Csub // Multiply each pair of sub-matrices together // and accumulate the results for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) { // Get sub-matrix Asub of A Matrix Asub = GetSubMatrix(A, blockRow, m); // Get sub-matrix Bsub of B Matrix Bsub = GetSubMatrix(B, m, blockCol); // Shared memory used to store Asub and Bsub respectively __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load Asub and Bsub from device memory to shared memory // Each thread loads one element of each sub-matrix As[row][col] = GetElement(Asub, row, col); Bs[row][col] = GetElement(Bsub, row, col); // Synchronize to make sure the sub-matrices are loaded // before starting the computation __syncthreads(); // Multiply Asub and Bsub together for (int e = 0; e < BLOCK_SIZE; ++e) Cvalue += As[row][e] * Bs[e][col]; // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write Csub to device memory // Each thread writes one element SetElement(Csub, row, col, Cvalue); */ } int main(int argc, char const *argv[]) { clock_t t; Matrix A, B, C; int a1, a2, b1, b2; int i, j; srand(time(NULL)); if (argc < 4) cout << "Usage: ./accuracy.o A.height A.width B.width" << endl; // Get dimensions of A and B // Run $ ./matrixMul 1 1000000 400 a1 = atoi(argv[1]); // A's height a2 = atoi(argv[2]); // A's width b1 = a2; // B's height b2 = atoi(argv[3]); // B's width A.height = a1; A.width = A.stride = a2; A.elements = new float[A.width * A.height]; B.height = b1; B.width = B.stride = b2; B.elements = new float[B.width * B. 
height]; C.height = A.height; C.width = C.stride = B.width; C.elements = new float[C.width * C.height]; // Fill A and B with random floats for (i = 0; i < A.height; ++i) for (j = 0; j < A.width; ++j) A.elements[i * A.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND; for (i = 0; i < B.height; ++i) for (j = 0; j < B.width; ++j) B.elements[i * B.width + j] = ((float)rand() / (float)RAND_MAX) * UP_BOUND; // Call MatMul(), and therefore MatMulKernel() t = clock(); MatMul(A, B, C); // Print time multiplication took t = clock() - t; cout << "It took me " << fixed << ((float)t)/CLOCKS_PER_SEC; cout << " seconds.\n" << endl; // Print A, B, and C for (i = 0; i < min(10, A.height); ++i) { for (j = 0; j < min(10, A.width); ++j) cout << fixed << A.elements[i * A.width + j] << "\t"; cout << endl; } cout << endl; for (i = 0; i < min(10, B.height); ++i) { for (j = 0; j < min(10, B.width); ++j) cout << fixed << B.elements[i * B.width + j] << "\t"; cout << endl; } cout << endl; for (i = 0; i < min(10, C.height); ++i) { for (j = 0; j < min(10, C.width); ++j) cout << fixed << C.elements[i * C.width + j] << "\t"; cout << endl; } cout << endl; delete[] A.elements; delete[] B.elements; delete[] C.elements; return 0; }
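The kernel in both versions stages BLOCK_SIZE x BLOCK_SIZE tiles of A and B in shared memory, pads out-of-range elements with zero, and separates the load and multiply phases with two __syncthreads() calls, so the multiply itself tolerates dimensions that are not multiples of the tile size. The host code, however, still computes the grid with truncating division (B.width / dimBlock.x), so for dimensions that are not multiples of BLOCK_SIZE the last partial tile of C would never be launched; the comment about dimensions being multiples of BLOCK_SIZE effectively still applies to the launch. Below is a condensed sketch of the same tiling scheme with a ceiling-division grid; the 16x16 tile and the matrix sizes are illustrative assumptions, not values from the files.

#include <cuda_runtime.h>
#include <cstdio>

#define TILE 16  // assumed tile width; the files above use 20

// C = A * B for row-major matrices, with zero padding at the edges
// so the dimensions need not be multiples of TILE.
__global__ void tiledMatMul(const float *A, const float *B, float *C,
                            int M, int K, int N) {
    __shared__ float As[TILE][TILE];
    __shared__ float Bs[TILE][TILE];
    int row = blockIdx.y * TILE + threadIdx.y;
    int col = blockIdx.x * TILE + threadIdx.x;
    float acc = 0.0f;
    for (int t = 0; t < (K + TILE - 1) / TILE; ++t) {
        int aCol = t * TILE + threadIdx.x;
        int bRow = t * TILE + threadIdx.y;
        As[threadIdx.y][threadIdx.x] = (row < M && aCol < K) ? A[row * K + aCol] : 0.0f;
        Bs[threadIdx.y][threadIdx.x] = (bRow < K && col < N) ? B[bRow * N + col] : 0.0f;
        __syncthreads();                        // tile fully loaded
        for (int j = 0; j < TILE; ++j)
            acc += As[threadIdx.y][j] * Bs[j][threadIdx.x];
        __syncthreads();                        // done reading this tile
    }
    if (row < M && col < N)
        C[row * N + col] = acc;
}

int main() {
    const int M = 33, K = 47, N = 21;           // deliberately not multiples of TILE
    float *A, *B, *C;
    cudaMallocManaged(&A, M * K * sizeof(float));
    cudaMallocManaged(&B, K * N * sizeof(float));
    cudaMallocManaged(&C, M * N * sizeof(float));
    for (int i = 0; i < M * K; ++i) A[i] = 1.0f;
    for (int i = 0; i < K * N; ++i) B[i] = 1.0f;
    dim3 block(TILE, TILE), grid((N + TILE - 1) / TILE, (M + TILE - 1) / TILE);
    tiledMatMul<<<grid, block>>>(A, B, C, M, K, N);
    cudaDeviceSynchronize();
    printf("C[0] = %f (expected %d)\n", C[0], K);
    cudaFree(A); cudaFree(B); cudaFree(C);
    return 0;
}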
015adbbaab8016833c3eaad8f4c7854f75f2586d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" # include <hip/hip_runtime.h> # include <hip/hip_runtime.h> extern "C" unsigned char * RGB2HSV(unsigned char * data, int npixels); __global__ void RGB2HSVcuda(unsigned char * dataRGBdev, unsigned char * dataHSVdev, int npixels){ int posThread = blockIdx.x*blockDim.x + threadIdx.x; // ** Size, just consider the number of pixel, non the total of data, // ** But in dataRGBdev is all channels data if(posThread < npixels){ // Get the maximun & minimun value of RGB pixel unsigned char max = 0; unsigned char min = 255; int position = posThread*3; // Max pos represents 0: R, 1: G, 2: B int maxpos = 0; for(int i = 0; i < 3; i++){ int pos = position + i; if(dataRGBdev[pos] > max){ max = dataRGBdev[pos]; maxpos = i; } else if(dataRGBdev[pos] < min) min = dataRGBdev[pos]; } int diff = (max - min > 0)? max - min: 1; //set H position int valH; if(maxpos == 0) valH = ((dataRGBdev[position + 1] - dataRGBdev[position + 2])/diff) % 6; else if(maxpos == 1) valH = (dataRGBdev[position +2] - dataRGBdev[position])/diff + 2; else valH = (dataRGBdev[position] - dataRGBdev[position + 1])/diff +4; valH *=60; valH = (valH > 0) ? valH:0; valH = (valH < 255) ? valH:255; dataHSVdev[position] = (unsigned char) valH; // Set S position dataHSVdev[position +1 ] = (max == 0)? 0 : (max-min)/max; // Set V Position dataHSVdev[position + 2] = max; } } unsigned char * RGB2HSV(unsigned char * data, int npixels){ unsigned char * hsv = new unsigned char[npixels*3]; unsigned char * hsvDev; unsigned char * dataDev; hipMalloc((void**)&hsvDev, npixels*3*sizeof(unsigned char)); hipMalloc((void**)&dataDev, npixels*3*sizeof(unsigned char)); hipMemcpy(dataDev, data, 3*npixels*sizeof(unsigned char), hipMemcpyHostToDevice); int nThreads = 1024; int nBlocks = (npixels % nThreads > 0) ? npixels/nThreads + 1: npixels/nThreads; hipLaunchKernelGGL(( RGB2HSVcuda), dim3(nBlocks), dim3(nThreads), 0, 0, dataDev, hsvDev, npixels); hipMemcpy(hsv, hsvDev, 3*npixels*sizeof(unsigned char), hipMemcpyDeviceToHost); hipFree(dataDev); hipFree(hsvDev); return hsv; }
015adbbaab8016833c3eaad8f4c7854f75f2586d.cu
# include <cuda.h> # include <cuda_runtime.h> extern "C" unsigned char * RGB2HSV(unsigned char * data, int npixels); __global__ void RGB2HSVcuda(unsigned char * dataRGBdev, unsigned char * dataHSVdev, int npixels){ int posThread = blockIdx.x*blockDim.x + threadIdx.x; // ** Size, just consider the number of pixel, non the total of data, // ** But in dataRGBdev is all channels data if(posThread < npixels){ // Get the maximun & minimun value of RGB pixel unsigned char max = 0; unsigned char min = 255; int position = posThread*3; // Max pos represents 0: R, 1: G, 2: B int maxpos = 0; for(int i = 0; i < 3; i++){ int pos = position + i; if(dataRGBdev[pos] > max){ max = dataRGBdev[pos]; maxpos = i; } else if(dataRGBdev[pos] < min) min = dataRGBdev[pos]; } int diff = (max - min > 0)? max - min: 1; //set H position int valH; if(maxpos == 0) valH = ((dataRGBdev[position + 1] - dataRGBdev[position + 2])/diff) % 6; else if(maxpos == 1) valH = (dataRGBdev[position +2] - dataRGBdev[position])/diff + 2; else valH = (dataRGBdev[position] - dataRGBdev[position + 1])/diff +4; valH *=60; valH = (valH > 0) ? valH:0; valH = (valH < 255) ? valH:255; dataHSVdev[position] = (unsigned char) valH; // Set S position dataHSVdev[position +1 ] = (max == 0)? 0 : (max-min)/max; // Set V Position dataHSVdev[position + 2] = max; } } unsigned char * RGB2HSV(unsigned char * data, int npixels){ unsigned char * hsv = new unsigned char[npixels*3]; unsigned char * hsvDev; unsigned char * dataDev; cudaMalloc((void**)&hsvDev, npixels*3*sizeof(unsigned char)); cudaMalloc((void**)&dataDev, npixels*3*sizeof(unsigned char)); cudaMemcpy(dataDev, data, 3*npixels*sizeof(unsigned char), cudaMemcpyHostToDevice); int nThreads = 1024; int nBlocks = (npixels % nThreads > 0) ? npixels/nThreads + 1: npixels/nThreads; RGB2HSVcuda<<<nBlocks, nThreads>>>(dataDev, hsvDev, npixels); cudaMemcpy(hsv, hsvDev, 3*npixels*sizeof(unsigned char), cudaMemcpyDeviceToHost); cudaFree(dataDev); cudaFree(hsvDev); return hsv; }
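Both versions of this converter map one thread to one interleaved RGB pixel and guard with posThread < npixels. Note that the hue and saturation formulas are evaluated in unsigned char / int arithmetic, so (max - min) / max truncates to 0 or 1 and the stored saturation carries almost no information. A hedged sketch of the same per-pixel mapping done in float and quantized to bytes at the end; the layout (3 interleaved bytes per pixel) matches the files, but the function names and the 0-255 scaling of H are assumptions made for illustration.

#include <cuda_runtime.h>

// One thread per pixel; input is interleaved R,G,B bytes, output is H,S,V bytes.
__global__ void rgb2hsvFloat(const unsigned char *rgb, unsigned char *hsv, int npixels) {
    int p = blockIdx.x * blockDim.x + threadIdx.x;
    if (p >= npixels) return;
    int base = 3 * p;
    float r = rgb[base] / 255.0f, g = rgb[base + 1] / 255.0f, b = rgb[base + 2] / 255.0f;
    float mx = fmaxf(r, fmaxf(g, b));
    float mn = fminf(r, fminf(g, b));
    float d  = mx - mn;
    float h = 0.0f;
    if (d > 0.0f) {
        if (mx == r)      h = fmodf((g - b) / d, 6.0f);
        else if (mx == g) h = (b - r) / d + 2.0f;
        else              h = (r - g) / d + 4.0f;
        h *= 60.0f;
        if (h < 0.0f) h += 360.0f;
    }
    float s = (mx > 0.0f) ? d / mx : 0.0f;
    hsv[base]     = (unsigned char)(h / 360.0f * 255.0f); // H rescaled into one byte
    hsv[base + 1] = (unsigned char)(s * 255.0f);          // S in [0,255]
    hsv[base + 2] = (unsigned char)(mx * 255.0f);         // V in [0,255]
}

int main() {
    const int npixels = 4;
    unsigned char h_rgb[12] = {255,0,0, 0,255,0, 0,0,255, 128,128,128};
    unsigned char h_hsv[12];
    unsigned char *d_rgb, *d_hsv;
    cudaMalloc(&d_rgb, 12); cudaMalloc(&d_hsv, 12);
    cudaMemcpy(d_rgb, h_rgb, 12, cudaMemcpyHostToDevice);
    rgb2hsvFloat<<<1, 256>>>(d_rgb, d_hsv, npixels);
    cudaMemcpy(h_hsv, d_hsv, 12, cudaMemcpyDeviceToHost);
    cudaFree(d_rgb); cudaFree(d_hsv);
    return 0;
}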
2866bcfd8e485a80b1b088a9b9df559c3a6fb16b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "check.h" const int MAX_ITERATIONS = 100; struct Point2D { float x; float y; }; int readPoints(const char * fn, int pNr, Point2D * ps) { FILE * fp = fopen(fn, "rb"); // open a binary input file to read data from if (fp == NULL) return 0; int tmp = fread(ps, sizeof(float), 2*pNr, fp); // binary read fclose(fp); return tmp; // return the number of successfully read elements } void printClustres(int cNr, int pNr, const Point2D * ctds, Point2D * ps, const int * p2ctds, bool details) { for (int i = 0; i < cNr; i++) printf("center %d: %f %f\n", i, ctds[i].x, ctds[i].y); if (details){ printf ("\n------------------- details --------------------------\n"); for (int i = 0; i < cNr; i++) { printf("center %d: %f %f\n", i, ctds[i].x, ctds[i].y); printf( "Points: \n"); int k = 0; for (int j = 0; j < pNr; j++) { if (p2ctds[j] == i) { switch (k) { case 8 : printf ("\n(%.3f,%.3f)\t", ps[j].x, ps[j].y); break; default: printf ("(%.3f,%.3f)\t", ps[j].x, ps[j].y); } k = (k == 8) ? 0: k+1; } } printf("\n"); } } } // Initialize cluster centroids to the first K points from the dataset void initializeClusters(int n, Point2D const * ps, Point2D * ctds) { for (int i = 0; i < n; i++) { ctds[i].x = ps[i].x; ctds[i].y = ps[i].y; } } //Kernel that assigns each point to the closest cluster __global__ void assignClusters(Point2D * ps, int pNr, Point2D *ctds, int cNr, int * p2ctds) { //Thread index and stride size int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; //Go over all points using striding while(tid < pNr) { float cdis; //Distance to cluster int cIdx; //Id of the cluster float tmp, tmp_x, tmp_y; //Temp variables to compute and compare distances //Set the first cluster as the starting value of closest cluster tmp_x = ps[tid].x - ctds[0].x; tmp_y = ps[tid].y - ctds[0].y; cdis = tmp_x * tmp_x + tmp_y * tmp_y; cIdx = 0; //Foreach cluster for (int c = 1; c < cNr; c++) { //Compute the distance tmp_x = ps[tid].x - ctds[c].x; tmp_y = ps[tid].y - ctds[c].y; tmp = tmp_x*tmp_x + tmp_y*tmp_y; //If the current cluster distance is larger set the current cluster //as the closest one if ( cdis > tmp) { cdis = tmp; cIdx = c; } } //Set the points to clusters map for the current point p2ctds[tid] = cIdx; //Use stride to handle points that are out of the range of total threads tid += stride; } } //Kernel that computes the centroids' position as the average of the points assigned to them __global__ void centroids(Point2D * ps, int pNr, Point2D * ctds, int cNr, int * p2ctds, int * counters) { //Array in shared memory that contains the number of points of each cluster __shared__ int shared_counters[256]; //Thread index and stride size int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; //Reset the centroids' positions and number of points if(tid < cNr) { ctds[tid].x = ctds[tid].y = 0.0f; counters[tid] = 0; } //Initialize the shared array in each block if(threadIdx.x < cNr) { shared_counters[threadIdx.x] = 0; } __syncthreads(); //Go over all points using striding while(tid < pNr) { //Use atomicAdd to add the coords of the points to the clusters int ctdId = p2ctds[tid]; atomicAdd(&ctds[ctdId].x, ps[tid].x); atomicAdd(&ctds[ctdId].y, ps[tid].y); //Use atomicAdd to add the number of clusters to the shared array atomicAdd(&shared_counters[ctdId],1); //Use stride to handle points that are out of the range of total threads tid += stride; } __syncthreads(); 
//Add all the shared arrays to the global array if(threadIdx.x < cNr) { atomicAdd(&counters[threadIdx.x],shared_counters[threadIdx.x]); } } int main(int argc, char **argv) { //Check if the path of the file is provided if (argc < 2) { printf("Provide the address of input data file.\n"); return 0; } int pNr; //Number of points int cNr; //Number of centroids // Extract # of points and # of clusters // from the name of the input file if (sscanf(argv[1], "2DPoints_%d_C_%d.data", &pNr, &cNr) != 2) { printf("Error! Unexpected file name.\n"); return 0; } printf("Number of Points: %d\nNumber of Clusters: %d\n", pNr, cNr); Point2D * ps = new Point2D[pNr]; //Points Point2D * ctds = new Point2D[cNr]; //Centroids int * p2ctds = new int [pNr]; //Point to cluster map int counters[256]; //Number of points assigned to each cluster // Read observed points from the input file if (readPoints(argv[1], pNr, ps) == 0) printf("Error! Unable to open input file."); hipDeviceProp_t prop; //Device properties CHECK( hipGetDeviceProperties( &prop, 0 ) ); //Get the device properties //Device must have compute capability above 2 to use atomicAdd if(prop.major < 2) { printf("GPU's compute capability doesn't support 32-bit floating-point atomicAdd."); return 0; } // Initalize the cluster centroids initializeClusters(cNr, ps, ctds); Point2D * dev_ps, *dev_ctds; //Device points, centroids int * dev_p2ctds; //Device point to cluster map int * dev_counters; //Device number of points assigned to each cluster int blocks = 32; //Set the number of blocks to 32 int threads = prop.maxThreadsPerBlock; //Set the number of threads to the maximum possible //Cuda events for timing hipEvent_t start, stop; //Allocate memory on device for device variables CHECK( hipMalloc( (void**)&dev_ps, pNr*sizeof(Point2D))); CHECK( hipMalloc( (void**)&dev_ctds, cNr*sizeof(Point2D))); CHECK( hipMalloc( (void**)&dev_p2ctds, pNr*sizeof(int))); CHECK( hipMalloc( (void**)&dev_counters, cNr*sizeof(int))); //Create events and start timer CHECK( hipEventCreate(&start)); CHECK( hipEventCreate(&stop)); CHECK( hipEventRecord(start,0)); //Copy points to device CHECK( hipMemcpy( dev_ps, ps, pNr*sizeof(Point2D), hipMemcpyHostToDevice)); //For the number of iterations given for(int i =0 ; i < MAX_ITERATIONS ; i++) { //Copy the centroids position to device CHECK( hipMemcpy( dev_ctds, ctds, cNr*sizeof(Point2D), hipMemcpyHostToDevice)); //Call the kernel that assigns points to clusters hipLaunchKernelGGL(( assignClusters), dim3(blocks),dim3(threads), 0, 0, dev_ps,pNr,dev_ctds,cNr,dev_p2ctds); //Call the kernel that computes the sum of the points position assigned to each cluster hipLaunchKernelGGL(( centroids), dim3(blocks),dim3(threads), 0, 0, dev_ps,pNr,dev_ctds,cNr,dev_p2ctds,dev_counters); //Copy the number of points assigned to each cluster and the centroids positions to the host CHECK( hipMemcpy( counters, dev_counters, cNr*sizeof(int), hipMemcpyDeviceToHost)); CHECK( hipMemcpy( ctds, dev_ctds, cNr*sizeof(Point2D), hipMemcpyDeviceToHost)); //Do the division to find the average for(int i =0 ; i < cNr ; i++) { if(counters[i] > 0) { ctds[i].x = ctds[i].x/counters[i]; ctds[i].y = ctds[i].y/counters[i]; } else { ctds[i].x = ps[0].x; ctds[i].y = ps[0].y; } } } //Copy the points to clusters map to host CHECK( hipMemcpy(p2ctds, dev_p2ctds, pNr*sizeof(int), hipMemcpyDeviceToHost)); //Stop the timer CHECK( hipEventRecord(stop, 0)); CHECK( hipEventSynchronize(stop)); float elapsedTime; //Get elapsed time from event and print it CHECK( hipEventElapsedTime( &elapsedTime, start, 
stop)); printf("Time : %3.1f ms \n", elapsedTime); //Destroy the timing events CHECK( hipEventDestroy( start)); CHECK( hipEventDestroy( stop)); //Clear the device resources hipFree(dev_ps); hipFree(dev_ctds); hipFree(dev_p2ctds); hipFree(dev_counters); //Print the clusters printClustres(cNr, pNr, ctds, ps, p2ctds, false); //Free the host arrays delete[] ps; delete[] ctds; delete[] p2ctds; return 0; }
2866bcfd8e485a80b1b088a9b9df559c3a6fb16b.cu
#include <stdio.h> #include "check.h" const int MAX_ITERATIONS = 100; struct Point2D { float x; float y; }; int readPoints(const char * fn, int pNr, Point2D * ps) { FILE * fp = fopen(fn, "rb"); // open a binary input file to read data from if (fp == NULL) return 0; int tmp = fread(ps, sizeof(float), 2*pNr, fp); // binary read fclose(fp); return tmp; // return the number of successfully read elements } void printClustres(int cNr, int pNr, const Point2D * ctds, Point2D * ps, const int * p2ctds, bool details) { for (int i = 0; i < cNr; i++) printf("center %d: %f %f\n", i, ctds[i].x, ctds[i].y); if (details){ printf ("\n------------------- details --------------------------\n"); for (int i = 0; i < cNr; i++) { printf("center %d: %f %f\n", i, ctds[i].x, ctds[i].y); printf( "Points: \n"); int k = 0; for (int j = 0; j < pNr; j++) { if (p2ctds[j] == i) { switch (k) { case 8 : printf ("\n(%.3f,%.3f)\t", ps[j].x, ps[j].y); break; default: printf ("(%.3f,%.3f)\t", ps[j].x, ps[j].y); } k = (k == 8) ? 0: k+1; } } printf("\n"); } } } // Initialize cluster centroids to the first K points from the dataset void initializeClusters(int n, Point2D const * ps, Point2D * ctds) { for (int i = 0; i < n; i++) { ctds[i].x = ps[i].x; ctds[i].y = ps[i].y; } } //Kernel that assigns each point to the closest cluster __global__ void assignClusters(Point2D * ps, int pNr, Point2D *ctds, int cNr, int * p2ctds) { //Thread index and stride size int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; //Go over all points using striding while(tid < pNr) { float cdis; //Distance to cluster int cIdx; //Id of the cluster float tmp, tmp_x, tmp_y; //Temp variables to compute and compare distances //Set the first cluster as the starting value of closest cluster tmp_x = ps[tid].x - ctds[0].x; tmp_y = ps[tid].y - ctds[0].y; cdis = tmp_x * tmp_x + tmp_y * tmp_y; cIdx = 0; //Foreach cluster for (int c = 1; c < cNr; c++) { //Compute the distance tmp_x = ps[tid].x - ctds[c].x; tmp_y = ps[tid].y - ctds[c].y; tmp = tmp_x*tmp_x + tmp_y*tmp_y; //If the current cluster distance is larger set the current cluster //as the closest one if ( cdis > tmp) { cdis = tmp; cIdx = c; } } //Set the points to clusters map for the current point p2ctds[tid] = cIdx; //Use stride to handle points that are out of the range of total threads tid += stride; } } //Kernel that computes the centroids' position as the average of the points assigned to them __global__ void centroids(Point2D * ps, int pNr, Point2D * ctds, int cNr, int * p2ctds, int * counters) { //Array in shared memory that contains the number of points of each cluster __shared__ int shared_counters[256]; //Thread index and stride size int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; //Reset the centroids' positions and number of points if(tid < cNr) { ctds[tid].x = ctds[tid].y = 0.0f; counters[tid] = 0; } //Initialize the shared array in each block if(threadIdx.x < cNr) { shared_counters[threadIdx.x] = 0; } __syncthreads(); //Go over all points using striding while(tid < pNr) { //Use atomicAdd to add the coords of the points to the clusters int ctdId = p2ctds[tid]; atomicAdd(&ctds[ctdId].x, ps[tid].x); atomicAdd(&ctds[ctdId].y, ps[tid].y); //Use atomicAdd to add the number of clusters to the shared array atomicAdd(&shared_counters[ctdId],1); //Use stride to handle points that are out of the range of total threads tid += stride; } __syncthreads(); //Add all the shared arrays to the global array if(threadIdx.x < cNr) { 
atomicAdd(&counters[threadIdx.x],shared_counters[threadIdx.x]); } } int main(int argc, char **argv) { //Check if the path of the file is provided if (argc < 2) { printf("Provide the address of input data file.\n"); return 0; } int pNr; //Number of points int cNr; //Number of centroids // Extract # of points and # of clusters // from the name of the input file if (sscanf(argv[1], "2DPoints_%d_C_%d.data", &pNr, &cNr) != 2) { printf("Error! Unexpected file name.\n"); return 0; } printf("Number of Points: %d\nNumber of Clusters: %d\n", pNr, cNr); Point2D * ps = new Point2D[pNr]; //Points Point2D * ctds = new Point2D[cNr]; //Centroids int * p2ctds = new int [pNr]; //Point to cluster map int counters[256]; //Number of points assigned to each cluster // Read observed points from the input file if (readPoints(argv[1], pNr, ps) == 0) printf("Error! Unable to open input file."); cudaDeviceProp prop; //Device properties CHECK( cudaGetDeviceProperties( &prop, 0 ) ); //Get the device properties //Device must have compute capability above 2 to use atomicAdd if(prop.major < 2) { printf("GPU's compute capability doesn't support 32-bit floating-point atomicAdd."); return 0; } // Initalize the cluster centroids initializeClusters(cNr, ps, ctds); Point2D * dev_ps, *dev_ctds; //Device points, centroids int * dev_p2ctds; //Device point to cluster map int * dev_counters; //Device number of points assigned to each cluster int blocks = 32; //Set the number of blocks to 32 int threads = prop.maxThreadsPerBlock; //Set the number of threads to the maximum possible //Cuda events for timing cudaEvent_t start, stop; //Allocate memory on device for device variables CHECK( cudaMalloc( (void**)&dev_ps, pNr*sizeof(Point2D))); CHECK( cudaMalloc( (void**)&dev_ctds, cNr*sizeof(Point2D))); CHECK( cudaMalloc( (void**)&dev_p2ctds, pNr*sizeof(int))); CHECK( cudaMalloc( (void**)&dev_counters, cNr*sizeof(int))); //Create events and start timer CHECK( cudaEventCreate(&start)); CHECK( cudaEventCreate(&stop)); CHECK( cudaEventRecord(start,0)); //Copy points to device CHECK( cudaMemcpy( dev_ps, ps, pNr*sizeof(Point2D), cudaMemcpyHostToDevice)); //For the number of iterations given for(int i =0 ; i < MAX_ITERATIONS ; i++) { //Copy the centroids position to device CHECK( cudaMemcpy( dev_ctds, ctds, cNr*sizeof(Point2D), cudaMemcpyHostToDevice)); //Call the kernel that assigns points to clusters assignClusters<<<blocks,threads>>>(dev_ps,pNr,dev_ctds,cNr,dev_p2ctds); //Call the kernel that computes the sum of the points position assigned to each cluster centroids<<<blocks,threads>>>(dev_ps,pNr,dev_ctds,cNr,dev_p2ctds,dev_counters); //Copy the number of points assigned to each cluster and the centroids positions to the host CHECK( cudaMemcpy( counters, dev_counters, cNr*sizeof(int), cudaMemcpyDeviceToHost)); CHECK( cudaMemcpy( ctds, dev_ctds, cNr*sizeof(Point2D), cudaMemcpyDeviceToHost)); //Do the division to find the average for(int i =0 ; i < cNr ; i++) { if(counters[i] > 0) { ctds[i].x = ctds[i].x/counters[i]; ctds[i].y = ctds[i].y/counters[i]; } else { ctds[i].x = ps[0].x; ctds[i].y = ps[0].y; } } } //Copy the points to clusters map to host CHECK( cudaMemcpy(p2ctds, dev_p2ctds, pNr*sizeof(int), cudaMemcpyDeviceToHost)); //Stop the timer CHECK( cudaEventRecord(stop, 0)); CHECK( cudaEventSynchronize(stop)); float elapsedTime; //Get elapsed time from event and print it CHECK( cudaEventElapsedTime( &elapsedTime, start, stop)); printf("Time : %3.1f ms \n", elapsedTime); //Destroy the timing events CHECK( cudaEventDestroy( start)); CHECK( 
cudaEventDestroy( stop)); //Clear the device resources cudaFree(dev_ps); cudaFree(dev_ctds); cudaFree(dev_p2ctds); cudaFree(dev_counters); //Print the clusters printClustres(cNr, pNr, ctds, ps, p2ctds, false); //Free the host arrays delete[] ps; delete[] ctds; delete[] p2ctds; return 0; }
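In the centroids kernel of both versions, the global accumulators (ctds and counters) are zeroed only by the threads with the lowest global ids while other blocks may already be issuing atomicAdds, and there is no grid-wide barrier between the reset and the accumulation, so correctness appears to depend on block scheduling. A common way to sidestep the question is to zero the accumulators from the host before the launch and keep only the accumulation on the device, as in the hedged sketch below; the per-block shared-memory counters used in the files are an orthogonal optimization that could be layered back on top. The names, grid size, and separate sums array are illustrative assumptions.

#include <cuda_runtime.h>

struct Pt { float x, y; };

// Accumulate point coordinates and counts per cluster.  The accumulators are
// zeroed on the host before each launch, so no block ever resets global memory
// that another block may already be updating.
__global__ void accumulate(const Pt *ps, int nPts, const int *assign,
                           Pt *sums, int *counts) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nPts;
         i += blockDim.x * gridDim.x) {                 // grid-stride loop
        int c = assign[i];
        atomicAdd(&sums[c].x, ps[i].x);
        atomicAdd(&sums[c].y, ps[i].y);
        atomicAdd(&counts[c], 1);
    }
}

// Host side of one accumulation step (device pointers assumed already allocated).
void accumulateStep(const Pt *d_ps, int nPts, const int *d_assign,
                    Pt *d_sums, int *d_counts, int nClusters) {
    cudaMemset(d_sums, 0, nClusters * sizeof(Pt));      // zero before the launch
    cudaMemset(d_counts, 0, nClusters * sizeof(int));
    accumulate<<<32, 256>>>(d_ps, nPts, d_assign, d_sums, d_counts);
    cudaDeviceSynchronize();
}

int main() {
    const int nPts = 1000, nClusters = 4;
    Pt *d_ps, *d_sums; int *d_assign, *d_counts;
    cudaMalloc(&d_ps, nPts * sizeof(Pt));
    cudaMalloc(&d_assign, nPts * sizeof(int));
    cudaMalloc(&d_sums, nClusters * sizeof(Pt));
    cudaMalloc(&d_counts, nClusters * sizeof(int));
    cudaMemset(d_ps, 0, nPts * sizeof(Pt));             // placeholder data
    cudaMemset(d_assign, 0, nPts * sizeof(int));
    accumulateStep(d_ps, nPts, d_assign, d_sums, d_counts, nClusters);
    cudaFree(d_ps); cudaFree(d_assign); cudaFree(d_sums); cudaFree(d_counts);
    return 0;
}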
c78ddb0c951a619c6e3f9652678ed9dbe338a8cb.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <math.h> #include <device_launch_parameters.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #define DATA_SIZE 99999 #define THREAD_NUM 256 #define BLOCK_NUM 32 void generate_numbers(int* number, int size) { for (int i = 0;i < size;i++) { number[i] = rand() % 10; } } __global__ static void sum_of_square(int *num, int *result) { extern __shared__ int shared[]; int bid = blockIdx.x; int tid = threadIdx.x; shared[tid] = 0; for (int i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM) { shared[tid] += num[i] * num[i]; } __syncthreads(); int offset = 1; int mask = 1; // just a copy while(offset < THREAD_NUM) { if((tid & mask) == 0) { shared[tid] += shared[tid + offset]; } offset += offset; mask = offset + mask; __syncthreads(); } if (tid == 0) { result[bid] = shared[0]; } } int main() { int* data = (int*)malloc(DATA_SIZE * sizeof(int)); generate_numbers(data, DATA_SIZE); int* gpu_data, * result; hipMalloc((void**)&gpu_data, DATA_SIZE * sizeof(int)); hipMalloc((void**)&result, BLOCK_NUM * sizeof(int)); hipMemcpy(gpu_data, data, DATA_SIZE * sizeof(int), hipMemcpyHostToDevice); sum_of_square << <BLOCK_NUM, THREAD_NUM, THREAD_NUM * sizeof(int) >> > (gpu_data, result); int sum[BLOCK_NUM]; hipMemcpy(sum, result, sizeof(int) * BLOCK_NUM, hipMemcpyDeviceToHost); hipFree(gpu_data); hipFree(result); int sum_gpu = 0; for (int i = 0;i < BLOCK_NUM;i++) { sum_gpu += sum[i]; } int sum_cpu = 0; for (int i = 0; i < DATA_SIZE; i++) { sum_cpu += data[i] * data[i]; } if (sum_cpu == sum_gpu) { printf("True\n"); } else { printf("False\n"); } free(data); return 0; }
c78ddb0c951a619c6e3f9652678ed9dbe338a8cb.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <math.h> #include <device_launch_parameters.h> #include <cuda_runtime_api.h> #include <cuda.h> #define DATA_SIZE 99999 #define THREAD_NUM 256 #define BLOCK_NUM 32 void generate_numbers(int* number, int size) { for (int i = 0;i < size;i++) { number[i] = rand() % 10; } } __global__ static void sum_of_square(int *num, int *result) { extern __shared__ int shared[]; int bid = blockIdx.x; int tid = threadIdx.x; shared[tid] = 0; for (int i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM) { shared[tid] += num[i] * num[i]; } __syncthreads(); int offset = 1; int mask = 1; // just a copy while(offset < THREAD_NUM) { if((tid & mask) == 0) { shared[tid] += shared[tid + offset]; } offset += offset; mask = offset + mask; __syncthreads(); } if (tid == 0) { result[bid] = shared[0]; } } int main() { int* data = (int*)malloc(DATA_SIZE * sizeof(int)); generate_numbers(data, DATA_SIZE); int* gpu_data, * result; cudaMalloc((void**)&gpu_data, DATA_SIZE * sizeof(int)); cudaMalloc((void**)&result, BLOCK_NUM * sizeof(int)); cudaMemcpy(gpu_data, data, DATA_SIZE * sizeof(int), cudaMemcpyHostToDevice); sum_of_square << <BLOCK_NUM, THREAD_NUM, THREAD_NUM * sizeof(int) >> > (gpu_data, result); int sum[BLOCK_NUM]; cudaMemcpy(sum, result, sizeof(int) * BLOCK_NUM, cudaMemcpyDeviceToHost); cudaFree(gpu_data); cudaFree(result); int sum_gpu = 0; for (int i = 0;i < BLOCK_NUM;i++) { sum_gpu += sum[i]; } int sum_cpu = 0; for (int i = 0; i < DATA_SIZE; i++) { sum_cpu += data[i] * data[i]; } if (sum_cpu == sum_gpu) { printf("True\n"); } else { printf("False\n"); } free(data); return 0; }
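The reduction in both files uses interleaved addressing: offset doubles each round and the (tid & mask) test picks the threads that add in their right-hand neighbour, which leaves the active threads scattered across warps. A frequently used alternative is sequential addressing, where the active threads stay contiguous; the hedged sketch below combines it with the same grid-stride partial-sum loop. Block count, thread count, and names are illustrative, not taken from the files.

#include <cuda_runtime.h>
#include <cstdio>

#define THREADS 256

// Sum of squares with a sequential-addressing shared-memory reduction.
__global__ void sumOfSquares(const int *num, int *blockSums, int n) {
    __shared__ int shared[THREADS];
    int tid = threadIdx.x;
    int sum = 0;
    for (int i = blockIdx.x * THREADS + tid; i < n; i += THREADS * gridDim.x)
        sum += num[i] * num[i];                   // grid-stride partial sum
    shared[tid] = sum;
    __syncthreads();
    for (int offset = THREADS / 2; offset > 0; offset >>= 1) {
        if (tid < offset)
            shared[tid] += shared[tid + offset];  // active threads stay contiguous
        __syncthreads();
    }
    if (tid == 0) blockSums[blockIdx.x] = shared[0];
}

int main() {
    const int n = 99999, blocks = 32;
    int *h = new int[n];
    for (int i = 0; i < n; ++i) h[i] = i % 10;
    int *d_in, *d_out, h_out[blocks];
    cudaMalloc(&d_in, n * sizeof(int));
    cudaMalloc(&d_out, blocks * sizeof(int));
    cudaMemcpy(d_in, h, n * sizeof(int), cudaMemcpyHostToDevice);
    sumOfSquares<<<blocks, THREADS>>>(d_in, d_out, n);
    cudaMemcpy(h_out, d_out, blocks * sizeof(int), cudaMemcpyDeviceToHost);
    long long total = 0;
    for (int i = 0; i < blocks; ++i) total += h_out[i];
    printf("sum of squares = %lld\n", total);
    cudaFree(d_in); cudaFree(d_out); delete[] h;
    return 0;
}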
19b25e47d8eeb2d621c0dda1d21f1b5864ef009f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from sparse/blas/zmergebicgstab.cu, normal z -> s, Thu Oct 8 23:05:47 2020 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_s // These routines merge multiple kernels from bicgstab into one. /* -------------------------------------------------------------------------- */ __global__ void magma_sbicgstab_1_kernel( int num_rows, int num_cols, float beta, float omega, float *r, float *v, float *p ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ p[ i+j*num_rows ] = r[ i+j*num_rows ] + beta * ( p[ i+j*num_rows ] - omega * v[ i+j*num_rows ] ); } } } /** Purpose ------- Mergels multiple operations into one kernel: p = r + beta * ( p - omega * v ) @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta float scalar @param[in] omega float scalar @param[in] r magmaFloat_ptr vector @param[in] v magmaFloat_ptr vector @param[in,out] p magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sbicgstab_1( magma_int_t num_rows, magma_int_t num_cols, float beta, float omega, magmaFloat_ptr r, magmaFloat_ptr v, magmaFloat_ptr p, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_sbicgstab_1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, beta, omega, r, v, p ); return MAGMA_SUCCESS; } __global__ void magma_sbicgstab_2_kernel( int num_rows, int num_cols, float alpha, magmaFloat_ptr r, magmaFloat_ptr v, magmaFloat_ptr s ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ s[ i+j*num_rows ] = r[ i+j*num_rows ] - alpha * v[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: s = r - alpha v Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha float scalar @param[in] r magmaFloat_ptr vector @param[in] v magmaFloat_ptr vector @param[in,out] s magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sbicgstab_2( magma_int_t num_rows, magma_int_t num_cols, float alpha, magmaFloat_ptr r, magmaFloat_ptr v, magmaFloat_ptr s, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_sbicgstab_2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, r, v, s ); return MAGMA_SUCCESS; } __global__ void magma_sbicgstab_3_kernel( int num_rows, int num_cols, float alpha, float omega, float *p, float *s, float *t, float *x, float *r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ float tmp = s[ i+j*num_rows ]; x[ i+j*num_rows ] = x[ i+j*num_rows ] + alpha * p[ i+j*num_rows ] + omega * tmp; r[ i+j*num_rows ] = tmp - omega * t[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: x = x + alpha * p + omega * s r = s - omega * t Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha float scalar @param[in] omega float scalar @param[in] p magmaFloat_ptr vector @param[in] s magmaFloat_ptr vector @param[in] t magmaFloat_ptr vector @param[in,out] x magmaFloat_ptr vector @param[in,out] r magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sbicgstab_3( magma_int_t num_rows, magma_int_t num_cols, float alpha, float omega, magmaFloat_ptr p, magmaFloat_ptr s, magmaFloat_ptr t, magmaFloat_ptr x, magmaFloat_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_sbicgstab_3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, omega, p, s, t, x, r ); return MAGMA_SUCCESS; } __global__ void magma_sbicgstab_4_kernel( int num_rows, int num_cols, float alpha, float omega, float *y, float *z, float *s, float *t, float *x, float *r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ x[ i+j*num_rows ] = x[ i+j*num_rows ] + alpha * y[ i+j*num_rows ] + omega * z[ i+j*num_rows ]; r[ i+j*num_rows ] = s[ i+j*num_rows ] - omega * t[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: x = x + alpha * y + omega * z r = s - omega * t Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha float scalar @param[in] omega float scalar @param[in] y magmaFloat_ptr vector @param[in] z magmaFloat_ptr vector @param[in] s magmaFloat_ptr vector @param[in] t magmaFloat_ptr vector @param[in,out] x magmaFloat_ptr vector @param[in,out] r magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sbicgstab_4( magma_int_t num_rows, magma_int_t num_cols, float alpha, float omega, magmaFloat_ptr y, magmaFloat_ptr z, magmaFloat_ptr s, magmaFloat_ptr t, magmaFloat_ptr x, magmaFloat_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_sbicgstab_4_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , num_rows, num_cols, alpha, omega, y, z, s, t, x, r ); return MAGMA_SUCCESS; }
19b25e47d8eeb2d621c0dda1d21f1b5864ef009f.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @generated from sparse/blas/zmergebicgstab.cu, normal z -> s, Thu Oct 8 23:05:47 2020 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 #define PRECISION_s // These routines merge multiple kernels from bicgstab into one. /* -------------------------------------------------------------------------- */ __global__ void magma_sbicgstab_1_kernel( int num_rows, int num_cols, float beta, float omega, float *r, float *v, float *p ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ p[ i+j*num_rows ] = r[ i+j*num_rows ] + beta * ( p[ i+j*num_rows ] - omega * v[ i+j*num_rows ] ); } } } /** Purpose ------- Mergels multiple operations into one kernel: p = r + beta * ( p - omega * v ) @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] beta float scalar @param[in] omega float scalar @param[in] r magmaFloat_ptr vector @param[in] v magmaFloat_ptr vector @param[in,out] p magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sbicgstab_1( magma_int_t num_rows, magma_int_t num_cols, float beta, float omega, magmaFloat_ptr r, magmaFloat_ptr v, magmaFloat_ptr p, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_sbicgstab_1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, beta, omega, r, v, p ); return MAGMA_SUCCESS; } __global__ void magma_sbicgstab_2_kernel( int num_rows, int num_cols, float alpha, magmaFloat_ptr r, magmaFloat_ptr v, magmaFloat_ptr s ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ s[ i+j*num_rows ] = r[ i+j*num_rows ] - alpha * v[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: s = r - alpha v Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha float scalar @param[in] r magmaFloat_ptr vector @param[in] v magmaFloat_ptr vector @param[in,out] s magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sbicgstab_2( magma_int_t num_rows, magma_int_t num_cols, float alpha, magmaFloat_ptr r, magmaFloat_ptr v, magmaFloat_ptr s, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_sbicgstab_2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, r, v, s ); return MAGMA_SUCCESS; } __global__ void magma_sbicgstab_3_kernel( int num_rows, int num_cols, float alpha, float omega, float *p, float *s, float *t, float *x, float *r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ float tmp = s[ i+j*num_rows ]; x[ i+j*num_rows ] = x[ i+j*num_rows ] + alpha * p[ i+j*num_rows ] + omega * tmp; r[ i+j*num_rows ] = tmp - omega * t[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: x = x + alpha * p + omega * s r = s - omega * t Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha float scalar @param[in] omega float scalar @param[in] p magmaFloat_ptr vector @param[in] s magmaFloat_ptr vector @param[in] t magmaFloat_ptr vector @param[in,out] x magmaFloat_ptr vector @param[in,out] r magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sbicgstab_3( magma_int_t num_rows, magma_int_t num_cols, float alpha, float omega, magmaFloat_ptr p, magmaFloat_ptr s, magmaFloat_ptr t, magmaFloat_ptr x, magmaFloat_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_sbicgstab_3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, omega, p, s, t, x, r ); return MAGMA_SUCCESS; } __global__ void magma_sbicgstab_4_kernel( int num_rows, int num_cols, float alpha, float omega, float *y, float *z, float *s, float *t, float *x, float *r ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<num_rows ) { for( int j=0; j<num_cols; j++ ){ x[ i+j*num_rows ] = x[ i+j*num_rows ] + alpha * y[ i+j*num_rows ] + omega * z[ i+j*num_rows ]; r[ i+j*num_rows ] = s[ i+j*num_rows ] - omega * t[ i+j*num_rows ]; } } } /** Purpose ------- Mergels multiple operations into one kernel: x = x + alpha * y + omega * z r = s - omega * t Arguments --------- @param[in] num_rows magma_int_t dimension m @param[in] num_cols magma_int_t dimension n @param[in] alpha float scalar @param[in] omega float scalar @param[in] y magmaFloat_ptr vector @param[in] z magmaFloat_ptr vector @param[in] s magmaFloat_ptr vector @param[in] t magmaFloat_ptr vector @param[in,out] x magmaFloat_ptr vector @param[in,out] r magmaFloat_ptr vector @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_sgegpuk ********************************************************************/ extern "C" magma_int_t magma_sbicgstab_4( magma_int_t num_rows, magma_int_t num_cols, float alpha, float omega, magmaFloat_ptr y, magmaFloat_ptr z, magmaFloat_ptr s, magmaFloat_ptr t, magmaFloat_ptr x, magmaFloat_ptr r, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( num_rows, BLOCK_SIZE ) ); magma_sbicgstab_4_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( num_rows, num_cols, alpha, omega, y, z, s, t, x, r ); return MAGMA_SUCCESS; }
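Each routine in this file fuses what would otherwise be several BLAS-1 calls into a single pass over the vectors, so every element is read and written once per update. The hedged sketch below shows the idea behind magma_sbicgstab_3 for a single right-hand side, outside of MAGMA's magma_queue_t / magmaFloat_ptr types; the 512-thread block and ceiling-division grid mirror the file, everything else is illustrative.

#include <cuda_runtime.h>

// Fused BiCGSTAB-style update, one pass instead of several axpy calls:
//   x = x + alpha*p + omega*s
//   r = s - omega*t
__global__ void fusedUpdate(int n, float alpha, float omega,
                            const float *p, const float *s, const float *t,
                            float *x, float *r) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float si = s[i];                          // load s once, reuse for both updates
        x[i] = x[i] + alpha * p[i] + omega * si;
        r[i] = si - omega * t[i];
    }
}

// Launch helper mirroring the Gs/Bs computation in the file (block size 512).
void fusedUpdateLaunch(int n, float alpha, float omega,
                       const float *p, const float *s, const float *t,
                       float *x, float *r, cudaStream_t stream) {
    dim3 Bs(512), Gs((n + 511) / 512);
    fusedUpdate<<<Gs, Bs, 0, stream>>>(n, alpha, omega, p, s, t, x, r);
}

int main() {
    const int n = 1 << 12;
    float *p, *s, *t, *x, *r;
    cudaMallocManaged(&p, n * sizeof(float)); cudaMallocManaged(&s, n * sizeof(float));
    cudaMallocManaged(&t, n * sizeof(float)); cudaMallocManaged(&x, n * sizeof(float));
    cudaMallocManaged(&r, n * sizeof(float));
    for (int i = 0; i < n; ++i) { p[i] = 1.f; s[i] = 2.f; t[i] = 3.f; x[i] = 0.f; }
    fusedUpdateLaunch(n, 0.5f, 0.25f, p, s, t, x, r, 0);
    cudaDeviceSynchronize();
    cudaFree(p); cudaFree(s); cudaFree(t); cudaFree(x); cudaFree(r);
    return 0;
}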
0397a1042d5a0d555307af251ab9014bb07e005b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime_api.h> //#include <cutil.h> #include <hip/hip_runtime.h> #include <string> #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 196608 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; //int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; // for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { //index = (block_id * elements_per_block) + (warp_id * elements_per_warp); index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))]; } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; // int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; //int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[index1])); tmp_ptr = (void **)(&(array[index1])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { /* __asm volatile ( ".reg .f32 %r14;\n\t" "mov.f32 %r14, 2.2;\n\t" ); */ for (int l = 0; l < iterations; l++) { f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned 
long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long 
long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned 
long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { hipProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; hipError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 1 is %s\n", hipGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice); hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice); hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 2 is %s\n", hipGetErrorString(error_id)); } hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); hipDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipProfilerStart(); hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 
0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); hipDeviceSynchronize(); ///hipDeviceSynchronize (); hipProfilerStop(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 3 is %s\n", hipGetErrorString(error_id)); } /* copy results from GPU to CPU */ hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost); hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost); hipDeviceSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ hipFree(d_a); hipFree(d_ptr_a); hipFree(duration); hipDeviceSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
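// --- Illustrative sketch (not part of the original benchmark) ----------------
// The host initialization above links h_a into a pointer chain: element i
// stores the address of element (i + 1 + stride) % N, and the kernel walks that
// chain with tmp_ptr = (void**)(*tmp_ptr).  (The device copy is later re-linked
// by init_memory with a per-warp (i + 48) pattern.)  A minimal CPU-side walk of
// the host chain, handy for sanity-checking a stride value; the function name
// is an assumption for illustration only:
static void walk_chain_on_host(const unsigned long long *a, int steps)
{
    const unsigned long long *p = &a[0];
    for (int s = 0; s < steps; s++) {
        p = (const unsigned long long *)(*p);   // follow the stored address
        printf("step %d -> element %ld\n", s + 1, (long)(p - a));
    }
}
// Example invocation of the benchmark itself (main() expects five arguments):
//   ./binary <num_blocks> <num_threads_per_block> <iterations> <divergence> <stride>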
0397a1042d5a0d555307af251ab9014bb07e005b.cu
#include <stdio.h> #include <iostream> #include <cuda_profiler_api.h> //#include <cutil.h> #include <cuda_runtime.h> #include <string> #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 196608 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; //int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; // for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { //index = (block_id * elements_per_block) + (warp_id * elements_per_warp); index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))]; } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; // int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; // int elements_per_warp = elements_per_block / num_warps_per_block; int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; //int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[index1])); tmp_ptr = (void **)(&(array[index1])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { /* __asm volatile ( ".reg .f32 %r14;\n\t" "mov.f32 %r14, 2.2;\n\t" ); */ for (int l = 0; l < iterations; l++) { f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 
= f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + 
(unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); 
tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { cudaProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; cudaError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 1 is %s\n", cudaGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 2 is %s\n", cudaGetErrorString(error_id)); } init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); cudaDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaProfilerStart(); cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); 
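// Note: the time printed below comes from the cudaEvent pair bracketing this
// launch; the per-thread values written into "duration" by the kernel only keep
// the dependent loads live and are not part of the reported number (the
// avg/min/max printf that would use them is commented out further down).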
cudaDeviceSynchronize(); ///cudaThreadSynchronize (); cudaProfilerStop(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 3 is %s\n", cudaGetErrorString(error_id)); } /* copy results from GPU to CPU */ cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost); cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost); cudaThreadSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ cudaFree(d_a); cudaFree(d_ptr_a); cudaFree(duration); cudaThreadSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
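// --- Illustrative sketch (not part of the original file) ---------------------
// The host code above repeats the cudaGetLastError()/printf pattern after each
// API call and still uses cudaThreadSynchronize(), which is deprecated in favor
// of cudaDeviceSynchronize().  A minimal checking macro that could wrap the same
// calls; the macro name is an assumption, not part of the original:
#define CUDA_CHECK_SKETCH(call)                                               \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            printf("CUDA error %s at %s:%d\n",                                \
                   cudaGetErrorString(err_), __FILE__, __LINE__);             \
        }                                                                     \
    } while (0)
// Example use:
//   CUDA_CHECK_SKETCH(cudaMemcpy(d_a, h_a, sizeof(unsigned long long) * N,
//                                cudaMemcpyHostToDevice));
//   CUDA_CHECK_SKETCH(cudaDeviceSynchronize());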
0a349e908f36e125dfe7ec55896c24941d5dffb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. #include <c10/util/Half.h> #include "filtered_lrelu.h" #include <cstdint> //------------------------------------------------------------------------ // Helpers. enum // Filter modes. { MODE_SUSD = 0, // Separable upsampling, separable downsampling. MODE_FUSD = 1, // Full upsampling, separable downsampling. MODE_SUFD = 2, // Separable upsampling, full downsampling. MODE_FUFD = 3, // Full upsampling, full downsampling. }; template <class T> struct InternalType; template <> struct InternalType<double> { typedef double scalar_t; typedef double2 vec2_t; typedef double4 vec4_t; __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_double2(0, 0); } __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_double4(0, 0, 0, 0); } __device__ __forceinline__ static double clamp(double x, double c) { return fmin(fmax(x, -c), c); } }; template <> struct InternalType<float> { typedef float scalar_t; typedef float2 vec2_t; typedef float4 vec4_t; __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_float2(0, 0); } __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_float4(0, 0, 0, 0); } __device__ __forceinline__ static float clamp(float x, float c) { return fminf(fmaxf(x, -c), c); } }; template <> struct InternalType<c10::Half> { typedef float scalar_t; typedef float2 vec2_t; typedef float4 vec4_t; __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_float2(0, 0); } __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_float4(0, 0, 0, 0); } __device__ __forceinline__ static float clamp(float x, float c) { return fminf(fmaxf(x, -c), c); } }; #define MIN(A, B) ((A) < (B) ? (A) : (B)) #define MAX(A, B) ((A) > (B) ? (A) : (B)) #define CEIL_DIV(A, B) (((B)==1) ? (A) : \ ((B)==2) ? ((int)((A)+1) >> 1) : \ ((B)==4) ? ((int)((A)+3) >> 2) : \ (((A) + ((A) > 0 ? (B) - 1 : 0)) / (B))) // This works only up to blocks of size 256 x 256 and for all N that are powers of two. template <int N> __device__ __forceinline__ void fast_div_mod(int& x, int& y, unsigned int i) { if ((N & (N-1)) && N <= 256) y = (i * ((1<<24)/N + 1)) >> 24; // Assumes N <= 256, i < N*256. else y = i/N; x = i - y*N; } // Type cast stride before reading it. template <class T> __device__ __forceinline__ T get_stride(const int64_t& x) { return *reinterpret_cast<const T*>(&x); } //------------------------------------------------------------------------ // Filters, setup kernel, copying function. #define MAX_FILTER_SIZE 32 // Combined up/down filter buffers so that transfer can be done with one copy. __device__ float g_fbuf[2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE]; // Filters in global memory, written by setup kernel. __device__ __constant__ float c_fbuf[2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE]; // Filters in constant memory, read by main kernel. // Accessors to combined buffers to index up/down filters individually. 
#define c_fu (c_fbuf) #define c_fd (c_fbuf + MAX_FILTER_SIZE * MAX_FILTER_SIZE) #define g_fu (g_fbuf) #define g_fd (g_fbuf + MAX_FILTER_SIZE * MAX_FILTER_SIZE) // Set up filters into global memory buffer. static __global__ void setup_filters_kernel(filtered_lrelu_kernel_params p) { for (int idx = threadIdx.x; idx < MAX_FILTER_SIZE * MAX_FILTER_SIZE; idx += blockDim.x) { int x, y; fast_div_mod<MAX_FILTER_SIZE>(x, y, idx); int fu_x = p.flip ? x : (p.fuShape.x - 1 - x); int fu_y = p.flip ? y : (p.fuShape.y - 1 - y); if (p.fuShape.y > 0) g_fu[idx] = (x >= p.fuShape.x || y >= p.fuShape.y) ? 0.0f : p.fu[fu_x * p.fuStride.x + fu_y * p.fuStride.y]; else g_fu[idx] = (x >= p.fuShape.x || y > 0) ? 0.0f : p.fu[fu_x * p.fuStride.x]; int fd_x = p.flip ? x : (p.fdShape.x - 1 - x); int fd_y = p.flip ? y : (p.fdShape.y - 1 - y); if (p.fdShape.y > 0) g_fd[idx] = (x >= p.fdShape.x || y >= p.fdShape.y) ? 0.0f : p.fd[fd_x * p.fdStride.x + fd_y * p.fdStride.y]; else g_fd[idx] = (x >= p.fdShape.x || y > 0) ? 0.0f : p.fd[fd_x * p.fdStride.x]; } } // Host function to copy filters written by setup kernel into constant buffer for main kernel. template <bool, bool> static hipError_t copy_filters(hipStream_t stream) { void* src = 0; hipError_t err = hipGetSymbolAddress(&src, g_fbuf); if (err) return err; return hipMemcpyToSymbolAsync(c_fbuf, src, 2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE * sizeof(float), 0, hipMemcpyDeviceToDevice, stream); } //------------------------------------------------------------------------ // Coordinate spaces: // - Relative to input tensor: inX, inY, tileInX, tileInY // - Relative to input tile: relInX, relInY, tileInW, tileInH // - Relative to upsampled tile: relUpX, relUpY, tileUpW, tileUpH // - Relative to output tile: relOutX, relOutY, tileOutW, tileOutH // - Relative to output tensor: outX, outY, tileOutX, tileOutY // // Relationships between coordinate spaces: // - inX = tileInX + relInX // - inY = tileInY + relInY // - relUpX = relInX * up + phaseInX // - relUpY = relInY * up + phaseInY // - relUpX = relOutX * down // - relUpY = relOutY * down // - outX = tileOutX + relOutX // - outY = tileOutY + relOutY extern __shared__ char s_buf_raw[]; // When sharedKB <= 48, allocate shared memory statically inside the kernel, otherwise use the externally allocated shared memory buffer. template <class T, class index_t, int sharedKB, bool signWrite, bool signRead, int filterMode, int up, int fuSize, int down, int fdSize, int tileOutW, int tileOutH, int threadsPerBlock, bool enableXrep, bool enableWriteSkip> static __global__ void filtered_lrelu_kernel(filtered_lrelu_kernel_params p) { // Check that we don't try to support non-existing filter modes. 
static_assert(up == 1 || up == 2 || up == 4, "only up=1, up=2, up=4 scales supported"); static_assert(down == 1 || down == 2 || down == 4, "only down=1, down=2, down=4 scales supported"); static_assert(fuSize >= up, "upsampling filter size must be at least upsampling factor"); static_assert(fdSize >= down, "downsampling filter size must be at least downsampling factor"); static_assert(fuSize % up == 0, "upsampling filter size must be divisible with upsampling factor"); static_assert(fdSize % down == 0, "downsampling filter size must be divisible with downsampling factor"); static_assert(fuSize <= MAX_FILTER_SIZE && fdSize <= MAX_FILTER_SIZE, "filter size greater than MAX_FILTER_SIZE"); static_assert(up != 1 || (fuSize == 1 && (filterMode == MODE_FUFD || filterMode == MODE_FUSD)), "up=1 supported only for 1x1 full filters"); static_assert(down != 1 || (fdSize == 1 && (filterMode == MODE_FUFD || filterMode == MODE_SUFD)), "down=1 supported only for 1x1 full filters"); static_assert(!(up == 4 && (filterMode == MODE_FUFD || filterMode == MODE_FUSD)), "full filters not supported for up=4"); static_assert(!(down == 4 && (filterMode == MODE_FUFD || filterMode == MODE_SUFD)), "full filters not supported for down=4"); // Static definitions. typedef typename InternalType<T>::scalar_t scalar_t; typedef typename InternalType<T>::vec2_t vec2_t; typedef typename InternalType<T>::vec4_t vec4_t; const int tileUpW = (tileOutW * down + (fdSize - 1) - (down - 1) + 3) & ~3; // Upsampled tile width, rounded up to multiple of 4. const int tileUpH = tileOutH * down + (fdSize - 1) - (down - 1); // Upsampled tile height. const int tileInW = CEIL_DIV(tileUpW + (fuSize - 1), up); // Input tile width. const int tileInH = CEIL_DIV(tileUpH + (fuSize - 1), up); // Input tile height. const int tileUpH_up = CEIL_DIV(tileUpH, up) * up; // Upsampled tile height rounded up to a multiple of up. const int tileInH_up = CEIL_DIV(tileUpH_up + (fuSize - 1), up); // For allocations only, to avoid shared memory read overruns with up=2 and up=4. // Merge 1x1 downsampling into last upsampling step for upf1 and ups2. const bool downInline = (down == 1) && ((up == 1 && filterMode == MODE_FUFD) || (up == 2 && filterMode == MODE_SUFD)); // Sizes of logical buffers. const int szIn = tileInH_up * tileInW; const int szUpX = tileInH_up * tileUpW; const int szUpXY = downInline ? 0 : (tileUpH * tileUpW); const int szDownX = tileUpH * tileOutW; // Sizes for shared memory arrays. const int s_buf0_size_base = (filterMode == MODE_SUSD) ? MAX(szIn, szUpXY) : (filterMode == MODE_FUSD) ? MAX(szIn, szDownX) : (filterMode == MODE_SUFD) ? MAX(szIn, szUpXY) : (filterMode == MODE_FUFD) ? szIn : -1; const int s_buf1_size_base = (filterMode == MODE_SUSD) ? MAX(szUpX, szDownX) : (filterMode == MODE_FUSD) ? szUpXY : (filterMode == MODE_SUFD) ? szUpX : (filterMode == MODE_FUFD) ? szUpXY : -1; // Ensure U128 alignment. const int s_buf0_size = (s_buf0_size_base + 3) & ~3; const int s_buf1_size = (s_buf1_size_base + 3) & ~3; // Check at compile time that we don't use too much shared memory. static_assert((s_buf0_size + s_buf1_size) * sizeof(scalar_t) <= (sharedKB << 10), "shared memory overflow"); // Declare shared memory arrays. scalar_t* s_buf0; scalar_t* s_buf1; if (sharedKB <= 48) { // Allocate shared memory arrays here. __shared__ scalar_t s_buf0_st[(sharedKB > 48) ? (1<<24) : (s_buf0_size + s_buf1_size)]; // Prevent launching if this isn't optimized away when unused. 
s_buf0 = s_buf0_st; s_buf1 = s_buf0 + s_buf0_size; } else { // Use the dynamically allocated shared memory array. s_buf0 = (scalar_t*)s_buf_raw; s_buf1 = s_buf0 + s_buf0_size; } // Pointers to the buffers. scalar_t* s_tileIn; // Input tile: [relInX * tileInH + relInY] scalar_t* s_tileUpX; // After horizontal upsampling: [relInY * tileUpW + relUpX] scalar_t* s_tileUpXY; // After upsampling: [relUpY * tileUpW + relUpX] scalar_t* s_tileDownX; // After horizontal downsampling: [relUpY * tileOutW + relOutX] if (filterMode == MODE_SUSD) { s_tileIn = s_buf0; s_tileUpX = s_buf1; s_tileUpXY = s_buf0; s_tileDownX = s_buf1; } else if (filterMode == MODE_FUSD) { s_tileIn = s_buf0; s_tileUpXY = s_buf1; s_tileDownX = s_buf0; } else if (filterMode == MODE_SUFD) { s_tileIn = s_buf0; s_tileUpX = s_buf1; s_tileUpXY = s_buf0; } else if (filterMode == MODE_FUFD) { s_tileIn = s_buf0; s_tileUpXY = s_buf1; } // Allow large grids in z direction via per-launch offset. int channelIdx = blockIdx.z + p.blockZofs; int batchIdx = channelIdx / p.yShape.z; channelIdx -= batchIdx * p.yShape.z; // Offset to output feature map. In bytes. index_t mapOfsOut = channelIdx * get_stride<index_t>(p.yStride.z) + batchIdx * get_stride<index_t>(p.yStride.w); // Sign shift amount. uint32_t signXo = ((threadIdx.x + p.sOfs.x) << 1) & 6; // Inner tile loop. #pragma unroll 1 for (int tileIdx = 0; !enableXrep || (tileIdx < MIN(p.tilesXrep, p.tilesXdim - p.tilesXrep * blockIdx.y)); tileIdx++) { // Locate output tile. int tileX = enableXrep ? blockIdx.y * p.tilesXrep + tileIdx : blockIdx.x; int tileOutX = tileX * tileOutW; int tileOutY = (enableXrep ? blockIdx.x : blockIdx.y) * tileOutH; // Locate input tile. int tmpX = tileOutX * down - p.pad0.x; int tmpY = tileOutY * down - p.pad0.y; int tileInX = CEIL_DIV(tmpX, up); int tileInY = CEIL_DIV(tmpY, up); const int phaseInX = tileInX * up - tmpX; const int phaseInY = tileInY * up - tmpY; // Extra sync if input and output buffers are the same and we are not on first tile. if (enableXrep && tileIdx > 0 && (filterMode == MODE_FUSD || (filterMode == MODE_SUFD && !downInline) || (filterMode == MODE_FUFD && downInline))) __syncthreads(); // Load input tile & apply bias. Unrolled. scalar_t b = (scalar_t)*(const T*)((const char*)p.b + (channelIdx * get_stride<index_t>(p.bStride))); index_t mapOfsIn = channelIdx * get_stride<index_t>(p.xStride.z) + batchIdx * get_stride<index_t>(p.xStride.w); int idx = threadIdx.x; const int loopCountIN = CEIL_DIV(tileInW * tileInH, threadsPerBlock); #pragma unroll for (int loop = 0; loop < loopCountIN; loop++) { int relInX, relInY; fast_div_mod<tileInW>(relInX, relInY, idx); int inX = tileInX + relInX; int inY = tileInY + relInY; scalar_t v = 0; if ((uint32_t)inX < p.xShape.x && (uint32_t)inY < p.xShape.y) v = (scalar_t)*((const T*)((const char*)p.x + (inX * get_stride<index_t>(p.xStride.x) + inY * get_stride<index_t>(p.xStride.y) + mapOfsIn))) + b; bool skip = (loop == loopCountIN-1) && (idx >= tileInW * tileInH); if (!skip) s_tileIn[idx] = v; idx += threadsPerBlock; } if (filterMode == MODE_SUSD || filterMode == MODE_SUFD) // Separable upsampling filter. { // Horizontal upsampling. 
__syncthreads(); if (up == 4) { for (int idx = threadIdx.x*up; idx < tileUpW * tileInH; idx += blockDim.x*up) { int relUpX0, relInY; fast_div_mod<tileUpW>(relUpX0, relInY, idx); int relInX0 = relUpX0 / up; int src0 = relInX0 + tileInW * relInY; int dst = relInY * tileUpW + relUpX0; vec4_t v = InternalType<T>::zero_vec4(); scalar_t a = s_tileIn[src0]; if (phaseInX == 0) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 0]; a = s_tileIn[src0 + step + 1]; v.y += a * (scalar_t)c_fu[step * up + 3]; v.z += a * (scalar_t)c_fu[step * up + 2]; v.w += a * (scalar_t)c_fu[step * up + 1]; } } else if (phaseInX == 1) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 1]; v.y += a * (scalar_t)c_fu[step * up + 0]; a = s_tileIn[src0 + step + 1]; v.z += a * (scalar_t)c_fu[step * up + 3]; v.w += a * (scalar_t)c_fu[step * up + 2]; } } else if (phaseInX == 2) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 2]; v.y += a * (scalar_t)c_fu[step * up + 1]; v.z += a * (scalar_t)c_fu[step * up + 0]; a = s_tileIn[src0 + step + 1]; v.w += a * (scalar_t)c_fu[step * up + 3]; } } else // (phaseInX == 3) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 3]; v.y += a * (scalar_t)c_fu[step * up + 2]; v.z += a * (scalar_t)c_fu[step * up + 1]; v.w += a * (scalar_t)c_fu[step * up + 0]; a = s_tileIn[src0 + step + 1]; } } s_tileUpX[dst+0] = v.x; s_tileUpX[dst+1] = v.y; s_tileUpX[dst+2] = v.z; s_tileUpX[dst+3] = v.w; } } else if (up == 2) { bool p0 = (phaseInX == 0); for (int idx = threadIdx.x*up; idx < tileUpW * tileInH; idx += blockDim.x*up) { int relUpX0, relInY; fast_div_mod<tileUpW>(relUpX0, relInY, idx); int relInX0 = relUpX0 / up; int src0 = relInX0 + tileInW * relInY; int dst = relInY * tileUpW + relUpX0; vec2_t v = InternalType<T>::zero_vec2(); scalar_t a = s_tileIn[src0]; if (p0) // (phaseInX == 0) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 0]; a = s_tileIn[src0 + step + 1]; v.y += a * (scalar_t)c_fu[step * up + 1]; } } else // (phaseInX == 1) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 1]; v.y += a * (scalar_t)c_fu[step * up + 0]; a = s_tileIn[src0 + step + 1]; } } s_tileUpX[dst+0] = v.x; s_tileUpX[dst+1] = v.y; } } // Vertical upsampling & nonlinearity. __syncthreads(); int groupMask = 15 << ((threadIdx.x & 31) & ~3); int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH : 0; // Skip already written signs. int sShapeMaxY = MIN(p.sShape.y, tileOutY * down + tileUpH); // Avoid out-of-tile sign writes. if (up == 4) { minY -= 3; // Adjust according to block height. 
for (int idx = threadIdx.x; idx < tileUpW * tileUpH_up / up; idx += blockDim.x) { int relUpX, relInY0; fast_div_mod<tileUpW>(relUpX, relInY0, idx); int relUpY0 = relInY0 * up; int src0 = relInY0 * tileUpW + relUpX; int dst = relUpY0 * tileUpW + relUpX; vec4_t v = InternalType<T>::zero_vec4(); scalar_t a = s_tileUpX[src0]; if (phaseInY == 0) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 0]; a = s_tileUpX[src0 + (step + 1) * tileUpW]; v.y += a * (scalar_t)c_fu[step * up + 3]; v.z += a * (scalar_t)c_fu[step * up + 2]; v.w += a * (scalar_t)c_fu[step * up + 1]; } } else if (phaseInY == 1) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 1]; v.y += a * (scalar_t)c_fu[step * up + 0]; a = s_tileUpX[src0 + (step + 1) * tileUpW]; v.z += a * (scalar_t)c_fu[step * up + 3]; v.w += a * (scalar_t)c_fu[step * up + 2]; } } else if (phaseInY == 2) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 2]; v.y += a * (scalar_t)c_fu[step * up + 1]; v.z += a * (scalar_t)c_fu[step * up + 0]; a = s_tileUpX[src0 + (step + 1) * tileUpW]; v.w += a * (scalar_t)c_fu[step * up + 3]; } } else // (phaseInY == 3) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 3]; v.y += a * (scalar_t)c_fu[step * up + 2]; v.z += a * (scalar_t)c_fu[step * up + 1]; v.w += a * (scalar_t)c_fu[step * up + 0]; a = s_tileUpX[src0 + (step + 1) * tileUpW]; } } int x = tileOutX * down + relUpX; int y = tileOutY * down + relUpY0; int signX = x + p.sOfs.x; int signY = y + p.sOfs.y; int signZ = blockIdx.z + p.blockZofs; int signXb = signX >> 2; index_t si0 = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); index_t si1 = si0 + p.sShape.x; index_t si2 = si0 + p.sShape.x * 2; index_t si3 = si0 + p.sShape.x * 3; v.x *= (scalar_t)((float)up * (float)up * p.gain); v.y *= (scalar_t)((float)up * (float)up * p.gain); v.z *= (scalar_t)((float)up * (float)up * p.gain); v.w *= (scalar_t)((float)up * (float)up * p.gain); if (signWrite) { if (!enableWriteSkip) { // Determine and write signs. int sx = __float_as_uint(v.x) >> 31 << 0; int sy = __float_as_uint(v.y) >> 31 << 8; int sz = __float_as_uint(v.z) >> 31 << 16; int sw = __float_as_uint(v.w) >> 31 << 24; if (sx) v.x *= p.slope; if (sy) v.y *= p.slope; if (sz) v.z *= p.slope; if (sw) v.w *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType<T>::clamp(v.x, p.clamp); } if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType<T>::clamp(v.y, p.clamp); } if (fabsf(v.z) > p.clamp) { sz = 2 << 16; v.z = InternalType<T>::clamp(v.z, p.clamp); } if (fabsf(v.w) > p.clamp) { sw = 2 << 24; v.w = InternalType<T>::clamp(v.w, p.clamp); } if ((uint32_t)signXb < p.swLimit && signY >= minY) { // Combine signs. uint32_t s = sx + sy + sw + sz; s <<= (signX & 3) << 1; s |= __shfl_xor_sync(groupMask, s, 1); s |= __shfl_xor_sync(groupMask, s, 2); // Write signs. if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } if ((uint32_t)(signY + 2) < sShapeMaxY) { p.s[si2] = (unsigned char)(s >> 16); } if ((uint32_t)(signY + 3) < sShapeMaxY) { p.s[si3] = (unsigned char)(s >> 24); } } } else { // Determine and write signs. 
if ((uint32_t)signXb < p.swLimit && signY >= minY) { int sx = __float_as_uint(v.x) >> 31 << 0; int sy = __float_as_uint(v.y) >> 31 << 8; int sz = __float_as_uint(v.z) >> 31 << 16; int sw = __float_as_uint(v.w) >> 31 << 24; if (sx) v.x *= p.slope; if (sy) v.y *= p.slope; if (sz) v.z *= p.slope; if (sw) v.w *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType<T>::clamp(v.x, p.clamp); } if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType<T>::clamp(v.y, p.clamp); } if (fabsf(v.z) > p.clamp) { sz = 2 << 16; v.z = InternalType<T>::clamp(v.z, p.clamp); } if (fabsf(v.w) > p.clamp) { sw = 2 << 24; v.w = InternalType<T>::clamp(v.w, p.clamp); } // Combine signs. uint32_t s = sx + sy + sw + sz; s <<= (signX & 3) << 1; s |= __shfl_xor_sync(groupMask, s, 1); s |= __shfl_xor_sync(groupMask, s, 2); // Write signs. if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } if ((uint32_t)(signY + 2) < sShapeMaxY) { p.s[si2] = (unsigned char)(s >> 16); } if ((uint32_t)(signY + 3) < sShapeMaxY) { p.s[si3] = (unsigned char)(s >> 24); } } else { // Just compute the values. if (v.x < 0.f) v.x *= p.slope; v.x = InternalType<T>::clamp(v.x, p.clamp); if (v.y < 0.f) v.y *= p.slope; v.y = InternalType<T>::clamp(v.y, p.clamp); if (v.z < 0.f) v.z *= p.slope; v.z = InternalType<T>::clamp(v.z, p.clamp); if (v.w < 0.f) v.w *= p.slope; v.w = InternalType<T>::clamp(v.w, p.clamp); } } } else if (signRead) // Read signs and apply. { if ((uint32_t)signXb < p.swLimit) { int ss = (signX & 3) << 1; if ((uint32_t)(signY + 0) < p.sShape.y) { int s = p.s[si0] >> ss; if (s & 1) v.x *= p.slope; if (s & 2) v.x = 0.f; } if ((uint32_t)(signY + 1) < p.sShape.y) { int s = p.s[si1] >> ss; if (s & 1) v.y *= p.slope; if (s & 2) v.y = 0.f; } if ((uint32_t)(signY + 2) < p.sShape.y) { int s = p.s[si2] >> ss; if (s & 1) v.z *= p.slope; if (s & 2) v.z = 0.f; } if ((uint32_t)(signY + 3) < p.sShape.y) { int s = p.s[si3] >> ss; if (s & 1) v.w *= p.slope; if (s & 2) v.w = 0.f; } } } else // Forward pass with no sign write. { if (v.x < 0.f) v.x *= p.slope; v.x = InternalType<T>::clamp(v.x, p.clamp); if (v.y < 0.f) v.y *= p.slope; v.y = InternalType<T>::clamp(v.y, p.clamp); if (v.z < 0.f) v.z *= p.slope; v.z = InternalType<T>::clamp(v.z, p.clamp); if (v.w < 0.f) v.w *= p.slope; v.w = InternalType<T>::clamp(v.w, p.clamp); } s_tileUpXY[dst + 0 * tileUpW] = v.x; if (relUpY0 + 1 < tileUpH) s_tileUpXY[dst + 1 * tileUpW] = v.y; if (relUpY0 + 2 < tileUpH) s_tileUpXY[dst + 2 * tileUpW] = v.z; if (relUpY0 + 3 < tileUpH) s_tileUpXY[dst + 3 * tileUpW] = v.w; } } else if (up == 2) { minY -= 1; // Adjust according to block height. 
for (int idx = threadIdx.x; idx < tileUpW * tileUpH_up / up; idx += blockDim.x) { int relUpX, relInY0; fast_div_mod<tileUpW>(relUpX, relInY0, idx); int relUpY0 = relInY0 * up; int src0 = relInY0 * tileUpW + relUpX; int dst = relUpY0 * tileUpW + relUpX; vec2_t v = InternalType<T>::zero_vec2(); scalar_t a = s_tileUpX[src0]; if (phaseInY == 0) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 0]; a = s_tileUpX[src0 + (step + 1) * tileUpW]; v.y += a * (scalar_t)c_fu[step * up + 1]; } } else // (phaseInY == 1) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 1]; v.y += a * (scalar_t)c_fu[step * up + 0]; a = s_tileUpX[src0 + (step + 1) * tileUpW]; } } int x = tileOutX * down + relUpX; int y = tileOutY * down + relUpY0; int signX = x + p.sOfs.x; int signY = y + p.sOfs.y; int signZ = blockIdx.z + p.blockZofs; int signXb = signX >> 2; index_t si0 = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); index_t si1 = si0 + p.sShape.x; v.x *= (scalar_t)((float)up * (float)up * p.gain); v.y *= (scalar_t)((float)up * (float)up * p.gain); if (signWrite) { if (!enableWriteSkip) { // Determine and write signs. int sx = __float_as_uint(v.x) >> 31 << 0; int sy = __float_as_uint(v.y) >> 31 << 8; if (sx) v.x *= p.slope; if (sy) v.y *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType<T>::clamp(v.x, p.clamp); } if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType<T>::clamp(v.y, p.clamp); } if ((uint32_t)signXb < p.swLimit && signY >= minY) { // Combine signs. int s = sx + sy; s <<= signXo; s |= __shfl_xor_sync(groupMask, s, 1); s |= __shfl_xor_sync(groupMask, s, 2); // Write signs. if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } } } else { // Determine and write signs. if ((uint32_t)signXb < p.swLimit && signY >= minY) { int sx = __float_as_uint(v.x) >> 31 << 0; int sy = __float_as_uint(v.y) >> 31 << 8; if (sx) v.x *= p.slope; if (sy) v.y *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType<T>::clamp(v.x, p.clamp); } if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType<T>::clamp(v.y, p.clamp); } // Combine signs. int s = sx + sy; s <<= signXo; s |= __shfl_xor_sync(groupMask, s, 1); s |= __shfl_xor_sync(groupMask, s, 2); // Write signs. if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } } else { // Just compute the values. if (v.x < 0.f) v.x *= p.slope; v.x = InternalType<T>::clamp(v.x, p.clamp); if (v.y < 0.f) v.y *= p.slope; v.y = InternalType<T>::clamp(v.y, p.clamp); } } } else if (signRead) // Read signs and apply. { if ((uint32_t)signXb < p.swLimit) { if ((uint32_t)(signY + 0) < p.sShape.y) { int s = p.s[si0] >> signXo; if (s & 1) v.x *= p.slope; if (s & 2) v.x = 0.f; } if ((uint32_t)(signY + 1) < p.sShape.y) { int s = p.s[si1] >> signXo; if (s & 1) v.y *= p.slope; if (s & 2) v.y = 0.f; } } } else // Forward pass with no sign write. { if (v.x < 0.f) v.x *= p.slope; v.x = InternalType<T>::clamp(v.x, p.clamp); if (v.y < 0.f) v.y *= p.slope; v.y = InternalType<T>::clamp(v.y, p.clamp); } if (!downInline) { // Write into temporary buffer. s_tileUpXY[dst] = v.x; if (relUpY0 < tileUpH - 1) s_tileUpXY[dst + tileUpW] = v.y; } else { // Write directly into output buffer. 
if ((uint32_t)x < p.yShape.x) { int ymax = MIN(p.yShape.y, tileUpH + tileOutY * down); index_t ofs = x * get_stride<index_t>(p.yStride.x) + y * get_stride<index_t>(p.yStride.y) + mapOfsOut; if ((uint32_t)y + 0 < p.yShape.y) *((T*)((char*)p.y + ofs)) = (T)(v.x * (scalar_t)c_fd[0]); if ((uint32_t)y + 1 < ymax) *((T*)((char*)p.y + ofs + get_stride<index_t>(p.yStride.y))) = (T)(v.y * (scalar_t)c_fd[0]); } } } } } else if (filterMode == MODE_FUSD || filterMode == MODE_FUFD) { // Full upsampling filter. if (up == 2) { // 2 x 2-wide. __syncthreads(); int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH + p.sOfs.y : 0; // Skip already written signs. for (int idx = threadIdx.x * 4; idx < tileUpW * tileUpH; idx += blockDim.x * 4) { int relUpX0, relUpY0; fast_div_mod<tileUpW>(relUpX0, relUpY0, idx); int relInX0 = CEIL_DIV(relUpX0 - phaseInX, up); int relInY0 = CEIL_DIV(relUpY0 - phaseInY, up); int src0 = relInX0 + tileInW * relInY0; int tap0y = (relInY0 * up + phaseInY - relUpY0); #define X_LOOP(TAPY, PX) \ for (int sx = 0; sx < fuSize / up; sx++) \ { \ v.x += a * (scalar_t)c_fu[(sx * up + (((PX) - 0) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; \ v.z += b * (scalar_t)c_fu[(sx * up + (((PX) - 0) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; if ((PX) == 0) { a = b; b = s_tileIn[src0 + 2 + sx + sy * tileInW]; } \ v.y += a * (scalar_t)c_fu[(sx * up + (((PX) - 1) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; \ v.w += b * (scalar_t)c_fu[(sx * up + (((PX) - 1) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; if ((PX) == 1) { a = b; b = s_tileIn[src0 + 2 + sx + sy * tileInW]; } \ } vec4_t v = InternalType<T>::zero_vec4(); if (tap0y == 0 && phaseInX == 0) #pragma unroll for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; #pragma unroll X_LOOP(0, 0) } if (tap0y == 0 && phaseInX == 1) #pragma unroll for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; #pragma unroll X_LOOP(0, 1) } if (tap0y == 1 && phaseInX == 0) #pragma unroll for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; #pragma unroll X_LOOP(1, 0) } if (tap0y == 1 && phaseInX == 1) #pragma unroll for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; #pragma unroll X_LOOP(1, 1) } #undef X_LOOP int x = tileOutX * down + relUpX0; int y = tileOutY * down + relUpY0; int signX = x + p.sOfs.x; int signY = y + p.sOfs.y; int signZ = blockIdx.z + p.blockZofs; int signXb = signX >> 2; index_t si = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); v.x *= (scalar_t)((float)up * (float)up * p.gain); v.y *= (scalar_t)((float)up * (float)up * p.gain); v.z *= (scalar_t)((float)up * (float)up * p.gain); v.w *= (scalar_t)((float)up * (float)up * p.gain); if (signWrite) { if (!enableWriteSkip) { // Determine and write signs. 
int sx = __float_as_uint(v.x) >> 31; int sy = __float_as_uint(v.y) >> 31; int sz = __float_as_uint(v.z) >> 31; int sw = __float_as_uint(v.w) >> 31; if (sx) v.x *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2; v.x = InternalType<T>::clamp(v.x, p.clamp); } if (sy) v.y *= p.slope; if (fabsf(v.y) > p.clamp) { sy = 2; v.y = InternalType<T>::clamp(v.y, p.clamp); } if (sz) v.z *= p.slope; if (fabsf(v.z) > p.clamp) { sz = 2; v.z = InternalType<T>::clamp(v.z, p.clamp); } if (sw) v.w *= p.slope; if (fabsf(v.w) > p.clamp) { sw = 2; v.w = InternalType<T>::clamp(v.w, p.clamp); } if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) { p.s[si] = sx + (sy << 2) + (sz << 4) + (sw << 6); } } else { // Determine and write signs. if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) { int sx = __float_as_uint(v.x) >> 31; int sy = __float_as_uint(v.y) >> 31; int sz = __float_as_uint(v.z) >> 31; int sw = __float_as_uint(v.w) >> 31; if (sx) v.x *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2; v.x = InternalType<T>::clamp(v.x, p.clamp); } if (sy) v.y *= p.slope; if (fabsf(v.y) > p.clamp) { sy = 2; v.y = InternalType<T>::clamp(v.y, p.clamp); } if (sz) v.z *= p.slope; if (fabsf(v.z) > p.clamp) { sz = 2; v.z = InternalType<T>::clamp(v.z, p.clamp); } if (sw) v.w *= p.slope; if (fabsf(v.w) > p.clamp) { sw = 2; v.w = InternalType<T>::clamp(v.w, p.clamp); } p.s[si] = sx + (sy << 2) + (sz << 4) + (sw << 6); } else { // Just compute the values. if (v.x < 0.f) v.x *= p.slope; v.x = InternalType<T>::clamp(v.x, p.clamp); if (v.y < 0.f) v.y *= p.slope; v.y = InternalType<T>::clamp(v.y, p.clamp); if (v.z < 0.f) v.z *= p.slope; v.z = InternalType<T>::clamp(v.z, p.clamp); if (v.w < 0.f) v.w *= p.slope; v.w = InternalType<T>::clamp(v.w, p.clamp); } } } else if (signRead) // Read sign and apply. { if ((uint32_t)signY < p.sShape.y) { int s = 0; if ((uint32_t)signXb < p.swLimit) s = p.s[si]; if ((uint32_t)signXb + 1 < p.swLimit) s |= p.s[si + 1] << 8; s >>= (signX & 3) << 1; if (s & 0x01) v.x *= p.slope; if (s & 0x02) v.x = 0.f; if (s & 0x04) v.y *= p.slope; if (s & 0x08) v.y = 0.f; if (s & 0x10) v.z *= p.slope; if (s & 0x20) v.z = 0.f; if (s & 0x40) v.w *= p.slope; if (s & 0x80) v.w = 0.f; } } else // Forward pass with no sign write. { if (v.x < 0.f) v.x *= p.slope; v.x = InternalType<T>::clamp(v.x, p.clamp); if (v.y < 0.f) v.y *= p.slope; v.y = InternalType<T>::clamp(v.y, p.clamp); if (v.z < 0.f) v.z *= p.slope; v.z = InternalType<T>::clamp(v.z, p.clamp); if (v.w < 0.f) v.w *= p.slope; v.w = InternalType<T>::clamp(v.w, p.clamp); } s_tileUpXY[idx + 0] = v.x; s_tileUpXY[idx + 1] = v.y; s_tileUpXY[idx + 2] = v.z; s_tileUpXY[idx + 3] = v.w; } } else if (up == 1) { __syncthreads(); uint32_t groupMask = 15 << ((threadIdx.x & 31) & ~3); int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH : 0; // Skip already written signs. for (int idx = threadIdx.x; idx < tileUpW * tileUpH; idx += blockDim.x) { int relUpX0, relUpY0; fast_div_mod<tileUpW>(relUpX0, relUpY0, idx); scalar_t v = s_tileIn[idx] * (scalar_t)c_fu[0]; // 1x1 filter. int x = tileOutX * down + relUpX0; int y = tileOutY * down + relUpY0; int signX = x + p.sOfs.x; int signY = y + p.sOfs.y; int signZ = blockIdx.z + p.blockZofs; int signXb = signX >> 2; index_t si = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); v *= (scalar_t)((float)up * (float)up * p.gain); if (signWrite) { if (!enableWriteSkip) { // Determine and write sign. 
uint32_t s = 0; uint32_t signXbit = (1u << signXo); if (v < 0.f) { s = signXbit; v *= p.slope; } if (fabsf(v) > p.clamp) { s = signXbit * 2; v = InternalType<T>::clamp(v, p.clamp); } if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) { s += __shfl_xor_sync(groupMask, s, 1); // Coalesce. s += __shfl_xor_sync(groupMask, s, 2); // Coalesce. p.s[si] = s; // Write. } } else { // Determine and write sign. if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) { uint32_t s = 0; uint32_t signXbit = (1u << signXo); if (v < 0.f) { s = signXbit; v *= p.slope; } if (fabsf(v) > p.clamp) { s = signXbit * 2; v = InternalType<T>::clamp(v, p.clamp); } s += __shfl_xor_sync(groupMask, s, 1); // Coalesce. s += __shfl_xor_sync(groupMask, s, 2); // Coalesce. p.s[si] = s; // Write. } else { // Just compute the value. if (v < 0.f) v *= p.slope; v = InternalType<T>::clamp(v, p.clamp); } } } else if (signRead) { // Read sign and apply if within sign tensor bounds. if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y) { int s = p.s[si]; s >>= signXo; if (s & 1) v *= p.slope; if (s & 2) v = 0.f; } } else // Forward pass with no sign write. { if (v < 0.f) v *= p.slope; v = InternalType<T>::clamp(v, p.clamp); } if (!downInline) // Write into temporary buffer. s_tileUpXY[idx] = v; else if ((uint32_t)x < p.yShape.x && (uint32_t)y < p.yShape.y) // Write directly into output buffer *((T*)((char*)p.y + (x * get_stride<index_t>(p.yStride.x) + y * get_stride<index_t>(p.yStride.y) + mapOfsOut))) = (T)(v * (scalar_t)c_fd[0]); } } } // Downsampling. if (filterMode == MODE_SUSD || filterMode == MODE_FUSD) { // Horizontal downsampling. __syncthreads(); if (down == 4 && tileOutW % 4 == 0) { // Calculate 4 pixels at a time. for (int idx = threadIdx.x * 4; idx < tileOutW * tileUpH; idx += blockDim.x * 4) { int relOutX0, relUpY; fast_div_mod<tileOutW>(relOutX0, relUpY, idx); int relUpX0 = relOutX0 * down; int src0 = relUpY * tileUpW + relUpX0; vec4_t v = InternalType<T>::zero_vec4(); #pragma unroll for (int step = 0; step < fdSize; step++) { v.x += s_tileUpXY[src0 + 0 + step] * (scalar_t)c_fd[step]; v.y += s_tileUpXY[src0 + 4 + step] * (scalar_t)c_fd[step]; v.z += s_tileUpXY[src0 + 8 + step] * (scalar_t)c_fd[step]; v.w += s_tileUpXY[src0 + 12 + step] * (scalar_t)c_fd[step]; } s_tileDownX[idx+0] = v.x; s_tileDownX[idx+1] = v.y; s_tileDownX[idx+2] = v.z; s_tileDownX[idx+3] = v.w; } } else if ((down == 2 || down == 4) && (tileOutW % 2 == 0)) { // Calculate 2 pixels at a time. for (int idx = threadIdx.x * 2; idx < tileOutW * tileUpH; idx += blockDim.x * 2) { int relOutX0, relUpY; fast_div_mod<tileOutW>(relOutX0, relUpY, idx); int relUpX0 = relOutX0 * down; int src0 = relUpY * tileUpW + relUpX0; vec2_t v = InternalType<T>::zero_vec2(); #pragma unroll for (int step = 0; step < fdSize; step++) { v.x += s_tileUpXY[src0 + 0 + step] * (scalar_t)c_fd[step]; v.y += s_tileUpXY[src0 + down + step] * (scalar_t)c_fd[step]; } s_tileDownX[idx+0] = v.x; s_tileDownX[idx+1] = v.y; } } else { // Calculate 1 pixel at a time. for (int idx = threadIdx.x; idx < tileOutW * tileUpH; idx += blockDim.x) { int relOutX0, relUpY; fast_div_mod<tileOutW>(relOutX0, relUpY, idx); int relUpX0 = relOutX0 * down; int src = relUpY * tileUpW + relUpX0; scalar_t v = 0.f; #pragma unroll for (int step = 0; step < fdSize; step++) v += s_tileUpXY[src + step] * (scalar_t)c_fd[step]; s_tileDownX[idx] = v; } } // Vertical downsampling & store output tile. 
__syncthreads(); for (int idx = threadIdx.x; idx < tileOutW * tileOutH; idx += blockDim.x) { int relOutX, relOutY0; fast_div_mod<tileOutW>(relOutX, relOutY0, idx); int relUpY0 = relOutY0 * down; int src0 = relUpY0 * tileOutW + relOutX; scalar_t v = 0; #pragma unroll for (int step = 0; step < fdSize; step++) v += s_tileDownX[src0 + step * tileOutW] * (scalar_t)c_fd[step]; int outX = tileOutX + relOutX; int outY = tileOutY + relOutY0; if (outX < p.yShape.x & outY < p.yShape.y) *((T*)((char*)p.y + (outX * get_stride<index_t>(p.yStride.x) + outY * get_stride<index_t>(p.yStride.y) + mapOfsOut))) = (T)v; } } else if (filterMode == MODE_SUFD || filterMode == MODE_FUFD) { // Full downsampling filter. if (down == 2) { // 2-wide. __syncthreads(); for (int idx = threadIdx.x * 2; idx < tileOutW * tileOutH; idx += blockDim.x * 2) { int relOutX0, relOutY0; fast_div_mod<tileOutW>(relOutX0, relOutY0, idx); int relUpX0 = relOutX0 * down; int relUpY0 = relOutY0 * down; int src0 = relUpY0 * tileUpW + relUpX0; vec2_t v = InternalType<T>::zero_vec2(); #pragma unroll for (int sy = 0; sy < fdSize; sy++) #pragma unroll for (int sx = 0; sx < fdSize; sx++) { v.x += s_tileUpXY[src0 + 0 + sx + sy * tileUpW] * (scalar_t)c_fd[sx + sy * MAX_FILTER_SIZE]; v.y += s_tileUpXY[src0 + 2 + sx + sy * tileUpW] * (scalar_t)c_fd[sx + sy * MAX_FILTER_SIZE]; } int outX = tileOutX + relOutX0; int outY = tileOutY + relOutY0; if ((uint32_t)outY < p.yShape.y) { index_t ofs = outX * get_stride<index_t>(p.yStride.x) + outY * get_stride<index_t>(p.yStride.y) + mapOfsOut; if (outX + 0 < p.yShape.x) *((T*)((char*)p.y + ofs)) = (T)v.x; if (outX + 1 < p.yShape.x) *((T*)((char*)p.y + ofs + get_stride<index_t>(p.yStride.x))) = (T)v.y; } } } else if (down == 1 && !downInline) { // Thread per pixel. __syncthreads(); for (int idx = threadIdx.x; idx < tileOutW * tileOutH; idx += blockDim.x) { int relOutX0, relOutY0; fast_div_mod<tileOutW>(relOutX0, relOutY0, idx); scalar_t v = s_tileUpXY[idx] * (scalar_t)c_fd[0]; // 1x1 filter. int outX = tileOutX + relOutX0; int outY = tileOutY + relOutY0; if ((uint32_t)outX < p.yShape.x && (uint32_t)outY < p.yShape.y) *((T*)((char*)p.y + (outX * get_stride<index_t>(p.yStride.x) + outY * get_stride<index_t>(p.yStride.y) + mapOfsOut))) = (T)v; } } } if (!enableXrep) break; } } //------------------------------------------------------------------------ // Compute activation function and signs for upsampled data tensor, modifying data tensor in-place. Used for accelerating the generic variant. // Sign tensor is known to be contiguous, and p.x and p.s have the same z, w dimensions. 64-bit indexing is always used. template <class T, bool signWrite, bool signRead> static __global__ void filtered_lrelu_act_kernel(filtered_lrelu_act_kernel_params p) { typedef typename InternalType<T>::scalar_t scalar_t; // Indexing. int32_t x = threadIdx.x + blockIdx.x * blockDim.x; int32_t ymax = signWrite ? p.sShape.y : p.xShape.y; int32_t qmax = p.xShape.z * p.xShape.w; // Combined minibatch*channel maximum index. // Loop to accommodate oversized tensors. for (int32_t q = blockIdx.z; q < qmax; q += gridDim.z) for (int32_t y = blockIdx.y; y < ymax; y += gridDim.y) { // Extract z and w (channel, minibatch index). int32_t w = q / p.xShape.z; int32_t z = q - w * p.xShape.z; // Choose behavior based on sign read/write mode. if (signWrite) { // Process value if in p.x. 
uint32_t s = 0; if (x < p.xShape.x && y < p.xShape.y) { int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; T* pv = ((T*)p.x) + ix; scalar_t v = (scalar_t)(*pv); // Gain, LReLU, clamp. v *= p.gain; if (v < 0.f) { v *= p.slope; s = 1; // Sign. } if (fabsf(v) > p.clamp) { v = InternalType<T>::clamp(v, p.clamp); s = 2; // Clamp. } *pv = (T)v; // Write value. } // Coalesce into threads 0 and 16 of warp. uint32_t m = (threadIdx.x & 16) ? 0xffff0000u : 0x0000ffffu; s <<= ((threadIdx.x & 15) << 1); // Shift into place. s |= __shfl_xor_sync(m, s, 1); // Distribute. s |= __shfl_xor_sync(m, s, 2); s |= __shfl_xor_sync(m, s, 4); s |= __shfl_xor_sync(m, s, 8); // Write signs if leader and in p.s. if (!(threadIdx.x & 15) && x < p.sShape.x) // y is always in. { uint64_t is = x + p.sShape.x * (y + (int64_t)p.sShape.y * q); // Contiguous. ((uint32_t*)p.s)[is >> 4] = s; } } else if (signRead) { // Process value if in p.x. if (x < p.xShape.x) // y is always in. { int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; T* pv = ((T*)p.x) + ix; scalar_t v = (scalar_t)(*pv); v *= p.gain; // Apply sign buffer offset. uint32_t sx = x + p.sOfs.x; uint32_t sy = y + p.sOfs.y; // Read and apply signs if we land inside valid region of sign buffer. if (sx < p.sShape.x && sy < p.sShape.y) { uint64_t is = (sx >> 2) + (p.sShape.x >> 2) * (sy + (uint64_t)p.sShape.y * q); // Contiguous. unsigned char s = p.s[is]; s >>= (sx & 3) << 1; // Shift into place. if (s & 1) // Sign? v *= p.slope; if (s & 2) // Clamp? v = 0.f; } *pv = (T)v; // Write value. } } else { // Forward pass with no sign write. Process value if in p.x. if (x < p.xShape.x) // y is always in. { int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; T* pv = ((T*)p.x) + ix; scalar_t v = (scalar_t)(*pv); v *= p.gain; if (v < 0.f) v *= p.slope; if (fabsf(v) > p.clamp) v = InternalType<T>::clamp(v, p.clamp); *pv = (T)v; // Write value. } } } } template <class T, bool signWrite, bool signRead> void* choose_filtered_lrelu_act_kernel(void) { return (void*)filtered_lrelu_act_kernel<T, signWrite, signRead>; } //------------------------------------------------------------------------ // CUDA kernel selection. template <class T, class index_t, bool signWrite, bool signRead> filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB) { filtered_lrelu_kernel_spec s = { 0 }; // Return the first matching kernel. #define CASE(SH, U, FU, D, FD, MODE, TW, TH, W, XR, WS) \ if (sharedKB >= SH) \ if ((p.fuShape.y == 0 && (MODE == MODE_SUSD || MODE == MODE_SUFD)) || (p.fuShape.y > 0 && (MODE == MODE_FUSD || MODE == MODE_FUFD))) \ if ((p.fdShape.y == 0 && (MODE == MODE_SUSD || MODE == MODE_FUSD)) || (p.fdShape.y > 0 && (MODE == MODE_SUFD || MODE == MODE_FUFD))) \ if (p.up == U && p.fuShape.x <= FU && p.fuShape.y <= FU && p.down == D && p.fdShape.x <= FD && p.fdShape.y <= FD) \ { \ static_assert((D*TW % 4) == 0, "down * tileWidth must be divisible by 4"); \ static_assert(FU % U == 0, "upscaling filter size must be multiple of upscaling factor"); \ static_assert(FD % D == 0, "downscaling filter size must be multiple of downscaling factor"); \ s.setup = (void*)setup_filters_kernel; \ s.exec = (void*)filtered_lrelu_kernel<T, index_t, SH, signWrite, signRead, MODE, U, FU, D, FD, TW, TH, W*32, !!XR, !!WS>; \ s.tileOut = make_int2(TW, TH); \ s.numWarps = W; \ s.xrep = XR; \ s.dynamicSharedKB = (SH == 48) ? 
0 : SH; \ return s; \ } // Launch parameters for various kernel specializations. // Small filters must be listed before large filters, otherwise the kernel for larger filter will always match first. // Kernels that use more shared memory must be listed before those that use less, for the same reason. CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/1,1, /*mode*/MODE_FUFD, /*tw,th,warps,xrep,wskip*/64, 178, 32, 0, 0) // 1t-upf1-downf1 CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/152, 95, 16, 0, 0) // 4t-ups2-downf1 CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,8, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/56, 22, 16, 0, 0) // 4t-upf1-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/56, 29, 16, 11, 0) // 4t-ups2-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/60, 28, 16, 0, 0) // 4t-upf2-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/56, 28, 16, 0, 0) // 4t-ups2-downf2 CASE(/*sharedKB*/48, /*up,fu*/4,16, /*down,fd*/2,8, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/56, 31, 16, 11, 0) // 4t-ups4-downs2 CASE(/*sharedKB*/48, /*up,fu*/4,16, /*down,fd*/2,8, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/56, 36, 16, 0, 0) // 4t-ups4-downf2 CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/4,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 22, 16, 12, 0) // 4t-ups2-downs4 CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/4,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/29, 15, 16, 0, 0) // 4t-upf2-downs4 CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/96, 150, 28, 0, 0) // 6t-ups2-downf1 CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,12, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/32, 35, 24, 0, 0) // 6t-upf1-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 46, 16, 10, 0) // 6t-ups2-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/58, 28, 24, 8, 0) // 6t-upf2-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/52, 28, 16, 0, 0) // 6t-ups2-downf2 CASE(/*sharedKB*/48, /*up,fu*/4,24, /*down,fd*/2,12, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 51, 16, 5, 0) // 6t-ups4-downs2 CASE(/*sharedKB*/48, /*up,fu*/4,24, /*down,fd*/2,12, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 56, 16, 6, 0) // 6t-ups4-downf2 CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 18, 16, 12, 0) // 6t-ups2-downs4 CASE(/*sharedKB*/96, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/27, 31, 32, 6, 0) // 6t-upf2-downs4 96kB CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/27, 13, 24, 0, 0) // 6t-upf2-downs4 CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/148, 89, 24, 0, 0) // 8t-ups2-downf1 CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/32, 31, 16, 5, 0) // 8t-upf1-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/2,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 41, 16, 9, 0) // 8t-ups2-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/2,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/56, 26, 24, 0, 0) // 8t-upf2-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,16, 
/*down,fd*/2,16, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 40, 16, 0, 0) // 8t-ups2-downf2 CASE(/*sharedKB*/48, /*up,fu*/4,32, /*down,fd*/2,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 46, 24, 5, 0) // 8t-ups4-downs2 CASE(/*sharedKB*/48, /*up,fu*/4,32, /*down,fd*/2,16, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 50, 16, 0, 0) // 8t-ups4-downf2 CASE(/*sharedKB*/96, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/24, 24, 32, 12, 1) // 8t-ups2-downs4 96kB CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 13, 16, 10, 1) // 8t-ups2-downs4 CASE(/*sharedKB*/96, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/25, 28, 28, 4, 0) // 8t-upf2-downs4 96kB CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/25, 10, 24, 0, 0) // 8t-upf2-downs4 #undef CASE return s; // No kernel found. } //------------------------------------------------------------------------
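// Illustrative host-side sketch (separate from the kernels above; helper names are hypothetical)
// of the sign-buffer layout those kernels use: each pixel stores a 2-bit code (0 = positive and
// unclamped, 1 = negative so the slope was applied, 2 = clamped), four pixels per byte, and the
// signRead paths recover a code by shifting with (sx & 3) << 1 and masking two bits.
#include <cstdint>
#include <cstdio>

static uint8_t pack_sign_codes(const uint8_t codes[4]) // each code in {0, 1, 2}
{
    uint8_t byte = 0;
    for (int x = 0; x < 4; x++)
        byte |= (uint8_t)((codes[x] & 3u) << ((x & 3) << 1)); // same shift the kernels use
    return byte;
}

static uint8_t unpack_sign_code(uint8_t byte, int x) // x = pixel index within the byte
{
    return (uint8_t)((byte >> ((x & 3) << 1)) & 3u);
}

int main(void)
{
    const uint8_t codes[4] = { 0, 1, 2, 1 };
    uint8_t packed = pack_sign_codes(codes);
    for (int x = 0; x < 4; x++)
        printf("pixel %d -> code %u\n", x, (unsigned)unpack_sign_code(packed, x));
    return 0;
}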
0a349e908f36e125dfe7ec55896c24941d5dffb5.cu
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // NVIDIA CORPORATION and its licensors retain all intellectual property // and proprietary rights in and to this software, related documentation // and any modifications thereto. Any use, reproduction, disclosure or // distribution of this software and related documentation without an express // license agreement from NVIDIA CORPORATION is strictly prohibited. #include <c10/util/Half.h> #include "filtered_lrelu.h" #include <cstdint> //------------------------------------------------------------------------ // Helpers. enum // Filter modes. { MODE_SUSD = 0, // Separable upsampling, separable downsampling. MODE_FUSD = 1, // Full upsampling, separable downsampling. MODE_SUFD = 2, // Separable upsampling, full downsampling. MODE_FUFD = 3, // Full upsampling, full downsampling. }; template <class T> struct InternalType; template <> struct InternalType<double> { typedef double scalar_t; typedef double2 vec2_t; typedef double4 vec4_t; __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_double2(0, 0); } __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_double4(0, 0, 0, 0); } __device__ __forceinline__ static double clamp(double x, double c) { return fmin(fmax(x, -c), c); } }; template <> struct InternalType<float> { typedef float scalar_t; typedef float2 vec2_t; typedef float4 vec4_t; __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_float2(0, 0); } __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_float4(0, 0, 0, 0); } __device__ __forceinline__ static float clamp(float x, float c) { return fminf(fmaxf(x, -c), c); } }; template <> struct InternalType<c10::Half> { typedef float scalar_t; typedef float2 vec2_t; typedef float4 vec4_t; __device__ __forceinline__ static vec2_t zero_vec2(void) { return make_float2(0, 0); } __device__ __forceinline__ static vec4_t zero_vec4(void) { return make_float4(0, 0, 0, 0); } __device__ __forceinline__ static float clamp(float x, float c) { return fminf(fmaxf(x, -c), c); } }; #define MIN(A, B) ((A) < (B) ? (A) : (B)) #define MAX(A, B) ((A) > (B) ? (A) : (B)) #define CEIL_DIV(A, B) (((B)==1) ? (A) : \ ((B)==2) ? ((int)((A)+1) >> 1) : \ ((B)==4) ? ((int)((A)+3) >> 2) : \ (((A) + ((A) > 0 ? (B) - 1 : 0)) / (B))) // This works only up to blocks of size 256 x 256 and for all N that are powers of two. template <int N> __device__ __forceinline__ void fast_div_mod(int& x, int& y, unsigned int i) { if ((N & (N-1)) && N <= 256) y = (i * ((1<<24)/N + 1)) >> 24; // Assumes N <= 256, i < N*256. else y = i/N; x = i - y*N; } // Type cast stride before reading it. template <class T> __device__ __forceinline__ T get_stride(const int64_t& x) { return *reinterpret_cast<const T*>(&x); } //------------------------------------------------------------------------ // Filters, setup kernel, copying function. #define MAX_FILTER_SIZE 32 // Combined up/down filter buffers so that transfer can be done with one copy. __device__ float g_fbuf[2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE]; // Filters in global memory, written by setup kernel. __device__ __constant__ float c_fbuf[2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE]; // Filters in constant memory, read by main kernel. // Accessors to combined buffers to index up/down filters individually. #define c_fu (c_fbuf) #define c_fd (c_fbuf + MAX_FILTER_SIZE * MAX_FILTER_SIZE) #define g_fu (g_fbuf) #define g_fd (g_fbuf + MAX_FILTER_SIZE * MAX_FILTER_SIZE) // Set up filters into global memory buffer. 
static __global__ void setup_filters_kernel(filtered_lrelu_kernel_params p) { for (int idx = threadIdx.x; idx < MAX_FILTER_SIZE * MAX_FILTER_SIZE; idx += blockDim.x) { int x, y; fast_div_mod<MAX_FILTER_SIZE>(x, y, idx); int fu_x = p.flip ? x : (p.fuShape.x - 1 - x); int fu_y = p.flip ? y : (p.fuShape.y - 1 - y); if (p.fuShape.y > 0) g_fu[idx] = (x >= p.fuShape.x || y >= p.fuShape.y) ? 0.0f : p.fu[fu_x * p.fuStride.x + fu_y * p.fuStride.y]; else g_fu[idx] = (x >= p.fuShape.x || y > 0) ? 0.0f : p.fu[fu_x * p.fuStride.x]; int fd_x = p.flip ? x : (p.fdShape.x - 1 - x); int fd_y = p.flip ? y : (p.fdShape.y - 1 - y); if (p.fdShape.y > 0) g_fd[idx] = (x >= p.fdShape.x || y >= p.fdShape.y) ? 0.0f : p.fd[fd_x * p.fdStride.x + fd_y * p.fdStride.y]; else g_fd[idx] = (x >= p.fdShape.x || y > 0) ? 0.0f : p.fd[fd_x * p.fdStride.x]; } } // Host function to copy filters written by setup kernel into constant buffer for main kernel. template <bool, bool> static cudaError_t copy_filters(cudaStream_t stream) { void* src = 0; cudaError_t err = cudaGetSymbolAddress(&src, g_fbuf); if (err) return err; return cudaMemcpyToSymbolAsync(c_fbuf, src, 2 * MAX_FILTER_SIZE * MAX_FILTER_SIZE * sizeof(float), 0, cudaMemcpyDeviceToDevice, stream); } //------------------------------------------------------------------------ // Coordinate spaces: // - Relative to input tensor: inX, inY, tileInX, tileInY // - Relative to input tile: relInX, relInY, tileInW, tileInH // - Relative to upsampled tile: relUpX, relUpY, tileUpW, tileUpH // - Relative to output tile: relOutX, relOutY, tileOutW, tileOutH // - Relative to output tensor: outX, outY, tileOutX, tileOutY // // Relationships between coordinate spaces: // - inX = tileInX + relInX // - inY = tileInY + relInY // - relUpX = relInX * up + phaseInX // - relUpY = relInY * up + phaseInY // - relUpX = relOutX * down // - relUpY = relOutY * down // - outX = tileOutX + relOutX // - outY = tileOutY + relOutY extern __shared__ char s_buf_raw[]; // When sharedKB <= 48, allocate shared memory statically inside the kernel, otherwise use the externally allocated shared memory buffer. template <class T, class index_t, int sharedKB, bool signWrite, bool signRead, int filterMode, int up, int fuSize, int down, int fdSize, int tileOutW, int tileOutH, int threadsPerBlock, bool enableXrep, bool enableWriteSkip> static __global__ void filtered_lrelu_kernel(filtered_lrelu_kernel_params p) { // Check that we don't try to support non-existing filter modes. 
static_assert(up == 1 || up == 2 || up == 4, "only up=1, up=2, up=4 scales supported"); static_assert(down == 1 || down == 2 || down == 4, "only down=1, down=2, down=4 scales supported"); static_assert(fuSize >= up, "upsampling filter size must be at least upsampling factor"); static_assert(fdSize >= down, "downsampling filter size must be at least downsampling factor"); static_assert(fuSize % up == 0, "upsampling filter size must be divisible with upsampling factor"); static_assert(fdSize % down == 0, "downsampling filter size must be divisible with downsampling factor"); static_assert(fuSize <= MAX_FILTER_SIZE && fdSize <= MAX_FILTER_SIZE, "filter size greater than MAX_FILTER_SIZE"); static_assert(up != 1 || (fuSize == 1 && (filterMode == MODE_FUFD || filterMode == MODE_FUSD)), "up=1 supported only for 1x1 full filters"); static_assert(down != 1 || (fdSize == 1 && (filterMode == MODE_FUFD || filterMode == MODE_SUFD)), "down=1 supported only for 1x1 full filters"); static_assert(!(up == 4 && (filterMode == MODE_FUFD || filterMode == MODE_FUSD)), "full filters not supported for up=4"); static_assert(!(down == 4 && (filterMode == MODE_FUFD || filterMode == MODE_SUFD)), "full filters not supported for down=4"); // Static definitions. typedef typename InternalType<T>::scalar_t scalar_t; typedef typename InternalType<T>::vec2_t vec2_t; typedef typename InternalType<T>::vec4_t vec4_t; const int tileUpW = (tileOutW * down + (fdSize - 1) - (down - 1) + 3) & ~3; // Upsampled tile width, rounded up to multiple of 4. const int tileUpH = tileOutH * down + (fdSize - 1) - (down - 1); // Upsampled tile height. const int tileInW = CEIL_DIV(tileUpW + (fuSize - 1), up); // Input tile width. const int tileInH = CEIL_DIV(tileUpH + (fuSize - 1), up); // Input tile height. const int tileUpH_up = CEIL_DIV(tileUpH, up) * up; // Upsampled tile height rounded up to a multiple of up. const int tileInH_up = CEIL_DIV(tileUpH_up + (fuSize - 1), up); // For allocations only, to avoid shared memory read overruns with up=2 and up=4. // Merge 1x1 downsampling into last upsampling step for upf1 and ups2. const bool downInline = (down == 1) && ((up == 1 && filterMode == MODE_FUFD) || (up == 2 && filterMode == MODE_SUFD)); // Sizes of logical buffers. const int szIn = tileInH_up * tileInW; const int szUpX = tileInH_up * tileUpW; const int szUpXY = downInline ? 0 : (tileUpH * tileUpW); const int szDownX = tileUpH * tileOutW; // Sizes for shared memory arrays. const int s_buf0_size_base = (filterMode == MODE_SUSD) ? MAX(szIn, szUpXY) : (filterMode == MODE_FUSD) ? MAX(szIn, szDownX) : (filterMode == MODE_SUFD) ? MAX(szIn, szUpXY) : (filterMode == MODE_FUFD) ? szIn : -1; const int s_buf1_size_base = (filterMode == MODE_SUSD) ? MAX(szUpX, szDownX) : (filterMode == MODE_FUSD) ? szUpXY : (filterMode == MODE_SUFD) ? szUpX : (filterMode == MODE_FUFD) ? szUpXY : -1; // Ensure U128 alignment. const int s_buf0_size = (s_buf0_size_base + 3) & ~3; const int s_buf1_size = (s_buf1_size_base + 3) & ~3; // Check at compile time that we don't use too much shared memory. static_assert((s_buf0_size + s_buf1_size) * sizeof(scalar_t) <= (sharedKB << 10), "shared memory overflow"); // Declare shared memory arrays. scalar_t* s_buf0; scalar_t* s_buf1; if (sharedKB <= 48) { // Allocate shared memory arrays here. __shared__ scalar_t s_buf0_st[(sharedKB > 48) ? (1<<24) : (s_buf0_size + s_buf1_size)]; // Prevent launching if this isn't optimized away when unused. 
s_buf0 = s_buf0_st; s_buf1 = s_buf0 + s_buf0_size; } else { // Use the dynamically allocated shared memory array. s_buf0 = (scalar_t*)s_buf_raw; s_buf1 = s_buf0 + s_buf0_size; } // Pointers to the buffers. scalar_t* s_tileIn; // Input tile: [relInX * tileInH + relInY] scalar_t* s_tileUpX; // After horizontal upsampling: [relInY * tileUpW + relUpX] scalar_t* s_tileUpXY; // After upsampling: [relUpY * tileUpW + relUpX] scalar_t* s_tileDownX; // After horizontal downsampling: [relUpY * tileOutW + relOutX] if (filterMode == MODE_SUSD) { s_tileIn = s_buf0; s_tileUpX = s_buf1; s_tileUpXY = s_buf0; s_tileDownX = s_buf1; } else if (filterMode == MODE_FUSD) { s_tileIn = s_buf0; s_tileUpXY = s_buf1; s_tileDownX = s_buf0; } else if (filterMode == MODE_SUFD) { s_tileIn = s_buf0; s_tileUpX = s_buf1; s_tileUpXY = s_buf0; } else if (filterMode == MODE_FUFD) { s_tileIn = s_buf0; s_tileUpXY = s_buf1; } // Allow large grids in z direction via per-launch offset. int channelIdx = blockIdx.z + p.blockZofs; int batchIdx = channelIdx / p.yShape.z; channelIdx -= batchIdx * p.yShape.z; // Offset to output feature map. In bytes. index_t mapOfsOut = channelIdx * get_stride<index_t>(p.yStride.z) + batchIdx * get_stride<index_t>(p.yStride.w); // Sign shift amount. uint32_t signXo = ((threadIdx.x + p.sOfs.x) << 1) & 6; // Inner tile loop. #pragma unroll 1 for (int tileIdx = 0; !enableXrep || (tileIdx < MIN(p.tilesXrep, p.tilesXdim - p.tilesXrep * blockIdx.y)); tileIdx++) { // Locate output tile. int tileX = enableXrep ? blockIdx.y * p.tilesXrep + tileIdx : blockIdx.x; int tileOutX = tileX * tileOutW; int tileOutY = (enableXrep ? blockIdx.x : blockIdx.y) * tileOutH; // Locate input tile. int tmpX = tileOutX * down - p.pad0.x; int tmpY = tileOutY * down - p.pad0.y; int tileInX = CEIL_DIV(tmpX, up); int tileInY = CEIL_DIV(tmpY, up); const int phaseInX = tileInX * up - tmpX; const int phaseInY = tileInY * up - tmpY; // Extra sync if input and output buffers are the same and we are not on first tile. if (enableXrep && tileIdx > 0 && (filterMode == MODE_FUSD || (filterMode == MODE_SUFD && !downInline) || (filterMode == MODE_FUFD && downInline))) __syncthreads(); // Load input tile & apply bias. Unrolled. scalar_t b = (scalar_t)*(const T*)((const char*)p.b + (channelIdx * get_stride<index_t>(p.bStride))); index_t mapOfsIn = channelIdx * get_stride<index_t>(p.xStride.z) + batchIdx * get_stride<index_t>(p.xStride.w); int idx = threadIdx.x; const int loopCountIN = CEIL_DIV(tileInW * tileInH, threadsPerBlock); #pragma unroll for (int loop = 0; loop < loopCountIN; loop++) { int relInX, relInY; fast_div_mod<tileInW>(relInX, relInY, idx); int inX = tileInX + relInX; int inY = tileInY + relInY; scalar_t v = 0; if ((uint32_t)inX < p.xShape.x && (uint32_t)inY < p.xShape.y) v = (scalar_t)*((const T*)((const char*)p.x + (inX * get_stride<index_t>(p.xStride.x) + inY * get_stride<index_t>(p.xStride.y) + mapOfsIn))) + b; bool skip = (loop == loopCountIN-1) && (idx >= tileInW * tileInH); if (!skip) s_tileIn[idx] = v; idx += threadsPerBlock; } if (filterMode == MODE_SUSD || filterMode == MODE_SUFD) // Separable upsampling filter. { // Horizontal upsampling. 
__syncthreads(); if (up == 4) { for (int idx = threadIdx.x*up; idx < tileUpW * tileInH; idx += blockDim.x*up) { int relUpX0, relInY; fast_div_mod<tileUpW>(relUpX0, relInY, idx); int relInX0 = relUpX0 / up; int src0 = relInX0 + tileInW * relInY; int dst = relInY * tileUpW + relUpX0; vec4_t v = InternalType<T>::zero_vec4(); scalar_t a = s_tileIn[src0]; if (phaseInX == 0) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 0]; a = s_tileIn[src0 + step + 1]; v.y += a * (scalar_t)c_fu[step * up + 3]; v.z += a * (scalar_t)c_fu[step * up + 2]; v.w += a * (scalar_t)c_fu[step * up + 1]; } } else if (phaseInX == 1) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 1]; v.y += a * (scalar_t)c_fu[step * up + 0]; a = s_tileIn[src0 + step + 1]; v.z += a * (scalar_t)c_fu[step * up + 3]; v.w += a * (scalar_t)c_fu[step * up + 2]; } } else if (phaseInX == 2) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 2]; v.y += a * (scalar_t)c_fu[step * up + 1]; v.z += a * (scalar_t)c_fu[step * up + 0]; a = s_tileIn[src0 + step + 1]; v.w += a * (scalar_t)c_fu[step * up + 3]; } } else // (phaseInX == 3) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 3]; v.y += a * (scalar_t)c_fu[step * up + 2]; v.z += a * (scalar_t)c_fu[step * up + 1]; v.w += a * (scalar_t)c_fu[step * up + 0]; a = s_tileIn[src0 + step + 1]; } } s_tileUpX[dst+0] = v.x; s_tileUpX[dst+1] = v.y; s_tileUpX[dst+2] = v.z; s_tileUpX[dst+3] = v.w; } } else if (up == 2) { bool p0 = (phaseInX == 0); for (int idx = threadIdx.x*up; idx < tileUpW * tileInH; idx += blockDim.x*up) { int relUpX0, relInY; fast_div_mod<tileUpW>(relUpX0, relInY, idx); int relInX0 = relUpX0 / up; int src0 = relInX0 + tileInW * relInY; int dst = relInY * tileUpW + relUpX0; vec2_t v = InternalType<T>::zero_vec2(); scalar_t a = s_tileIn[src0]; if (p0) // (phaseInX == 0) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 0]; a = s_tileIn[src0 + step + 1]; v.y += a * (scalar_t)c_fu[step * up + 1]; } } else // (phaseInX == 1) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 1]; v.y += a * (scalar_t)c_fu[step * up + 0]; a = s_tileIn[src0 + step + 1]; } } s_tileUpX[dst+0] = v.x; s_tileUpX[dst+1] = v.y; } } // Vertical upsampling & nonlinearity. __syncthreads(); int groupMask = 15 << ((threadIdx.x & 31) & ~3); int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH : 0; // Skip already written signs. int sShapeMaxY = MIN(p.sShape.y, tileOutY * down + tileUpH); // Avoid out-of-tile sign writes. if (up == 4) { minY -= 3; // Adjust according to block height. 
for (int idx = threadIdx.x; idx < tileUpW * tileUpH_up / up; idx += blockDim.x) { int relUpX, relInY0; fast_div_mod<tileUpW>(relUpX, relInY0, idx); int relUpY0 = relInY0 * up; int src0 = relInY0 * tileUpW + relUpX; int dst = relUpY0 * tileUpW + relUpX; vec4_t v = InternalType<T>::zero_vec4(); scalar_t a = s_tileUpX[src0]; if (phaseInY == 0) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 0]; a = s_tileUpX[src0 + (step + 1) * tileUpW]; v.y += a * (scalar_t)c_fu[step * up + 3]; v.z += a * (scalar_t)c_fu[step * up + 2]; v.w += a * (scalar_t)c_fu[step * up + 1]; } } else if (phaseInY == 1) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 1]; v.y += a * (scalar_t)c_fu[step * up + 0]; a = s_tileUpX[src0 + (step + 1) * tileUpW]; v.z += a * (scalar_t)c_fu[step * up + 3]; v.w += a * (scalar_t)c_fu[step * up + 2]; } } else if (phaseInY == 2) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 2]; v.y += a * (scalar_t)c_fu[step * up + 1]; v.z += a * (scalar_t)c_fu[step * up + 0]; a = s_tileUpX[src0 + (step + 1) * tileUpW]; v.w += a * (scalar_t)c_fu[step * up + 3]; } } else // (phaseInY == 3) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 3]; v.y += a * (scalar_t)c_fu[step * up + 2]; v.z += a * (scalar_t)c_fu[step * up + 1]; v.w += a * (scalar_t)c_fu[step * up + 0]; a = s_tileUpX[src0 + (step + 1) * tileUpW]; } } int x = tileOutX * down + relUpX; int y = tileOutY * down + relUpY0; int signX = x + p.sOfs.x; int signY = y + p.sOfs.y; int signZ = blockIdx.z + p.blockZofs; int signXb = signX >> 2; index_t si0 = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); index_t si1 = si0 + p.sShape.x; index_t si2 = si0 + p.sShape.x * 2; index_t si3 = si0 + p.sShape.x * 3; v.x *= (scalar_t)((float)up * (float)up * p.gain); v.y *= (scalar_t)((float)up * (float)up * p.gain); v.z *= (scalar_t)((float)up * (float)up * p.gain); v.w *= (scalar_t)((float)up * (float)up * p.gain); if (signWrite) { if (!enableWriteSkip) { // Determine and write signs. int sx = __float_as_uint(v.x) >> 31 << 0; int sy = __float_as_uint(v.y) >> 31 << 8; int sz = __float_as_uint(v.z) >> 31 << 16; int sw = __float_as_uint(v.w) >> 31 << 24; if (sx) v.x *= p.slope; if (sy) v.y *= p.slope; if (sz) v.z *= p.slope; if (sw) v.w *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType<T>::clamp(v.x, p.clamp); } if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType<T>::clamp(v.y, p.clamp); } if (fabsf(v.z) > p.clamp) { sz = 2 << 16; v.z = InternalType<T>::clamp(v.z, p.clamp); } if (fabsf(v.w) > p.clamp) { sw = 2 << 24; v.w = InternalType<T>::clamp(v.w, p.clamp); } if ((uint32_t)signXb < p.swLimit && signY >= minY) { // Combine signs. uint32_t s = sx + sy + sw + sz; s <<= (signX & 3) << 1; s |= __shfl_xor_sync(groupMask, s, 1); s |= __shfl_xor_sync(groupMask, s, 2); // Write signs. if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } if ((uint32_t)(signY + 2) < sShapeMaxY) { p.s[si2] = (unsigned char)(s >> 16); } if ((uint32_t)(signY + 3) < sShapeMaxY) { p.s[si3] = (unsigned char)(s >> 24); } } } else { // Determine and write signs. 
if ((uint32_t)signXb < p.swLimit && signY >= minY) { int sx = __float_as_uint(v.x) >> 31 << 0; int sy = __float_as_uint(v.y) >> 31 << 8; int sz = __float_as_uint(v.z) >> 31 << 16; int sw = __float_as_uint(v.w) >> 31 << 24; if (sx) v.x *= p.slope; if (sy) v.y *= p.slope; if (sz) v.z *= p.slope; if (sw) v.w *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType<T>::clamp(v.x, p.clamp); } if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType<T>::clamp(v.y, p.clamp); } if (fabsf(v.z) > p.clamp) { sz = 2 << 16; v.z = InternalType<T>::clamp(v.z, p.clamp); } if (fabsf(v.w) > p.clamp) { sw = 2 << 24; v.w = InternalType<T>::clamp(v.w, p.clamp); } // Combine signs. uint32_t s = sx + sy + sw + sz; s <<= (signX & 3) << 1; s |= __shfl_xor_sync(groupMask, s, 1); s |= __shfl_xor_sync(groupMask, s, 2); // Write signs. if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } if ((uint32_t)(signY + 2) < sShapeMaxY) { p.s[si2] = (unsigned char)(s >> 16); } if ((uint32_t)(signY + 3) < sShapeMaxY) { p.s[si3] = (unsigned char)(s >> 24); } } else { // Just compute the values. if (v.x < 0.f) v.x *= p.slope; v.x = InternalType<T>::clamp(v.x, p.clamp); if (v.y < 0.f) v.y *= p.slope; v.y = InternalType<T>::clamp(v.y, p.clamp); if (v.z < 0.f) v.z *= p.slope; v.z = InternalType<T>::clamp(v.z, p.clamp); if (v.w < 0.f) v.w *= p.slope; v.w = InternalType<T>::clamp(v.w, p.clamp); } } } else if (signRead) // Read signs and apply. { if ((uint32_t)signXb < p.swLimit) { int ss = (signX & 3) << 1; if ((uint32_t)(signY + 0) < p.sShape.y) { int s = p.s[si0] >> ss; if (s & 1) v.x *= p.slope; if (s & 2) v.x = 0.f; } if ((uint32_t)(signY + 1) < p.sShape.y) { int s = p.s[si1] >> ss; if (s & 1) v.y *= p.slope; if (s & 2) v.y = 0.f; } if ((uint32_t)(signY + 2) < p.sShape.y) { int s = p.s[si2] >> ss; if (s & 1) v.z *= p.slope; if (s & 2) v.z = 0.f; } if ((uint32_t)(signY + 3) < p.sShape.y) { int s = p.s[si3] >> ss; if (s & 1) v.w *= p.slope; if (s & 2) v.w = 0.f; } } } else // Forward pass with no sign write. { if (v.x < 0.f) v.x *= p.slope; v.x = InternalType<T>::clamp(v.x, p.clamp); if (v.y < 0.f) v.y *= p.slope; v.y = InternalType<T>::clamp(v.y, p.clamp); if (v.z < 0.f) v.z *= p.slope; v.z = InternalType<T>::clamp(v.z, p.clamp); if (v.w < 0.f) v.w *= p.slope; v.w = InternalType<T>::clamp(v.w, p.clamp); } s_tileUpXY[dst + 0 * tileUpW] = v.x; if (relUpY0 + 1 < tileUpH) s_tileUpXY[dst + 1 * tileUpW] = v.y; if (relUpY0 + 2 < tileUpH) s_tileUpXY[dst + 2 * tileUpW] = v.z; if (relUpY0 + 3 < tileUpH) s_tileUpXY[dst + 3 * tileUpW] = v.w; } } else if (up == 2) { minY -= 1; // Adjust according to block height. 
for (int idx = threadIdx.x; idx < tileUpW * tileUpH_up / up; idx += blockDim.x) { int relUpX, relInY0; fast_div_mod<tileUpW>(relUpX, relInY0, idx); int relUpY0 = relInY0 * up; int src0 = relInY0 * tileUpW + relUpX; int dst = relUpY0 * tileUpW + relUpX; vec2_t v = InternalType<T>::zero_vec2(); scalar_t a = s_tileUpX[src0]; if (phaseInY == 0) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 0]; a = s_tileUpX[src0 + (step + 1) * tileUpW]; v.y += a * (scalar_t)c_fu[step * up + 1]; } } else // (phaseInY == 1) { #pragma unroll for (int step = 0; step < fuSize / up; step++) { v.x += a * (scalar_t)c_fu[step * up + 1]; v.y += a * (scalar_t)c_fu[step * up + 0]; a = s_tileUpX[src0 + (step + 1) * tileUpW]; } } int x = tileOutX * down + relUpX; int y = tileOutY * down + relUpY0; int signX = x + p.sOfs.x; int signY = y + p.sOfs.y; int signZ = blockIdx.z + p.blockZofs; int signXb = signX >> 2; index_t si0 = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); index_t si1 = si0 + p.sShape.x; v.x *= (scalar_t)((float)up * (float)up * p.gain); v.y *= (scalar_t)((float)up * (float)up * p.gain); if (signWrite) { if (!enableWriteSkip) { // Determine and write signs. int sx = __float_as_uint(v.x) >> 31 << 0; int sy = __float_as_uint(v.y) >> 31 << 8; if (sx) v.x *= p.slope; if (sy) v.y *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType<T>::clamp(v.x, p.clamp); } if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType<T>::clamp(v.y, p.clamp); } if ((uint32_t)signXb < p.swLimit && signY >= minY) { // Combine signs. int s = sx + sy; s <<= signXo; s |= __shfl_xor_sync(groupMask, s, 1); s |= __shfl_xor_sync(groupMask, s, 2); // Write signs. if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } } } else { // Determine and write signs. if ((uint32_t)signXb < p.swLimit && signY >= minY) { int sx = __float_as_uint(v.x) >> 31 << 0; int sy = __float_as_uint(v.y) >> 31 << 8; if (sx) v.x *= p.slope; if (sy) v.y *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2 << 0; v.x = InternalType<T>::clamp(v.x, p.clamp); } if (fabsf(v.y) > p.clamp) { sy = 2 << 8; v.y = InternalType<T>::clamp(v.y, p.clamp); } // Combine signs. int s = sx + sy; s <<= signXo; s |= __shfl_xor_sync(groupMask, s, 1); s |= __shfl_xor_sync(groupMask, s, 2); // Write signs. if ((uint32_t)(signY + 0) < sShapeMaxY) { p.s[si0] = (unsigned char)(s >> 0); } if ((uint32_t)(signY + 1) < sShapeMaxY) { p.s[si1] = (unsigned char)(s >> 8); } } else { // Just compute the values. if (v.x < 0.f) v.x *= p.slope; v.x = InternalType<T>::clamp(v.x, p.clamp); if (v.y < 0.f) v.y *= p.slope; v.y = InternalType<T>::clamp(v.y, p.clamp); } } } else if (signRead) // Read signs and apply. { if ((uint32_t)signXb < p.swLimit) { if ((uint32_t)(signY + 0) < p.sShape.y) { int s = p.s[si0] >> signXo; if (s & 1) v.x *= p.slope; if (s & 2) v.x = 0.f; } if ((uint32_t)(signY + 1) < p.sShape.y) { int s = p.s[si1] >> signXo; if (s & 1) v.y *= p.slope; if (s & 2) v.y = 0.f; } } } else // Forward pass with no sign write. { if (v.x < 0.f) v.x *= p.slope; v.x = InternalType<T>::clamp(v.x, p.clamp); if (v.y < 0.f) v.y *= p.slope; v.y = InternalType<T>::clamp(v.y, p.clamp); } if (!downInline) { // Write into temporary buffer. s_tileUpXY[dst] = v.x; if (relUpY0 < tileUpH - 1) s_tileUpXY[dst + tileUpW] = v.y; } else { // Write directly into output buffer. 
if ((uint32_t)x < p.yShape.x) { int ymax = MIN(p.yShape.y, tileUpH + tileOutY * down); index_t ofs = x * get_stride<index_t>(p.yStride.x) + y * get_stride<index_t>(p.yStride.y) + mapOfsOut; if ((uint32_t)y + 0 < p.yShape.y) *((T*)((char*)p.y + ofs)) = (T)(v.x * (scalar_t)c_fd[0]); if ((uint32_t)y + 1 < ymax) *((T*)((char*)p.y + ofs + get_stride<index_t>(p.yStride.y))) = (T)(v.y * (scalar_t)c_fd[0]); } } } } } else if (filterMode == MODE_FUSD || filterMode == MODE_FUFD) { // Full upsampling filter. if (up == 2) { // 2 x 2-wide. __syncthreads(); int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH + p.sOfs.y : 0; // Skip already written signs. for (int idx = threadIdx.x * 4; idx < tileUpW * tileUpH; idx += blockDim.x * 4) { int relUpX0, relUpY0; fast_div_mod<tileUpW>(relUpX0, relUpY0, idx); int relInX0 = CEIL_DIV(relUpX0 - phaseInX, up); int relInY0 = CEIL_DIV(relUpY0 - phaseInY, up); int src0 = relInX0 + tileInW * relInY0; int tap0y = (relInY0 * up + phaseInY - relUpY0); #define X_LOOP(TAPY, PX) \ for (int sx = 0; sx < fuSize / up; sx++) \ { \ v.x += a * (scalar_t)c_fu[(sx * up + (((PX) - 0) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; \ v.z += b * (scalar_t)c_fu[(sx * up + (((PX) - 0) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; if ((PX) == 0) { a = b; b = s_tileIn[src0 + 2 + sx + sy * tileInW]; } \ v.y += a * (scalar_t)c_fu[(sx * up + (((PX) - 1) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; \ v.w += b * (scalar_t)c_fu[(sx * up + (((PX) - 1) & (up - 1))) + (sy * up + (TAPY)) * MAX_FILTER_SIZE]; if ((PX) == 1) { a = b; b = s_tileIn[src0 + 2 + sx + sy * tileInW]; } \ } vec4_t v = InternalType<T>::zero_vec4(); if (tap0y == 0 && phaseInX == 0) #pragma unroll for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; #pragma unroll X_LOOP(0, 0) } if (tap0y == 0 && phaseInX == 1) #pragma unroll for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; #pragma unroll X_LOOP(0, 1) } if (tap0y == 1 && phaseInX == 0) #pragma unroll for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; #pragma unroll X_LOOP(1, 0) } if (tap0y == 1 && phaseInX == 1) #pragma unroll for (int sy = 0; sy < fuSize / up; sy++) { scalar_t a = s_tileIn[src0 + sy * tileInW]; scalar_t b = s_tileIn[src0 + sy * tileInW + 1]; #pragma unroll X_LOOP(1, 1) } #undef X_LOOP int x = tileOutX * down + relUpX0; int y = tileOutY * down + relUpY0; int signX = x + p.sOfs.x; int signY = y + p.sOfs.y; int signZ = blockIdx.z + p.blockZofs; int signXb = signX >> 2; index_t si = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); v.x *= (scalar_t)((float)up * (float)up * p.gain); v.y *= (scalar_t)((float)up * (float)up * p.gain); v.z *= (scalar_t)((float)up * (float)up * p.gain); v.w *= (scalar_t)((float)up * (float)up * p.gain); if (signWrite) { if (!enableWriteSkip) { // Determine and write signs. 
int sx = __float_as_uint(v.x) >> 31; int sy = __float_as_uint(v.y) >> 31; int sz = __float_as_uint(v.z) >> 31; int sw = __float_as_uint(v.w) >> 31; if (sx) v.x *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2; v.x = InternalType<T>::clamp(v.x, p.clamp); } if (sy) v.y *= p.slope; if (fabsf(v.y) > p.clamp) { sy = 2; v.y = InternalType<T>::clamp(v.y, p.clamp); } if (sz) v.z *= p.slope; if (fabsf(v.z) > p.clamp) { sz = 2; v.z = InternalType<T>::clamp(v.z, p.clamp); } if (sw) v.w *= p.slope; if (fabsf(v.w) > p.clamp) { sw = 2; v.w = InternalType<T>::clamp(v.w, p.clamp); } if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) { p.s[si] = sx + (sy << 2) + (sz << 4) + (sw << 6); } } else { // Determine and write signs. if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) { int sx = __float_as_uint(v.x) >> 31; int sy = __float_as_uint(v.y) >> 31; int sz = __float_as_uint(v.z) >> 31; int sw = __float_as_uint(v.w) >> 31; if (sx) v.x *= p.slope; if (fabsf(v.x) > p.clamp) { sx = 2; v.x = InternalType<T>::clamp(v.x, p.clamp); } if (sy) v.y *= p.slope; if (fabsf(v.y) > p.clamp) { sy = 2; v.y = InternalType<T>::clamp(v.y, p.clamp); } if (sz) v.z *= p.slope; if (fabsf(v.z) > p.clamp) { sz = 2; v.z = InternalType<T>::clamp(v.z, p.clamp); } if (sw) v.w *= p.slope; if (fabsf(v.w) > p.clamp) { sw = 2; v.w = InternalType<T>::clamp(v.w, p.clamp); } p.s[si] = sx + (sy << 2) + (sz << 4) + (sw << 6); } else { // Just compute the values. if (v.x < 0.f) v.x *= p.slope; v.x = InternalType<T>::clamp(v.x, p.clamp); if (v.y < 0.f) v.y *= p.slope; v.y = InternalType<T>::clamp(v.y, p.clamp); if (v.z < 0.f) v.z *= p.slope; v.z = InternalType<T>::clamp(v.z, p.clamp); if (v.w < 0.f) v.w *= p.slope; v.w = InternalType<T>::clamp(v.w, p.clamp); } } } else if (signRead) // Read sign and apply. { if ((uint32_t)signY < p.sShape.y) { int s = 0; if ((uint32_t)signXb < p.swLimit) s = p.s[si]; if ((uint32_t)signXb + 1 < p.swLimit) s |= p.s[si + 1] << 8; s >>= (signX & 3) << 1; if (s & 0x01) v.x *= p.slope; if (s & 0x02) v.x = 0.f; if (s & 0x04) v.y *= p.slope; if (s & 0x08) v.y = 0.f; if (s & 0x10) v.z *= p.slope; if (s & 0x20) v.z = 0.f; if (s & 0x40) v.w *= p.slope; if (s & 0x80) v.w = 0.f; } } else // Forward pass with no sign write. { if (v.x < 0.f) v.x *= p.slope; v.x = InternalType<T>::clamp(v.x, p.clamp); if (v.y < 0.f) v.y *= p.slope; v.y = InternalType<T>::clamp(v.y, p.clamp); if (v.z < 0.f) v.z *= p.slope; v.z = InternalType<T>::clamp(v.z, p.clamp); if (v.w < 0.f) v.w *= p.slope; v.w = InternalType<T>::clamp(v.w, p.clamp); } s_tileUpXY[idx + 0] = v.x; s_tileUpXY[idx + 1] = v.y; s_tileUpXY[idx + 2] = v.z; s_tileUpXY[idx + 3] = v.w; } } else if (up == 1) { __syncthreads(); uint32_t groupMask = 15 << ((threadIdx.x & 31) & ~3); int minY = tileOutY ? (tileOutY - tileOutH) * down + tileUpH : 0; // Skip already written signs. for (int idx = threadIdx.x; idx < tileUpW * tileUpH; idx += blockDim.x) { int relUpX0, relUpY0; fast_div_mod<tileUpW>(relUpX0, relUpY0, idx); scalar_t v = s_tileIn[idx] * (scalar_t)c_fu[0]; // 1x1 filter. int x = tileOutX * down + relUpX0; int y = tileOutY * down + relUpY0; int signX = x + p.sOfs.x; int signY = y + p.sOfs.y; int signZ = blockIdx.z + p.blockZofs; int signXb = signX >> 2; index_t si = signXb + p.sShape.x * (signY + (index_t)p.sShape.y * signZ); v *= (scalar_t)((float)up * (float)up * p.gain); if (signWrite) { if (!enableWriteSkip) { // Determine and write sign. 
uint32_t s = 0; uint32_t signXbit = (1u << signXo); if (v < 0.f) { s = signXbit; v *= p.slope; } if (fabsf(v) > p.clamp) { s = signXbit * 2; v = InternalType<T>::clamp(v, p.clamp); } if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) { s += __shfl_xor_sync(groupMask, s, 1); // Coalesce. s += __shfl_xor_sync(groupMask, s, 2); // Coalesce. p.s[si] = s; // Write. } } else { // Determine and write sign. if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y && signY >= minY) { uint32_t s = 0; uint32_t signXbit = (1u << signXo); if (v < 0.f) { s = signXbit; v *= p.slope; } if (fabsf(v) > p.clamp) { s = signXbit * 2; v = InternalType<T>::clamp(v, p.clamp); } s += __shfl_xor_sync(groupMask, s, 1); // Coalesce. s += __shfl_xor_sync(groupMask, s, 2); // Coalesce. p.s[si] = s; // Write. } else { // Just compute the value. if (v < 0.f) v *= p.slope; v = InternalType<T>::clamp(v, p.clamp); } } } else if (signRead) { // Read sign and apply if within sign tensor bounds. if ((uint32_t)signXb < p.swLimit && (uint32_t)signY < p.sShape.y) { int s = p.s[si]; s >>= signXo; if (s & 1) v *= p.slope; if (s & 2) v = 0.f; } } else // Forward pass with no sign write. { if (v < 0.f) v *= p.slope; v = InternalType<T>::clamp(v, p.clamp); } if (!downInline) // Write into temporary buffer. s_tileUpXY[idx] = v; else if ((uint32_t)x < p.yShape.x && (uint32_t)y < p.yShape.y) // Write directly into output buffer *((T*)((char*)p.y + (x * get_stride<index_t>(p.yStride.x) + y * get_stride<index_t>(p.yStride.y) + mapOfsOut))) = (T)(v * (scalar_t)c_fd[0]); } } } // Downsampling. if (filterMode == MODE_SUSD || filterMode == MODE_FUSD) { // Horizontal downsampling. __syncthreads(); if (down == 4 && tileOutW % 4 == 0) { // Calculate 4 pixels at a time. for (int idx = threadIdx.x * 4; idx < tileOutW * tileUpH; idx += blockDim.x * 4) { int relOutX0, relUpY; fast_div_mod<tileOutW>(relOutX0, relUpY, idx); int relUpX0 = relOutX0 * down; int src0 = relUpY * tileUpW + relUpX0; vec4_t v = InternalType<T>::zero_vec4(); #pragma unroll for (int step = 0; step < fdSize; step++) { v.x += s_tileUpXY[src0 + 0 + step] * (scalar_t)c_fd[step]; v.y += s_tileUpXY[src0 + 4 + step] * (scalar_t)c_fd[step]; v.z += s_tileUpXY[src0 + 8 + step] * (scalar_t)c_fd[step]; v.w += s_tileUpXY[src0 + 12 + step] * (scalar_t)c_fd[step]; } s_tileDownX[idx+0] = v.x; s_tileDownX[idx+1] = v.y; s_tileDownX[idx+2] = v.z; s_tileDownX[idx+3] = v.w; } } else if ((down == 2 || down == 4) && (tileOutW % 2 == 0)) { // Calculate 2 pixels at a time. for (int idx = threadIdx.x * 2; idx < tileOutW * tileUpH; idx += blockDim.x * 2) { int relOutX0, relUpY; fast_div_mod<tileOutW>(relOutX0, relUpY, idx); int relUpX0 = relOutX0 * down; int src0 = relUpY * tileUpW + relUpX0; vec2_t v = InternalType<T>::zero_vec2(); #pragma unroll for (int step = 0; step < fdSize; step++) { v.x += s_tileUpXY[src0 + 0 + step] * (scalar_t)c_fd[step]; v.y += s_tileUpXY[src0 + down + step] * (scalar_t)c_fd[step]; } s_tileDownX[idx+0] = v.x; s_tileDownX[idx+1] = v.y; } } else { // Calculate 1 pixel at a time. for (int idx = threadIdx.x; idx < tileOutW * tileUpH; idx += blockDim.x) { int relOutX0, relUpY; fast_div_mod<tileOutW>(relOutX0, relUpY, idx); int relUpX0 = relOutX0 * down; int src = relUpY * tileUpW + relUpX0; scalar_t v = 0.f; #pragma unroll for (int step = 0; step < fdSize; step++) v += s_tileUpXY[src + step] * (scalar_t)c_fd[step]; s_tileDownX[idx] = v; } } // Vertical downsampling & store output tile. 
__syncthreads(); for (int idx = threadIdx.x; idx < tileOutW * tileOutH; idx += blockDim.x) { int relOutX, relOutY0; fast_div_mod<tileOutW>(relOutX, relOutY0, idx); int relUpY0 = relOutY0 * down; int src0 = relUpY0 * tileOutW + relOutX; scalar_t v = 0; #pragma unroll for (int step = 0; step < fdSize; step++) v += s_tileDownX[src0 + step * tileOutW] * (scalar_t)c_fd[step]; int outX = tileOutX + relOutX; int outY = tileOutY + relOutY0; if (outX < p.yShape.x & outY < p.yShape.y) *((T*)((char*)p.y + (outX * get_stride<index_t>(p.yStride.x) + outY * get_stride<index_t>(p.yStride.y) + mapOfsOut))) = (T)v; } } else if (filterMode == MODE_SUFD || filterMode == MODE_FUFD) { // Full downsampling filter. if (down == 2) { // 2-wide. __syncthreads(); for (int idx = threadIdx.x * 2; idx < tileOutW * tileOutH; idx += blockDim.x * 2) { int relOutX0, relOutY0; fast_div_mod<tileOutW>(relOutX0, relOutY0, idx); int relUpX0 = relOutX0 * down; int relUpY0 = relOutY0 * down; int src0 = relUpY0 * tileUpW + relUpX0; vec2_t v = InternalType<T>::zero_vec2(); #pragma unroll for (int sy = 0; sy < fdSize; sy++) #pragma unroll for (int sx = 0; sx < fdSize; sx++) { v.x += s_tileUpXY[src0 + 0 + sx + sy * tileUpW] * (scalar_t)c_fd[sx + sy * MAX_FILTER_SIZE]; v.y += s_tileUpXY[src0 + 2 + sx + sy * tileUpW] * (scalar_t)c_fd[sx + sy * MAX_FILTER_SIZE]; } int outX = tileOutX + relOutX0; int outY = tileOutY + relOutY0; if ((uint32_t)outY < p.yShape.y) { index_t ofs = outX * get_stride<index_t>(p.yStride.x) + outY * get_stride<index_t>(p.yStride.y) + mapOfsOut; if (outX + 0 < p.yShape.x) *((T*)((char*)p.y + ofs)) = (T)v.x; if (outX + 1 < p.yShape.x) *((T*)((char*)p.y + ofs + get_stride<index_t>(p.yStride.x))) = (T)v.y; } } } else if (down == 1 && !downInline) { // Thread per pixel. __syncthreads(); for (int idx = threadIdx.x; idx < tileOutW * tileOutH; idx += blockDim.x) { int relOutX0, relOutY0; fast_div_mod<tileOutW>(relOutX0, relOutY0, idx); scalar_t v = s_tileUpXY[idx] * (scalar_t)c_fd[0]; // 1x1 filter. int outX = tileOutX + relOutX0; int outY = tileOutY + relOutY0; if ((uint32_t)outX < p.yShape.x && (uint32_t)outY < p.yShape.y) *((T*)((char*)p.y + (outX * get_stride<index_t>(p.yStride.x) + outY * get_stride<index_t>(p.yStride.y) + mapOfsOut))) = (T)v; } } } if (!enableXrep) break; } } //------------------------------------------------------------------------ // Compute activation function and signs for upsampled data tensor, modifying data tensor in-place. Used for accelerating the generic variant. // Sign tensor is known to be contiguous, and p.x and p.s have the same z, w dimensions. 64-bit indexing is always used. template <class T, bool signWrite, bool signRead> static __global__ void filtered_lrelu_act_kernel(filtered_lrelu_act_kernel_params p) { typedef typename InternalType<T>::scalar_t scalar_t; // Indexing. int32_t x = threadIdx.x + blockIdx.x * blockDim.x; int32_t ymax = signWrite ? p.sShape.y : p.xShape.y; int32_t qmax = p.xShape.z * p.xShape.w; // Combined minibatch*channel maximum index. // Loop to accommodate oversized tensors. for (int32_t q = blockIdx.z; q < qmax; q += gridDim.z) for (int32_t y = blockIdx.y; y < ymax; y += gridDim.y) { // Extract z and w (channel, minibatch index). int32_t w = q / p.xShape.z; int32_t z = q - w * p.xShape.z; // Choose behavior based on sign read/write mode. if (signWrite) { // Process value if in p.x. 
uint32_t s = 0; if (x < p.xShape.x && y < p.xShape.y) { int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; T* pv = ((T*)p.x) + ix; scalar_t v = (scalar_t)(*pv); // Gain, LReLU, clamp. v *= p.gain; if (v < 0.f) { v *= p.slope; s = 1; // Sign. } if (fabsf(v) > p.clamp) { v = InternalType<T>::clamp(v, p.clamp); s = 2; // Clamp. } *pv = (T)v; // Write value. } // Coalesce into threads 0 and 16 of warp. uint32_t m = (threadIdx.x & 16) ? 0xffff0000u : 0x0000ffffu; s <<= ((threadIdx.x & 15) << 1); // Shift into place. s |= __shfl_xor_sync(m, s, 1); // Distribute. s |= __shfl_xor_sync(m, s, 2); s |= __shfl_xor_sync(m, s, 4); s |= __shfl_xor_sync(m, s, 8); // Write signs if leader and in p.s. if (!(threadIdx.x & 15) && x < p.sShape.x) // y is always in. { uint64_t is = x + p.sShape.x * (y + (int64_t)p.sShape.y * q); // Contiguous. ((uint32_t*)p.s)[is >> 4] = s; } } else if (signRead) { // Process value if in p.x. if (x < p.xShape.x) // y is always in. { int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; T* pv = ((T*)p.x) + ix; scalar_t v = (scalar_t)(*pv); v *= p.gain; // Apply sign buffer offset. uint32_t sx = x + p.sOfs.x; uint32_t sy = y + p.sOfs.y; // Read and apply signs if we land inside valid region of sign buffer. if (sx < p.sShape.x && sy < p.sShape.y) { uint64_t is = (sx >> 2) + (p.sShape.x >> 2) * (sy + (uint64_t)p.sShape.y * q); // Contiguous. unsigned char s = p.s[is]; s >>= (sx & 3) << 1; // Shift into place. if (s & 1) // Sign? v *= p.slope; if (s & 2) // Clamp? v = 0.f; } *pv = (T)v; // Write value. } } else { // Forward pass with no sign write. Process value if in p.x. if (x < p.xShape.x) // y is always in. { int64_t ix = x * p.xStride.x + y * p.xStride.y + z * p.xStride.z + w * p.xStride.w; T* pv = ((T*)p.x) + ix; scalar_t v = (scalar_t)(*pv); v *= p.gain; if (v < 0.f) v *= p.slope; if (fabsf(v) > p.clamp) v = InternalType<T>::clamp(v, p.clamp); *pv = (T)v; // Write value. } } } } template <class T, bool signWrite, bool signRead> void* choose_filtered_lrelu_act_kernel(void) { return (void*)filtered_lrelu_act_kernel<T, signWrite, signRead>; } //------------------------------------------------------------------------ // CUDA kernel selection. template <class T, class index_t, bool signWrite, bool signRead> filtered_lrelu_kernel_spec choose_filtered_lrelu_kernel(const filtered_lrelu_kernel_params& p, int sharedKB) { filtered_lrelu_kernel_spec s = { 0 }; // Return the first matching kernel. #define CASE(SH, U, FU, D, FD, MODE, TW, TH, W, XR, WS) \ if (sharedKB >= SH) \ if ((p.fuShape.y == 0 && (MODE == MODE_SUSD || MODE == MODE_SUFD)) || (p.fuShape.y > 0 && (MODE == MODE_FUSD || MODE == MODE_FUFD))) \ if ((p.fdShape.y == 0 && (MODE == MODE_SUSD || MODE == MODE_FUSD)) || (p.fdShape.y > 0 && (MODE == MODE_SUFD || MODE == MODE_FUFD))) \ if (p.up == U && p.fuShape.x <= FU && p.fuShape.y <= FU && p.down == D && p.fdShape.x <= FD && p.fdShape.y <= FD) \ { \ static_assert((D*TW % 4) == 0, "down * tileWidth must be divisible by 4"); \ static_assert(FU % U == 0, "upscaling filter size must be multiple of upscaling factor"); \ static_assert(FD % D == 0, "downscaling filter size must be multiple of downscaling factor"); \ s.setup = (void*)setup_filters_kernel; \ s.exec = (void*)filtered_lrelu_kernel<T, index_t, SH, signWrite, signRead, MODE, U, FU, D, FD, TW, TH, W*32, !!XR, !!WS>; \ s.tileOut = make_int2(TW, TH); \ s.numWarps = W; \ s.xrep = XR; \ s.dynamicSharedKB = (SH == 48) ? 
0 : SH; \ return s; \ } // Launch parameters for various kernel specializations. // Small filters must be listed before large filters, otherwise the kernel for larger filter will always match first. // Kernels that use more shared memory must be listed before those that use less, for the same reason. CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/1,1, /*mode*/MODE_FUFD, /*tw,th,warps,xrep,wskip*/64, 178, 32, 0, 0) // 1t-upf1-downf1 CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/152, 95, 16, 0, 0) // 4t-ups2-downf1 CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,8, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/56, 22, 16, 0, 0) // 4t-upf1-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/56, 29, 16, 11, 0) // 4t-ups2-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/60, 28, 16, 0, 0) // 4t-upf2-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/2,8, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/56, 28, 16, 0, 0) // 4t-ups2-downf2 CASE(/*sharedKB*/48, /*up,fu*/4,16, /*down,fd*/2,8, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/56, 31, 16, 11, 0) // 4t-ups4-downs2 CASE(/*sharedKB*/48, /*up,fu*/4,16, /*down,fd*/2,8, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/56, 36, 16, 0, 0) // 4t-ups4-downf2 CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/4,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 22, 16, 12, 0) // 4t-ups2-downs4 CASE(/*sharedKB*/48, /*up,fu*/2,8, /*down,fd*/4,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/29, 15, 16, 0, 0) // 4t-upf2-downs4 CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/96, 150, 28, 0, 0) // 6t-ups2-downf1 CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,12, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/32, 35, 24, 0, 0) // 6t-upf1-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 46, 16, 10, 0) // 6t-ups2-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/58, 28, 24, 8, 0) // 6t-upf2-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/2,12, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/52, 28, 16, 0, 0) // 6t-ups2-downf2 CASE(/*sharedKB*/48, /*up,fu*/4,24, /*down,fd*/2,12, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 51, 16, 5, 0) // 6t-ups4-downs2 CASE(/*sharedKB*/48, /*up,fu*/4,24, /*down,fd*/2,12, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 56, 16, 6, 0) // 6t-ups4-downf2 CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 18, 16, 12, 0) // 6t-ups2-downs4 CASE(/*sharedKB*/96, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/27, 31, 32, 6, 0) // 6t-upf2-downs4 96kB CASE(/*sharedKB*/48, /*up,fu*/2,12, /*down,fd*/4,24, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/27, 13, 24, 0, 0) // 6t-upf2-downs4 CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/1,1, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/148, 89, 24, 0, 0) // 8t-ups2-downf1 CASE(/*sharedKB*/48, /*up,fu*/1,1, /*down,fd*/2,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/32, 31, 16, 5, 0) // 8t-upf1-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/2,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 41, 16, 9, 0) // 8t-ups2-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/2,16, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/56, 26, 24, 0, 0) // 8t-upf2-downs2 CASE(/*sharedKB*/48, /*up,fu*/2,16, 
/*down,fd*/2,16, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 40, 16, 0, 0) // 8t-ups2-downf2 CASE(/*sharedKB*/48, /*up,fu*/4,32, /*down,fd*/2,16, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/32, 46, 24, 5, 0) // 8t-ups4-downs2 CASE(/*sharedKB*/48, /*up,fu*/4,32, /*down,fd*/2,16, /*mode*/MODE_SUFD, /*tw,th,warps,xrep,wskip*/32, 50, 16, 0, 0) // 8t-ups4-downf2 CASE(/*sharedKB*/96, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/24, 24, 32, 12, 1) // 8t-ups2-downs4 96kB CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_SUSD, /*tw,th,warps,xrep,wskip*/16, 13, 16, 10, 1) // 8t-ups2-downs4 CASE(/*sharedKB*/96, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/25, 28, 28, 4, 0) // 8t-upf2-downs4 96kB CASE(/*sharedKB*/48, /*up,fu*/2,16, /*down,fd*/4,32, /*mode*/MODE_FUSD, /*tw,th,warps,xrep,wskip*/25, 10, 24, 0, 0) // 8t-upf2-downs4 #undef CASE return s; // No kernel found. } //------------------------------------------------------------------------
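// Illustrative host-side check (separate from the file above; function name is hypothetical) of
// the fixed-point trick in fast_div_mod: for non-power-of-two N <= 256 it replaces i / N with
// (i * ((1<<24)/N + 1)) >> 24 under the assumption i < N*256. The brute-force loop below compares
// that quotient against exact integer division over the assumed range.
#include <cstdint>
#include <cstdio>

static bool check_fast_div(uint32_t N)
{
    const uint32_t mul = (1u << 24) / N + 1u;   // same constant the helper computes
    for (uint32_t i = 0; i < N * 256u; i++)
    {
        uint32_t approx = (i * mul) >> 24;      // fixed-point quotient
        if (approx != i / N)
        {
            printf("mismatch: N=%u i=%u approx=%u exact=%u\n",
                   (unsigned)N, (unsigned)i, (unsigned)approx, (unsigned)(i / N));
            return false;
        }
    }
    return true;
}

int main(void)
{
    for (uint32_t N = 1; N <= 256; N++)
        if (!check_fast_div(N))
            return 1;
    printf("fixed-point quotient matched i / N for all N in [1,256], i < N*256\n");
    return 0;
}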
4d2241b58ab29c8c9f66b9341975df93ead1e43c.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ComputeFreeVolumeGPU.cuh" #include "IntegratorHPMCMonoGPU.cuh" #include "IntegratorHPMCMonoImplicitGPU.cuh" #include "IntegratorHPMCMonoImplicitNewGPU.cuh" #include "ShapeSpheropolyhedron.h" namespace hpmc { namespace detail { //! HPMC kernels for ShapeSpheropolyhedron template hipError_t gpu_hpmc_free_volume<ShapeSpheropolyhedron >(const hpmc_free_volume_args_t &args, const typename ShapeSpheropolyhedron ::param_type *d_params); template hipError_t gpu_hpmc_update<ShapeSpheropolyhedron >(const hpmc_args_t& args, const typename ShapeSpheropolyhedron ::param_type *d_params); template hipError_t gpu_hpmc_implicit_count_overlaps<ShapeSpheropolyhedron >(const hpmc_implicit_args_t& args, const typename ShapeSpheropolyhedron ::param_type *d_params); template hipError_t gpu_hpmc_implicit_accept_reject<ShapeSpheropolyhedron >(const hpmc_implicit_args_t& args, const typename ShapeSpheropolyhedron ::param_type *d_params); template hipError_t gpu_hpmc_insert_depletants_queue<ShapeSpheropolyhedron >(const hpmc_implicit_args_new_t& args, const typename ShapeSpheropolyhedron ::param_type *d_params); template hipError_t gpu_hpmc_implicit_accept_reject_new<ShapeSpheropolyhedron >(const hpmc_implicit_args_new_t& args, const typename ShapeSpheropolyhedron ::param_type *d_params); }; // end namespace detail } // end namespace hpmc
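// Sketch of the launch-syntax translation that distinguishes the .hip files in this collection
// from their .cu counterparts: hipify rewrites the CUDA triple-chevron launch into
// hipLaunchKernelGGL with explicit grid, block, shared-memory, and stream arguments. Kernel and
// variable names here are hypothetical.
#include <hip/hip_runtime.h>

__global__ void scale_kernel(float* data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= factor;
}

void launch_scale(float* d_data, float factor, int n, hipStream_t stream)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA form: scale_kernel<<<grid, block, 0, stream>>>(d_data, factor, n);
    hipLaunchKernelGGL(scale_kernel, grid, block, 0, stream, d_data, factor, n);
}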
4d2241b58ab29c8c9f66b9341975df93ead1e43c.cu
// Copyright (c) 2009-2018 The Regents of the University of Michigan // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License. #include "ComputeFreeVolumeGPU.cuh" #include "IntegratorHPMCMonoGPU.cuh" #include "IntegratorHPMCMonoImplicitGPU.cuh" #include "IntegratorHPMCMonoImplicitNewGPU.cuh" #include "ShapeSpheropolyhedron.h" namespace hpmc { namespace detail { //! HPMC kernels for ShapeSpheropolyhedron template cudaError_t gpu_hpmc_free_volume<ShapeSpheropolyhedron >(const hpmc_free_volume_args_t &args, const typename ShapeSpheropolyhedron ::param_type *d_params); template cudaError_t gpu_hpmc_update<ShapeSpheropolyhedron >(const hpmc_args_t& args, const typename ShapeSpheropolyhedron ::param_type *d_params); template cudaError_t gpu_hpmc_implicit_count_overlaps<ShapeSpheropolyhedron >(const hpmc_implicit_args_t& args, const typename ShapeSpheropolyhedron ::param_type *d_params); template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeSpheropolyhedron >(const hpmc_implicit_args_t& args, const typename ShapeSpheropolyhedron ::param_type *d_params); template cudaError_t gpu_hpmc_insert_depletants_queue<ShapeSpheropolyhedron >(const hpmc_implicit_args_new_t& args, const typename ShapeSpheropolyhedron ::param_type *d_params); template cudaError_t gpu_hpmc_implicit_accept_reject_new<ShapeSpheropolyhedron >(const hpmc_implicit_args_new_t& args, const typename ShapeSpheropolyhedron ::param_type *d_params); }; // end namespace detail } // end namespace hpmc
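// Sketch of the explicit-instantiation pattern the file above relies on: a templated GPU driver
// is defined once and instantiated per shape type in a dedicated .cu translation unit, so other
// files only need the declaration and the linker resolves the rest. All names below are
// hypothetical stand-ins, not HOOMD-blue API.
#include <cuda_runtime.h>

template <class Shape>
__global__ void count_kernel(const Shape* shapes, int n, int* out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && shapes[i].overlaps())
        atomicAdd(out, 1);
}

template <class Shape>
cudaError_t gpu_count_overlaps(const Shape* d_shapes, int n, int* d_out)
{
    count_kernel<Shape><<<(n + 255) / 256, 256>>>(d_shapes, n, d_out);
    return cudaGetLastError();
}

// Toy shape standing in for ShapeSpheropolyhedron.
struct ShapeSphere
{
    float radius;
    __device__ bool overlaps() const { return radius > 1.0f; }
};

// Explicit instantiation: emits host and device code for this shape in this file only.
template cudaError_t gpu_count_overlaps<ShapeSphere>(const ShapeSphere*, int, int*);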
88dd28704257beffebd4138b4a6618f2be0e7693.hip
// !!! This is a file automatically generated by hipify!!! #include<stdio.h> #include<stdlib.h> #include<getopt.h> #include <assert.h> #include <hip/hip_runtime.h> #include <time.h> static char* program_name; // Usage void print_usage (FILE* stream, int exit_code) { fprintf (stream, "Usage: %s options\n", program_name); fprintf (stream, " -h --help Display this usage information.\n" " -f --file filename File containing coefficient matrix.\n" " -i --Ni int Number of elements in Y direction (default=512).\n" " -j --Nj int Number of elements in X direction (default=512).\n" " -n --iterations int Number of iterations (default=10000).\n" " -k --kernel [1,2] 1: unoptimized, 2: optimized kernel (default).\n" " -t --tilesize int Size of each thread block in kernel 2 (default=4).\n"); exit (exit_code); } // Host version of the Jacobi method void jacobiOnHost(float* x_next, float* A, float* x_now, float* b, int Ni, int Nj) { int i,j; float sigma; for (i=0; i<Ni; i++) { sigma = 0.0; for (j=0; j<Nj; j++) { if (i != j) sigma += A[i*Nj + j] * x_now[j]; } x_next[i] = (b[i] - sigma) / A[i*Nj + i]; } } // Device version of the Jacobi method __global__ void jacobiOnDevice(float* x_next, float* A, float* x_now, float* b, int Ni, int Nj) { float sigma = 0.0; int idx = threadIdx.x; for (int j=0; j<Nj; j++) { if (idx != j) sigma += A[idx*Nj + j] * x_now[j]; } x_next[idx] = (b[idx] - sigma) / A[idx*Nj + idx]; } // Optimized device version of the Jacobi method __global__ void jacobiOptimizedOnDevice(float* x_next, float* A, float* x_now, float* b, int Ni, int Nj) { // Optimization step 1: tiling int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < Ni) { float sigma = 0.0; // Optimization step 2: store index in register // Multiplication is not executed in every iteration. int idx_Ai = idx*Nj; // Tried to use prefetching, but then the result is terribly wrong and I don't know why.. /* float curr_A = A[idx_Ai]; float nxt_A; //printf("idx=%d\n",idx); for (int j=0; j<Nj-1; j++) { if (idx != j) nxt_A = A[idx_Ai + j + 1]; sigma += curr_A * x_now[j]; //sigma += A[idx_Ai + j] * x_now[j]; curr_A = nxt_A; //printf("curr_A=%f\n",curr_A); } if (idx != Nj-1) sigma += nxt_A * x_now[Nj-1]; x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx]; */ for (int j=0; j<Nj; j++) if (idx != j) sigma += A[idx_Ai + j] * x_now[j]; // Tried to use loop-ennrolling, but also here this gives a wrong result.. /* for (int j=0; j<Nj/4; j+=4) { if (idx != j) { sigma += A[idx_Ai + j] * x_now[j]; } if (idx != j+1) { sigma += A[idx_Ai + j+1] * x_now[j+1]; } if (idx != j+2) { sigma += A[idx_Ai + j+2] * x_now[j+2]; } if (idx != j+3) { sigma += A[idx_Ai + j+3] * x_now[j+3]; } }*/ x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx]; } } // device selection (copied from previous assignment) static void selectGpu(int *gpu_num, int *num_devs) { // gpu_num: (I/O): I: Default choice, // O: best device, changed only if more than one device // num_devs: (O) Number of found devices. int best = *gpu_num; hipGetDeviceCount(num_devs); if ( *num_devs > 1 ) { int dev_num; int max_cores = 0; for (dev_num = 0; dev_num < *num_devs; dev_num++) { hipDeviceProp_t dev_properties; hipGetDeviceProperties(&dev_properties, dev_num); if (max_cores < dev_properties.multiProcessorCount) { max_cores = dev_properties.multiProcessorCount; best = dev_num; } } *gpu_num = best; } } // device test (copied from previous assignment) static void testDevice(int devID) { // Check if we can run. Maybe do something more... 
hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, devID); if (deviceProp.major == 9999 && deviceProp.minor == 9999) { /* Simulated device. */ printf("There is no device supporting CUDA.\n"); hipDeviceReset(); } else printf("Using GPU device number %d.\n", devID); } int main(int argc, char *argv[]) { // initialize timing variables time_t start, end, start_h, end_h, start_d, end_d; float t_full, t_host, t_dev; start=clock(); // initialize data variables float *x_now, *x_next, *A, *b, *x_h, *x_d; float *x_now_d, *x_next_d, *A_d, *b_d; // initialize parameter variables int N, Ni, Nj, iter, kernel, tileSize; int ch; int i,k; char* fname; FILE* file; // Argument parsing static struct option long_options[] = { {"file", required_argument, NULL, 'f'}, {"Ni", optional_argument, NULL, 'i'}, {"Nj", optional_argument, NULL, 'j'}, {"iterations", optional_argument, NULL, 'n'}, {"kernel", optional_argument, NULL, 'k'}, {"tilesize", optional_argument, NULL, 't'}, {"help", optional_argument, NULL, 'h'}, {NULL, 0, NULL, 0} }; program_name = argv[0]; Ni=512, Nj=512, iter=10000, kernel=2, tileSize=4; ch=0; while ((ch = getopt_long(argc, argv,"f:i:j:n:k:h", long_options, NULL)) != -1) { switch (ch) { case 'f' : fname = optarg; break; case 'i' : Ni = atoi(optarg); break; case 'j' : Nj = atoi(optarg); break; case 'n' : iter = atoi(optarg); break; case 'k' : kernel = atoi(optarg); break; case 't' : tileSize = atoi(optarg); break; case 'h': print_usage(stderr, 1); exit(EXIT_FAILURE); case '?': print_usage(stderr, 1); exit(EXIT_FAILURE); default: abort(); } } N = Ni * Nj; printf("\nRunning Jacobi method:\n"); printf("======================\n\n"); printf("Coefficient matrix given in file: \n%s\n\n", fname); printf("Parameters:\n"); printf("N=%d, Ni=%d, Nj=%d, ", N, Ni, Nj); printf("iterations=%d, kernel=%d, tilesize=%d\n", iter,kernel,tileSize); // Allocate memory on host x_next = (float *) malloc(Ni*sizeof(float)); A = (float *) malloc(N*sizeof(float)); x_now = (float *) malloc(Ni*sizeof(float)); b = (float *) malloc(Ni*sizeof(float)); x_h = (float *) malloc(Ni*sizeof(float)); x_d = (float *) malloc(Ni*sizeof(float)); // Initialize result vector x for (i=0; i<Ni; i++) { x_now[i] = 0; x_next[i] = 0; } // Read coefficient matrix from file file = fopen(fname, "r"); if (file == NULL) exit(EXIT_FAILURE); char *line; size_t len = 0; i=0; while ((getline(&line, &len, file)) != -1) { if (i<N) A[i] = atof(line); else b[i-N] = atof(line); i++; } start_h = clock(); // Run "iter" iterations of the Jacobi method on HOST for (k=0; k<iter; k++) { if (k%2) jacobiOnHost(x_now, A, x_next, b, Ni, Nj); else jacobiOnHost(x_next, A, x_now, b, Ni, Nj); //for (i=0; i<Nj; i++) // x_now[i] = x_next[i]; } end_h = clock(); // Save result from host in x_h for (i=0; i<Nj; i++) x_h[i] = x_next[i]; // Re-initialize result vector x for device computation for (i=0; i<Ni; i++) { x_now[i] = 0; x_next[i] = 0; } // Check available device. 
int devID = 0, num_devs = 1; selectGpu(&devID, &num_devs); testDevice(devID); // Allocate memory on the device assert(hipSuccess == hipMalloc((void **) &x_next_d, Ni*sizeof(float))); assert(hipSuccess == hipMalloc((void **) &A_d, N*sizeof(float))); assert(hipSuccess == hipMalloc((void **) &x_now_d, Ni*sizeof(float))); assert(hipSuccess == hipMalloc((void **) &b_d, Ni*sizeof(float))); // Copy data -> device hipMemcpy(x_next_d, x_next, sizeof(float)*Ni, hipMemcpyHostToDevice); hipMemcpy(A_d, A, sizeof(float)*N, hipMemcpyHostToDevice); hipMemcpy(x_now_d, x_now, sizeof(float)*Ni, hipMemcpyHostToDevice); hipMemcpy(b_d, b, sizeof(float)*Ni, hipMemcpyHostToDevice); // Compute grid and block size. // Un-optimized kernel int blockSize = Ni; int nBlocks = 1; // Optimized kernel int nTiles = Ni/tileSize + (Ni%tileSize == 0?0:1); int gridHeight = Nj/tileSize + (Nj%tileSize == 0?0:1); int gridWidth = Ni/tileSize + (Ni%tileSize == 0?0:1); printf("w=%d, h=%d\n",gridWidth,gridHeight); dim3 dGrid(gridHeight, gridWidth), dBlock(tileSize, tileSize); start_d = clock(); // Run "iter" iterations of the Jacobi method on DEVICE if (kernel == 1) { printf("Using un-optimized kernel.\n"); for (k=0; k<iter; k++) { if (k%2) hipLaunchKernelGGL(( jacobiOnDevice) , dim3(nBlocks), dim3(blockSize) , 0, 0, x_now_d, A_d, x_next_d, b_d, Ni, Nj); else hipLaunchKernelGGL(( jacobiOnDevice) , dim3(nBlocks), dim3(blockSize) , 0, 0, x_next_d, A_d, x_now_d, b_d, Ni, Nj); //hipMemcpy(x_now_d, x_next_d, sizeof(float)*Ni, hipMemcpyDeviceToDevice); } } else { printf("Using optimized kernel.\n"); for (k=0; k<iter; k++) { if (k%2) hipLaunchKernelGGL(( jacobiOptimizedOnDevice) , dim3(nTiles), dim3(tileSize) , 0, 0, x_now_d, A_d, x_next_d, b_d, Ni, Nj); else hipLaunchKernelGGL(( jacobiOptimizedOnDevice) , dim3(nTiles), dim3(tileSize) , 0, 0, x_next_d, A_d, x_now_d, b_d, Ni, Nj); //hipMemcpy(x_now_d, x_next_d, sizeof(float)*Ni, hipMemcpyDeviceToDevice); } } end_d = clock(); // Data <- device hipMemcpy(x_d, x_next_d, sizeof(float)*Ni, hipMemcpyDeviceToHost); // Free memory free(x_next); free(A); free(x_now); free(b); hipFree(x_next_d); hipFree(A_d); hipFree(x_now_d); hipFree(b_d); end=clock(); printf("\nResult after %d iterations:\n",iter); float err = 0.0; for (i=0; i < Ni; i++) { //printf("x_h[%d]=%f\n",i,x_h[i]); //printf("x_d[%d]=%f\n",i,x_d[i]); err += abs(x_h[i] - x_d[i]) / Ni; } printf("x_h[%d]=%f\n",0,x_h[0]); printf("x_d[%d]=%f\n",0,x_d[0]); t_full = ((float)end - (float)start) / CLOCKS_PER_SEC; t_host = ((float)end_h - (float)start_h) / CLOCKS_PER_SEC; t_dev = ((float)end_d - (float)start_d) / CLOCKS_PER_SEC; printf("\nTiming:\nFull: %f\nHost: %f\nDevice: %f\n\n", t_full, t_host, t_dev); printf("Relative error: %f\n", err); printf("\nProgram terminated successfully.\n"); return 0; }
88dd28704257beffebd4138b4a6618f2be0e7693.cu
#include<stdio.h> #include<stdlib.h> #include<getopt.h> #include <assert.h> #include <cuda.h> #include <time.h> static char* program_name; // Usage void print_usage (FILE* stream, int exit_code) { fprintf (stream, "Usage: %s options\n", program_name); fprintf (stream, " -h --help Display this usage information.\n" " -f --file filename File containing coefficient matrix.\n" " -i --Ni int Number of elements in Y direction (default=512).\n" " -j --Nj int Number of elements in X direction (default=512).\n" " -n --iterations int Number of iterations (default=10000).\n" " -k --kernel [1,2] 1: unoptimized, 2: optimized kernel (default).\n" " -t --tilesize int Size of each thread block in kernel 2 (default=4).\n"); exit (exit_code); } // Host version of the Jacobi method void jacobiOnHost(float* x_next, float* A, float* x_now, float* b, int Ni, int Nj) { int i,j; float sigma; for (i=0; i<Ni; i++) { sigma = 0.0; for (j=0; j<Nj; j++) { if (i != j) sigma += A[i*Nj + j] * x_now[j]; } x_next[i] = (b[i] - sigma) / A[i*Nj + i]; } } // Device version of the Jacobi method __global__ void jacobiOnDevice(float* x_next, float* A, float* x_now, float* b, int Ni, int Nj) { float sigma = 0.0; int idx = threadIdx.x; for (int j=0; j<Nj; j++) { if (idx != j) sigma += A[idx*Nj + j] * x_now[j]; } x_next[idx] = (b[idx] - sigma) / A[idx*Nj + idx]; } // Optimized device version of the Jacobi method __global__ void jacobiOptimizedOnDevice(float* x_next, float* A, float* x_now, float* b, int Ni, int Nj) { // Optimization step 1: tiling int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx < Ni) { float sigma = 0.0; // Optimization step 2: store index in register // Multiplication is not executed in every iteration. int idx_Ai = idx*Nj; // Tried to use prefetching, but then the result is terribly wrong and I don't know why.. /* float curr_A = A[idx_Ai]; float nxt_A; //printf("idx=%d\n",idx); for (int j=0; j<Nj-1; j++) { if (idx != j) nxt_A = A[idx_Ai + j + 1]; sigma += curr_A * x_now[j]; //sigma += A[idx_Ai + j] * x_now[j]; curr_A = nxt_A; //printf("curr_A=%f\n",curr_A); } if (idx != Nj-1) sigma += nxt_A * x_now[Nj-1]; x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx]; */ for (int j=0; j<Nj; j++) if (idx != j) sigma += A[idx_Ai + j] * x_now[j]; // Tried to use loop-ennrolling, but also here this gives a wrong result.. /* for (int j=0; j<Nj/4; j+=4) { if (idx != j) { sigma += A[idx_Ai + j] * x_now[j]; } if (idx != j+1) { sigma += A[idx_Ai + j+1] * x_now[j+1]; } if (idx != j+2) { sigma += A[idx_Ai + j+2] * x_now[j+2]; } if (idx != j+3) { sigma += A[idx_Ai + j+3] * x_now[j+3]; } }*/ x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx]; } } // device selection (copied from previous assignment) static void selectGpu(int *gpu_num, int *num_devs) { // gpu_num: (I/O): I: Default choice, // O: best device, changed only if more than one device // num_devs: (O) Number of found devices. int best = *gpu_num; cudaGetDeviceCount(num_devs); if ( *num_devs > 1 ) { int dev_num; int max_cores = 0; for (dev_num = 0; dev_num < *num_devs; dev_num++) { cudaDeviceProp dev_properties; cudaGetDeviceProperties(&dev_properties, dev_num); if (max_cores < dev_properties.multiProcessorCount) { max_cores = dev_properties.multiProcessorCount; best = dev_num; } } *gpu_num = best; } } // device test (copied from previous assignment) static void testDevice(int devID) { // Check if we can run. Maybe do something more... 
cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.major == 9999 && deviceProp.minor == 9999) { /* Simulated device. */ printf("There is no device supporting CUDA.\n"); cudaThreadExit(); } else printf("Using GPU device number %d.\n", devID); } int main(int argc, char *argv[]) { // initialize timing variables time_t start, end, start_h, end_h, start_d, end_d; float t_full, t_host, t_dev; start=clock(); // initialize data variables float *x_now, *x_next, *A, *b, *x_h, *x_d; float *x_now_d, *x_next_d, *A_d, *b_d; // initialize parameter variables int N, Ni, Nj, iter, kernel, tileSize; int ch; int i,k; char* fname; FILE* file; // Argument parsing static struct option long_options[] = { {"file", required_argument, NULL, 'f'}, {"Ni", optional_argument, NULL, 'i'}, {"Nj", optional_argument, NULL, 'j'}, {"iterations", optional_argument, NULL, 'n'}, {"kernel", optional_argument, NULL, 'k'}, {"tilesize", optional_argument, NULL, 't'}, {"help", optional_argument, NULL, 'h'}, {NULL, 0, NULL, 0} }; program_name = argv[0]; Ni=512, Nj=512, iter=10000, kernel=2, tileSize=4; ch=0; while ((ch = getopt_long(argc, argv,"f:i:j:n:k:h", long_options, NULL)) != -1) { switch (ch) { case 'f' : fname = optarg; break; case 'i' : Ni = atoi(optarg); break; case 'j' : Nj = atoi(optarg); break; case 'n' : iter = atoi(optarg); break; case 'k' : kernel = atoi(optarg); break; case 't' : tileSize = atoi(optarg); break; case 'h': print_usage(stderr, 1); exit(EXIT_FAILURE); case '?': print_usage(stderr, 1); exit(EXIT_FAILURE); default: abort(); } } N = Ni * Nj; printf("\nRunning Jacobi method:\n"); printf("======================\n\n"); printf("Coefficient matrix given in file: \n%s\n\n", fname); printf("Parameters:\n"); printf("N=%d, Ni=%d, Nj=%d, ", N, Ni, Nj); printf("iterations=%d, kernel=%d, tilesize=%d\n", iter,kernel,tileSize); // Allocate memory on host x_next = (float *) malloc(Ni*sizeof(float)); A = (float *) malloc(N*sizeof(float)); x_now = (float *) malloc(Ni*sizeof(float)); b = (float *) malloc(Ni*sizeof(float)); x_h = (float *) malloc(Ni*sizeof(float)); x_d = (float *) malloc(Ni*sizeof(float)); // Initialize result vector x for (i=0; i<Ni; i++) { x_now[i] = 0; x_next[i] = 0; } // Read coefficient matrix from file file = fopen(fname, "r"); if (file == NULL) exit(EXIT_FAILURE); char *line; size_t len = 0; i=0; while ((getline(&line, &len, file)) != -1) { if (i<N) A[i] = atof(line); else b[i-N] = atof(line); i++; } start_h = clock(); // Run "iter" iterations of the Jacobi method on HOST for (k=0; k<iter; k++) { if (k%2) jacobiOnHost(x_now, A, x_next, b, Ni, Nj); else jacobiOnHost(x_next, A, x_now, b, Ni, Nj); //for (i=0; i<Nj; i++) // x_now[i] = x_next[i]; } end_h = clock(); // Save result from host in x_h for (i=0; i<Nj; i++) x_h[i] = x_next[i]; // Re-initialize result vector x for device computation for (i=0; i<Ni; i++) { x_now[i] = 0; x_next[i] = 0; } // Check available device. 
int devID = 0, num_devs = 1; selectGpu(&devID, &num_devs); testDevice(devID); // Allocate memory on the device assert(cudaSuccess == cudaMalloc((void **) &x_next_d, Ni*sizeof(float))); assert(cudaSuccess == cudaMalloc((void **) &A_d, N*sizeof(float))); assert(cudaSuccess == cudaMalloc((void **) &x_now_d, Ni*sizeof(float))); assert(cudaSuccess == cudaMalloc((void **) &b_d, Ni*sizeof(float))); // Copy data -> device cudaMemcpy(x_next_d, x_next, sizeof(float)*Ni, cudaMemcpyHostToDevice); cudaMemcpy(A_d, A, sizeof(float)*N, cudaMemcpyHostToDevice); cudaMemcpy(x_now_d, x_now, sizeof(float)*Ni, cudaMemcpyHostToDevice); cudaMemcpy(b_d, b, sizeof(float)*Ni, cudaMemcpyHostToDevice); // Compute grid and block size. // Un-optimized kernel int blockSize = Ni; int nBlocks = 1; // Optimized kernel int nTiles = Ni/tileSize + (Ni%tileSize == 0?0:1); int gridHeight = Nj/tileSize + (Nj%tileSize == 0?0:1); int gridWidth = Ni/tileSize + (Ni%tileSize == 0?0:1); printf("w=%d, h=%d\n",gridWidth,gridHeight); dim3 dGrid(gridHeight, gridWidth), dBlock(tileSize, tileSize); start_d = clock(); // Run "iter" iterations of the Jacobi method on DEVICE if (kernel == 1) { printf("Using un-optimized kernel.\n"); for (k=0; k<iter; k++) { if (k%2) jacobiOnDevice <<< nBlocks, blockSize >>> (x_now_d, A_d, x_next_d, b_d, Ni, Nj); else jacobiOnDevice <<< nBlocks, blockSize >>> (x_next_d, A_d, x_now_d, b_d, Ni, Nj); //cudaMemcpy(x_now_d, x_next_d, sizeof(float)*Ni, cudaMemcpyDeviceToDevice); } } else { printf("Using optimized kernel.\n"); for (k=0; k<iter; k++) { if (k%2) jacobiOptimizedOnDevice <<< nTiles, tileSize >>> (x_now_d, A_d, x_next_d, b_d, Ni, Nj); else jacobiOptimizedOnDevice <<< nTiles, tileSize >>> (x_next_d, A_d, x_now_d, b_d, Ni, Nj); //cudaMemcpy(x_now_d, x_next_d, sizeof(float)*Ni, cudaMemcpyDeviceToDevice); } } end_d = clock(); // Data <- device cudaMemcpy(x_d, x_next_d, sizeof(float)*Ni, cudaMemcpyDeviceToHost); // Free memory free(x_next); free(A); free(x_now); free(b); cudaFree(x_next_d); cudaFree(A_d); cudaFree(x_now_d); cudaFree(b_d); end=clock(); printf("\nResult after %d iterations:\n",iter); float err = 0.0; for (i=0; i < Ni; i++) { //printf("x_h[%d]=%f\n",i,x_h[i]); //printf("x_d[%d]=%f\n",i,x_d[i]); err += abs(x_h[i] - x_d[i]) / Ni; } printf("x_h[%d]=%f\n",0,x_h[0]); printf("x_d[%d]=%f\n",0,x_d[0]); t_full = ((float)end - (float)start) / CLOCKS_PER_SEC; t_host = ((float)end_h - (float)start_h) / CLOCKS_PER_SEC; t_dev = ((float)end_d - (float)start_d) / CLOCKS_PER_SEC; printf("\nTiming:\nFull: %f\nHost: %f\nDevice: %f\n\n", t_full, t_host, t_dev); printf("Relative error: %f\n", err); printf("\nProgram terminated successfully.\n"); return 0; }
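The Jacobi pair above shows the mechanical rewrites hipify applies to a hand-written CUDA program: runtime headers, device-property types, the deprecated cudaThreadExit() (replaced by hipDeviceReset()), and above all the triple-chevron kernel launch, which becomes hipLaunchKernelGGL with explicit dim3 grid/block arguments plus shared-memory size and stream. A minimal, self-contained sketch of that launch idiom (toy kernel, not part of the Jacobi program):

// Minimal HIP sketch of the launch pattern hipify produced for jacobiOnDevice.
#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void scale(float* x, float a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main()
{
    const int n = 512;
    float* d_x = nullptr;
    hipMalloc(&d_x, n * sizeof(float));
    hipMemset(d_x, 0, n * sizeof(float));

    // CUDA:  scale<<<(n + 255) / 256, 256>>>(d_x, 2.0f, n);
    hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, 0, d_x, 2.0f, n);

    // Same post-launch error check pattern as the drivers in this corpus.
    hipError_t err = hipGetLastError();
    if (err != hipSuccess) printf("HIP error: %s\n", hipGetErrorString(err));

    hipDeviceSynchronize();
    hipFree(d_x);
    return 0;
}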
f6ddc35a49b29ba7e1fbe729f3c90f988147619f.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cstdint> using namespace std; template <typename T1, typename T2, typename T3> __global__ void cuda_add_impl(int64_t N, T3* O, const T1* X, const T2* Y) { auto offset = threadIdx.x; if (offset < N) { O[offset] = Y[offset] + X[offset]; } } template <typename T1, typename T2, typename T3> void cuda_add(int64_t N, T3* O, const T1* X, const T2* Y, hipStream_t compute_stream) { hipLaunchKernelGGL(( cuda_add_impl), dim3(1), dim3(256), 0, compute_stream, N, O, X, Y); } template <typename T> __global__ void cuda_slice_impl(const T* X, int64_t from, int64_t to, T* Y) { auto offset = threadIdx.x; if (offset >= from && offset < to) { Y[offset - from] = X[offset]; } } template <typename T> void cuda_slice(const T* X, int64_t from, int64_t to, T* Y, hipStream_t compute_stream) { hipLaunchKernelGGL(( cuda_slice_impl<T>), dim3(1), dim3(256), 0, compute_stream, X, from, to, Y); } template void cuda_slice(const float*, int64_t, int64_t, float*, hipStream_t compute_stream); template void cuda_slice(const double*, int64_t, int64_t, double*, hipStream_t compute_stream); template void cuda_add(int64_t, float*, const float*, const float*, hipStream_t compute_stream); template void cuda_add(int64_t, float*, const float*, const double*, hipStream_t compute_stream);
f6ddc35a49b29ba7e1fbe729f3c90f988147619f.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include <cuda.h> #include <cuda_runtime.h> #include <cstdint> using namespace std; template <typename T1, typename T2, typename T3> __global__ void cuda_add_impl(int64_t N, T3* O, const T1* X, const T2* Y) { auto offset = threadIdx.x; if (offset < N) { O[offset] = Y[offset] + X[offset]; } } template <typename T1, typename T2, typename T3> void cuda_add(int64_t N, T3* O, const T1* X, const T2* Y, cudaStream_t compute_stream) { cuda_add_impl<<<1, 256, 0, compute_stream>>>(N, O, X, Y); } template <typename T> __global__ void cuda_slice_impl(const T* X, int64_t from, int64_t to, T* Y) { auto offset = threadIdx.x; if (offset >= from && offset < to) { Y[offset - from] = X[offset]; } } template <typename T> void cuda_slice(const T* X, int64_t from, int64_t to, T* Y, cudaStream_t compute_stream) { cuda_slice_impl<T><<<1, 256, 0, compute_stream>>>(X, from, to, Y); } template void cuda_slice(const float*, int64_t, int64_t, float*, cudaStream_t compute_stream); template void cuda_slice(const double*, int64_t, int64_t, double*, cudaStream_t compute_stream); template void cuda_add(int64_t, float*, const float*, const float*, cudaStream_t compute_stream); template void cuda_add(int64_t, float*, const float*, const double*, cudaStream_t compute_stream);
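The cuda_add/cuda_slice pair above wraps templated kernels in host launchers that take an explicit compute stream and are then explicitly instantiated for a few type combinations. A hedged usage sketch for the float instantiation of cuda_add is shown below; it assumes this translation unit is linked against the HIP file above, and note that the fixed <<<1, 256>>> launch in cuda_add_impl only covers N <= 256 elements:

// Usage sketch (assumes linkage against the file above; names reused from it).
#include <hip/hip_runtime.h>
#include <cstdint>

// Forward declaration matching the explicit instantiation in the file above.
template <typename T1, typename T2, typename T3>
void cuda_add(int64_t N, T3* O, const T1* X, const T2* Y, hipStream_t compute_stream);

int main()
{
    const int64_t n = 256;              // upper bound for the 1x256 launch
    float *x, *y, *o;
    hipMalloc(&x, n * sizeof(float));
    hipMalloc(&y, n * sizeof(float));
    hipMalloc(&o, n * sizeof(float));

    hipStream_t stream;
    hipStreamCreate(&stream);

    cuda_add<float, float, float>(n, o, x, y, stream);   // enqueued on 'stream'

    hipStreamSynchronize(stream);
    hipStreamDestroy(stream);
    hipFree(x); hipFree(y); hipFree(o);
    return 0;
}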
8b7797803e5703bd7c1d3ec2c59fb9735cd25f5a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorTopK.cu" #else void THCTensor_(topk)(THCState* state, THCTensor *topK, THCudaLongTensor *indices, THCTensor *input_, int64_t k, int dim, int dir, int sorted) { THAssert(topK != NULL && indices != NULL && input_ != NULL); THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, topK, indices, input_)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, topK) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); int64_t dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING); int numDims = THCTensor_(nDimensionLegacyNoScalars)(state, input_); THArgCheck(numDims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); THArgCheck(dim >= 0 && dim < numDims, 6, "dim not in range"); int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input_, dim); THArgCheck(k >= 0 && k <= sliceSize, 5, "k not in range for dimension"); THCTensor *input = THCTensor_(newContiguous)(state, input_); // Build the output size, which is the dim being selected set to // size k std::vector<int64_t> topKSize = THTensor_sizesLegacyNoScalars(input); topKSize[dim] = k; THCTensor_(resize)(state, topK, topKSize, {}); THCudaLongTensor_resize(state, indices, topKSize, {}); // static_cast is required to ensure that the correct type (INDEX_T) // is provided to the kernel for the arguments. #define RUN_K(INDEX_T, DIM, DIR) \ hipLaunchKernelGGL(( gatherTopK<scalar_t, INDEX_T, DIM, DIR>) \ , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \ inputInfo, \ static_cast<INDEX_T>(sliceSize), \ static_cast<INDEX_T>(k), \ static_cast<INDEX_T>(inputSlices), \ /* The actual dimension that the k-selection is running in */ \ /* may have changed from collapseDims() */ \ static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \ topKInfo, \ static_cast<INDEX_T>(topKSlices), \ static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \ indicesInfo, \ static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim])) #define RUN_DIR(INDEX_T, DIM) \ if (dir) { \ RUN_K(INDEX_T, DIM, true); \ } else { \ RUN_K(INDEX_T, DIM, false); \ } #define RUN_DIM(INDEX_T) \ if (allDims == 1) { \ RUN_DIR(INDEX_T, 1); \ } else if (allDims == 2) { \ RUN_DIR(INDEX_T, 2); \ } else if (allDims == 3) { \ RUN_DIR(INDEX_T, 3); \ } else { \ RUN_DIR(INDEX_T, -1); \ } #ifdef __HIP_PLATFORM_HCC__ #define TOPK_WARP_SIZE 64 #else #define TOPK_WARP_SIZE 32 #endif #define RUN_T(INDEX_T) \ TensorInfo<scalar_t, INDEX_T> inputInfo = \ getTensorInfo<scalar_t, THCTensor, INDEX_T>(state, input); \ TensorInfo<scalar_t, INDEX_T> topKInfo = \ getTensorInfo<scalar_t, THCTensor, INDEX_T>(state, topK); \ TensorInfo<int64_t, INDEX_T> indicesInfo = \ getTensorInfo<int64_t, THCudaLongTensor, INDEX_T>(state, indices); \ \ /* We use these structures solely to find the offset to */ \ /* each slice we are operating on */ \ inputInfo.sizes[dim] = 1; \ topKInfo.sizes[dim] = 1; \ indicesInfo.sizes[dim] = 1; \ \ /* Collapse all other dims */ \ int collapseInputDim = inputInfo.collapseDims(dim); \ int collapseTopKDim = topKInfo.collapseDims(dim); \ int collapseIndicesDim = indicesInfo.collapseDims(dim); \ \ int64_t inputSlices = 1; \ for (int i = 0; i < inputInfo.dims; ++i) { \ inputSlices *= inputInfo.sizes[i]; \ } \ int64_t topKSlices = 1; \ for (int i = 0; i < topKInfo.dims; ++i) { \ topKSlices *= topKInfo.sizes[i]; \ } \ \ dim3 grid; \ if (!THC_getGridFromTiles(inputSlices, grid)) { 
\ THError("Slice to sort is too large"); \ } \ \ dim3 block(::min(THCRoundUp(sliceSize, (int64_t) TOPK_WARP_SIZE), (int64_t) 1024)); \ \ /* This is used as a template parameter to calculate indices. */ \ /* We only specialize it if all collapsed dim sizes are the */ \ /* same; otherwise, we use -1 which is the specialization */ \ /* parameter for arbitrary dimensions */ \ int allDims = inputInfo.dims; \ if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \ allDims = -1; \ } \ \ RUN_DIM(INDEX_T); if (THCTensor_nElement(state, input) > 0) { // Based on required index size, run the algorithm with the // appropriate index type if (THCTensor_canUse32BitIndexMath(state, input) && THCTensor_canUse32BitIndexMath(state, topK) && THCTensor_canUse32BitIndexMath(state, indices)) { RUN_T(uint32_t); } else { RUN_T(uint64_t); } } #undef RUN_T #undef RUN_DIM #undef RUN_DIR #undef RUN_K #undef TOPK_WARP_SIZE // Sort the results if the user wants them sorted, since our // selection routine does not ensure sorting if (sorted) { // FIXME: the k/v inplace sort along slice only works for size <= // 2048 at the moment if (sliceSize <= 2048) { // This avoids any memory allocations and performs all sorting // work inplace along the slice THCTensor_(sortKeyValueInplace)(state, topK, indices, dim, dir); } else { // Depend upon the backup sort that returns indices, which we // can use in conjunction with gather to produce the original // indices. // This is not the most efficient implementation, especially since // there are memory allocations performed here. If the user desires // greater performance, they should torch.gather() the results // themselves using the reported indices, providing previously // allocated tensors to receive the results. THCTensor* sortedTopK = THCTensor_(new)(state); THCudaLongTensor* sortedIndices = THCudaLongTensor_new(state); THCTensor_(sort)(state, sortedTopK, sortedIndices, topK, dim, dir); THCudaLongTensor* sortedTopKIndices = THCudaLongTensor_new(state); THCudaLongTensor_resizeAs(state, sortedTopKIndices, indices); THCudaLongTensor_gather(state, sortedTopKIndices, indices, dim, sortedIndices); THCTensor_(freeCopyTo)(state, sortedTopK, topK); THCudaLongTensor_freeCopyTo(state, sortedTopKIndices, indices); THCudaLongTensor_free(state, sortedIndices); } } THCudaLongTensor_free(state, input); THCudaCheck(hipGetLastError()); } #endif // THC_GENERIC_FILE
8b7797803e5703bd7c1d3ec2c59fb9735cd25f5a.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorTopK.cu" #else void THCTensor_(topk)(THCState* state, THCTensor *topK, THCudaLongTensor *indices, THCTensor *input_, int64_t k, int dim, int dir, int sorted) { THAssert(topK != NULL && indices != NULL && input_ != NULL); THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, topK, indices, input_)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, topK) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); int64_t dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING); int numDims = THCTensor_(nDimensionLegacyNoScalars)(state, input_); THArgCheck(numDims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); THArgCheck(dim >= 0 && dim < numDims, 6, "dim not in range"); int64_t sliceSize = THCTensor_(sizeLegacyNoScalars)(state, input_, dim); THArgCheck(k >= 0 && k <= sliceSize, 5, "k not in range for dimension"); THCTensor *input = THCTensor_(newContiguous)(state, input_); // Build the output size, which is the dim being selected set to // size k std::vector<int64_t> topKSize = THTensor_sizesLegacyNoScalars(input); topKSize[dim] = k; THCTensor_(resize)(state, topK, topKSize, {}); THCudaLongTensor_resize(state, indices, topKSize, {}); // static_cast is required to ensure that the correct type (INDEX_T) // is provided to the kernel for the arguments. #define RUN_K(INDEX_T, DIM, DIR) \ gatherTopK<scalar_t, INDEX_T, DIM, DIR> \ <<<grid, block, 0, THCState_getCurrentStream(state)>>>( \ inputInfo, \ static_cast<INDEX_T>(sliceSize), \ static_cast<INDEX_T>(k), \ static_cast<INDEX_T>(inputSlices), \ /* The actual dimension that the k-selection is running in */ \ /* may have changed from collapseDims() */ \ static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \ topKInfo, \ static_cast<INDEX_T>(topKSlices), \ static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \ indicesInfo, \ static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim])) #define RUN_DIR(INDEX_T, DIM) \ if (dir) { \ RUN_K(INDEX_T, DIM, true); \ } else { \ RUN_K(INDEX_T, DIM, false); \ } #define RUN_DIM(INDEX_T) \ if (allDims == 1) { \ RUN_DIR(INDEX_T, 1); \ } else if (allDims == 2) { \ RUN_DIR(INDEX_T, 2); \ } else if (allDims == 3) { \ RUN_DIR(INDEX_T, 3); \ } else { \ RUN_DIR(INDEX_T, -1); \ } #ifdef __HIP_PLATFORM_HCC__ #define TOPK_WARP_SIZE 64 #else #define TOPK_WARP_SIZE 32 #endif #define RUN_T(INDEX_T) \ TensorInfo<scalar_t, INDEX_T> inputInfo = \ getTensorInfo<scalar_t, THCTensor, INDEX_T>(state, input); \ TensorInfo<scalar_t, INDEX_T> topKInfo = \ getTensorInfo<scalar_t, THCTensor, INDEX_T>(state, topK); \ TensorInfo<int64_t, INDEX_T> indicesInfo = \ getTensorInfo<int64_t, THCudaLongTensor, INDEX_T>(state, indices); \ \ /* We use these structures solely to find the offset to */ \ /* each slice we are operating on */ \ inputInfo.sizes[dim] = 1; \ topKInfo.sizes[dim] = 1; \ indicesInfo.sizes[dim] = 1; \ \ /* Collapse all other dims */ \ int collapseInputDim = inputInfo.collapseDims(dim); \ int collapseTopKDim = topKInfo.collapseDims(dim); \ int collapseIndicesDim = indicesInfo.collapseDims(dim); \ \ int64_t inputSlices = 1; \ for (int i = 0; i < inputInfo.dims; ++i) { \ inputSlices *= inputInfo.sizes[i]; \ } \ int64_t topKSlices = 1; \ for (int i = 0; i < topKInfo.dims; ++i) { \ topKSlices *= topKInfo.sizes[i]; \ } \ \ dim3 grid; \ if (!THC_getGridFromTiles(inputSlices, grid)) { \ THError("Slice to sort is too large"); \ } \ \ dim3 block(std::min(THCRoundUp(sliceSize, (int64_t) TOPK_WARP_SIZE), 
(int64_t) 1024)); \ \ /* This is used as a template parameter to calculate indices. */ \ /* We only specialize it if all collapsed dim sizes are the */ \ /* same; otherwise, we use -1 which is the specialization */ \ /* parameter for arbitrary dimensions */ \ int allDims = inputInfo.dims; \ if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \ allDims = -1; \ } \ \ RUN_DIM(INDEX_T); if (THCTensor_nElement(state, input) > 0) { // Based on required index size, run the algorithm with the // appropriate index type if (THCTensor_canUse32BitIndexMath(state, input) && THCTensor_canUse32BitIndexMath(state, topK) && THCTensor_canUse32BitIndexMath(state, indices)) { RUN_T(uint32_t); } else { RUN_T(uint64_t); } } #undef RUN_T #undef RUN_DIM #undef RUN_DIR #undef RUN_K #undef TOPK_WARP_SIZE // Sort the results if the user wants them sorted, since our // selection routine does not ensure sorting if (sorted) { // FIXME: the k/v inplace sort along slice only works for size <= // 2048 at the moment if (sliceSize <= 2048) { // This avoids any memory allocations and performs all sorting // work inplace along the slice THCTensor_(sortKeyValueInplace)(state, topK, indices, dim, dir); } else { // Depend upon the backup sort that returns indices, which we // can use in conjunction with gather to produce the original // indices. // This is not the most efficient implementation, especially since // there are memory allocations performed here. If the user desires // greater performance, they should torch.gather() the results // themselves using the reported indices, providing previously // allocated tensors to receive the results. THCTensor* sortedTopK = THCTensor_(new)(state); THCudaLongTensor* sortedIndices = THCudaLongTensor_new(state); THCTensor_(sort)(state, sortedTopK, sortedIndices, topK, dim, dir); THCudaLongTensor* sortedTopKIndices = THCudaLongTensor_new(state); THCudaLongTensor_resizeAs(state, sortedTopKIndices, indices); THCudaLongTensor_gather(state, sortedTopKIndices, indices, dim, sortedIndices); THCTensor_(freeCopyTo)(state, sortedTopK, topK); THCudaLongTensor_freeCopyTo(state, sortedTopKIndices, indices); THCudaLongTensor_free(state, sortedIndices); } } THCudaLongTensor_free(state, input); THCudaCheck(cudaGetLastError()); } #endif // THC_GENERIC_FILE
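The topk pair above drives its kernel through the RUN_K/RUN_DIR/RUN_DIM/RUN_T macros, whose outermost decision is the index type: 32-bit indexing when every tensor fits 32-bit index math, 64-bit otherwise. A standalone sketch of that dispatch idea is below; the kernel is a toy (not the THC gatherTopK kernel) and assumes the grid size fits in an unsigned int:

// Toy sketch of the index-type dispatch behind RUN_T above.
#include <hip/hip_runtime.h>
#include <cstdint>
#include <limits>

template <typename IndexT>
__global__ void fill_iota(float* out, IndexT n)
{
    IndexT i = static_cast<IndexT>(blockIdx.x) * blockDim.x + threadIdx.x;
    if (i < n) out[i] = static_cast<float>(i);
}

void launch_fill_iota(float* out, int64_t n, hipStream_t stream)
{
    const int block = 256;
    const unsigned int grid = static_cast<unsigned int>((n + block - 1) / block);
    if (n <= std::numeric_limits<uint32_t>::max()) {
        // Cheaper 32-bit index arithmetic when the problem fits.
        hipLaunchKernelGGL(fill_iota<uint32_t>, dim3(grid), dim3(block), 0, stream,
                           out, static_cast<uint32_t>(n));
    } else {
        hipLaunchKernelGGL(fill_iota<uint64_t>, dim3(grid), dim3(block), 0, stream,
                           out, static_cast<uint64_t>(n));
    }
}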
ba86edf9f0ca0a95353aeae6a8a37d05d7006b97.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cstdio> __global__ void set_only_one(double *M, int size){ int i_index = (blockIdx.x * blockDim.x + threadIdx.x); int j_index = (blockIdx.y * blockDim.y + threadIdx.y); if(i_index != 0 || j_index != 2) return; M[j_index*size + i_index] = 1.0; } void caller(){ int siz = 4; dim3 blockDim(2,2); dim3 gridDim( 2,2 ); double *arr1; hipMalloc(&arr1, sizeof(double) * siz * siz); hipMemset((void *)arr1, 0.0, sizeof(double) * siz * siz); hipLaunchKernelGGL(( set_only_one), dim3(gridDim), dim3(blockDim) , 0, 0, arr1, siz); hipDeviceSynchronize(); double *hostarr = new double[siz * siz]; hipMemcpy(hostarr, arr1, sizeof(double)*siz * siz, hipMemcpyDeviceToHost); for(int i = 0; i < siz; i++){ for(int j = 0; j < siz;j++){ printf("%lf ", hostarr[i*siz + j]); } printf("\n"); } }
ba86edf9f0ca0a95353aeae6a8a37d05d7006b97.cu
#include<cstdio> __global__ void set_only_one(double *M, int size){ int i_index = (blockIdx.x * blockDim.x + threadIdx.x); int j_index = (blockIdx.y * blockDim.y + threadIdx.y); if(i_index != 0 || j_index != 2) return; M[j_index*size + i_index] = 1.0; } void caller(){ int siz = 4; dim3 blockDim(2,2); dim3 gridDim( 2,2 ); double *arr1; cudaMalloc(&arr1, sizeof(double) * siz * siz); cudaMemset((void *)arr1, 0.0, sizeof(double) * siz * siz); set_only_one<<<gridDim, blockDim >>>(arr1, siz); cudaThreadSynchronize(); double *hostarr = new double[siz * siz]; cudaMemcpy(hostarr, arr1, sizeof(double)*siz * siz, cudaMemcpyDeviceToHost); for(int i = 0; i < siz; i++){ for(int j = 0; j < siz;j++){ printf("%lf ", hostarr[i*siz + j]); } printf("\n"); } }
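This pair illustrates two smaller hipify mappings: cudaThreadSynchronize() (long deprecated) becomes hipDeviceSynchronize(), and cudaMemset/hipMemset keep their int byte-value parameter even though the caller above passes 0.0. A minimal sketch of the same zero-then-readback flow with the value passed as an int and a basic allocation check (not part of the original program):

// Sketch of the zeroing/synchronization idiom from the pair above.
#include <hip/hip_runtime.h>
#include <cstdio>

int main()
{
    const int siz = 4;
    double* arr = nullptr;
    if (hipMalloc(&arr, sizeof(double) * siz * siz) != hipSuccess) return 1;

    // All-zero bytes encode 0.0 for IEEE doubles, so memset-to-0 zeroes the matrix.
    hipMemset(arr, 0, sizeof(double) * siz * siz);

    hipDeviceSynchronize();   // modern replacement for cudaThreadSynchronize()

    double host[siz * siz];
    hipMemcpy(host, arr, sizeof(double) * siz * siz, hipMemcpyDeviceToHost);
    printf("host[0] = %f\n", host[0]);

    hipFree(arr);
    return 0;
}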
aaa1a1fbd76592d16074f242a5c2c2dd1a7fe377.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <stdlib.h> #include <random> #include <omp.h> #include "cudaBhtree.cu" #include "Constants.h" void initializeBodies(struct body* bods); void runSimulation(struct body* b, struct body* db, char* image, double* hdImage); __global__ void interactBodies(struct body* b); __device__ void singleInteraction(struct body* a, struct body* b); __host__ __device__ double magnitude(vec3 v); __device__ void updateBodies(struct body* b); void createFrame(char* image, double* hdImage, struct body* b, int step); double toPixelSpace(double p, int size); void renderClear(char* image, double* hdImage); void renderBodies(struct body* b, double* hdImage); void colorDot(double x, double y, double vMag, double* hdImage); void colorAt(int x, int y, struct color c, double f, double* hdImage); unsigned char colorDepth(unsigned char x, unsigned char p, double f); double clamp(double x); __global__ void recinsert(Bhtree *tree, body* insertBod); void writeRender(char* data, double* hdImage, int step); __device__ inline bool contains(Octant *root, vec3 p); __global__ void interactInTree(Bhtree *tree, struct body* b); __host__ __device__ double magnitude(float x, float y, float z); int main() { std::cout << SYSTEM_THICKNESS << "AU thick disk\n";; char *image = new char[WIDTH*HEIGHT*3]; double *hdImage = new double[WIDTH*HEIGHT*3]; struct body *bodies = new struct body[NUM_BODIES]; struct body *d_bodies; hipMalloc(&d_bodies,NUM_BODIES*sizeof(struct body)); initializeBodies(bodies); hipMemcpy(d_bodies,bodies,NUM_BODIES*sizeof(struct body),hipMemcpyHostToDevice); runSimulation(bodies, d_bodies, image, hdImage); std::cout << "\nwe made it\n"; delete[] bodies; delete[] image; return 0; } void initializeBodies(struct body* bods) { using std::uniform_real_distribution; uniform_real_distribution<double> randAngle (0.0, 200.0*PI); uniform_real_distribution<double> randRadius (INNER_BOUND, SYSTEM_SIZE); uniform_real_distribution<double> randHeight (0.0, SYSTEM_THICKNESS); std::default_random_engine gen (0); double angle; double radius; double velocity; struct body *current; //STARS velocity = 0.67*sqrt((G*SOLAR_MASS)/(4*BINARY_SEPARATION*TO_METERS)); //STAR 1 current = &bods[0]; current->position.x = 0.0;///-BINARY_SEPARATION; current->position.y = 0.0; current->position.z = 0.0; current->velocity.x = 0.0; current->velocity.y = 0.0;//velocity; current->velocity.z = 0.0; current->mass = SOLAR_MASS; ///STARTS AT NUMBER OF STARS/// double totalExtraMass = 0.0; for (int index=1; index<NUM_BODIES; index++) { angle = randAngle(gen); radius = sqrt(SYSTEM_SIZE)*sqrt(randRadius(gen)); velocity = pow(((G*(SOLAR_MASS+((radius-INNER_BOUND)/SYSTEM_SIZE)*EXTRA_MASS*SOLAR_MASS)) / (radius*TO_METERS)), 0.5); current = &bods[index]; current->position.x = radius*cos(angle); current->position.y = radius*sin(angle); current->position.z = randHeight(gen)-SYSTEM_THICKNESS/2; current->velocity.x = velocity*sin(angle); current->velocity.y = -velocity*cos(angle); current->velocity.z = 0.0; current->mass = (EXTRA_MASS*SOLAR_MASS)/NUM_BODIES; totalExtraMass += (EXTRA_MASS*SOLAR_MASS)/NUM_BODIES; } std::cout << "\nTotal Disk Mass: " << totalExtraMass; std::cout << "\nEach Particle weight: " << (EXTRA_MASS*SOLAR_MASS)/NUM_BODIES << "\n______________________________\n"; } void runSimulation(struct body* b, struct body* db, char* image, double* hdImage) { createFrame(image, hdImage, b, 1); printf("here (done with 
createFrame1)"); for (int step=1; step<STEP_COUNT; step++) { std::cout << "\nBeginning timestep: " << step; hipLaunchKernelGGL(( interactBodies), dim3(1),dim3(1), 0, 0, db); hipError_t error = hipGetLastError(); if(error!=hipSuccess) printf("\nCUDA error:%s",hipGetErrorString(error)); hipMemcpy(b,db,NUM_BODIES*sizeof(struct body),hipMemcpyDeviceToHost); if (step%RENDER_INTERVAL==0) { createFrame(image, hdImage, b, step + 1); } if (DEBUG_INFO) {std::cout << "\n-------Done------- timestep: " << step << "\n" << std::flush;} } } __device__ Bhtree *Gtree; __global__ void recinsert(Bhtree *tree, body* insertBod) { const int num_streams=8; hipStream_t streams[num_streams]; for(int i=0; i<num_streams; i++) hipStreamCreateWithFlags(&streams[i],hipStreamNonBlocking); if (tree->myBod==NULL) { tree->myBod = insertBod; } else //if (!isExternal()) { bool isExtern = tree->UNW==NULL && tree->UNE==NULL && tree->USW==NULL && tree->USE==NULL; isExtern = isExtern && tree->DNW==NULL && tree->DNE==NULL && tree->DSW==NULL && tree->DSE==NULL; body *updatedBod; if (!isExtern) { updatedBod = new struct body; updatedBod->position.x = (insertBod->position.x*insertBod->mass + tree->myBod->position.x*tree->myBod->mass) / (insertBod->mass+tree->myBod->mass); updatedBod->position.y = (insertBod->position.y*insertBod->mass + tree->myBod->position.y*tree->myBod->mass) / (insertBod->mass+tree->myBod->mass); updatedBod->position.z = (insertBod->position.z*insertBod->mass + tree->myBod->position.z*tree->myBod->mass) / (insertBod->mass+tree->myBod->mass); updatedBod->mass = insertBod->mass+tree->myBod->mass; // delete myBod; if (tree->toDelete!=NULL) delete tree->toDelete; tree->toDelete = updatedBod; tree->myBod = updatedBod; updatedBod = insertBod; } else { updatedBod = tree->myBod; } Octant *unw = tree->octy->mUNW(); if (contains(unw,updatedBod->position)) { if (tree->UNW==NULL) { tree->UNW = new Bhtree(unw); } else { delete unw; } hipLaunchKernelGGL(( recinsert), dim3(1),dim3(1),0,streams[0], tree->UNW,updatedBod); } else { delete unw; Octant *une = tree->octy->mUNE(); if (contains(une,updatedBod->position)) { if (tree->UNE==NULL) { tree->UNE = new Bhtree(une); } else { delete une; } hipLaunchKernelGGL(( recinsert), dim3(1),dim3(1),0,streams[1], tree->UNE,updatedBod); } else { delete une; Octant *usw = tree->octy->mUSW(); if (contains(usw,updatedBod->position)) { if (tree->USW==NULL) { tree->USW = new Bhtree(usw); } else { delete usw; } hipLaunchKernelGGL(( recinsert), dim3(1),dim3(1),0,streams[2], tree->USW,updatedBod); } else { delete usw; Octant *use = tree->octy->mUSE(); if (contains(use,updatedBod->position)) { if (tree->USE==NULL) { tree->USE = new Bhtree(use); } else { delete use; } hipLaunchKernelGGL(( recinsert), dim3(1),dim3(1),0,streams[3], tree->USE,updatedBod); } else { delete use; Octant *dnw = tree->octy->mDNW(); if (contains(dnw,updatedBod->position)) { if (tree->DNW==NULL) { tree->DNW = new Bhtree(dnw); } else { delete dnw; } hipLaunchKernelGGL(( recinsert), dim3(1),dim3(1),0,streams[4], tree->DNW,updatedBod); } else { delete dnw; Octant *dne = tree->octy->mDNE(); if (contains(dne,updatedBod->position)) { if (tree->DNE==NULL) { tree->DNE = new Bhtree(dne); } else { delete dne; } hipLaunchKernelGGL(( recinsert), dim3(1),dim3(1),0,streams[5], tree->DNE,updatedBod); } else { delete dne; Octant *dsw = tree->octy->mDSW(); if (contains(dsw,updatedBod->position)) { if (tree->DSW==NULL) { tree->DSW = new Bhtree(dsw); } else { delete dsw; } hipLaunchKernelGGL(( recinsert), dim3(1),dim3(1),0,streams[6], 
tree->DSW,updatedBod); } else { delete dsw; Octant *dse = tree->octy->mDSE(); if (tree->DSE==NULL) { tree->DSE = new Bhtree(dse); } else { delete dse; } hipLaunchKernelGGL(( recinsert), dim3(1),dim3(1),0,streams[7], tree->DSE,updatedBod); } } } } } } } // delete updatedBod; if (isExtern) { hipLaunchKernelGGL(( recinsert), dim3(1),dim3(1), 0, 0, tree,insertBod); } } } __global__ void interactBodies(struct body* bods) { // Sun interacts individually printf("\ncalculating force from star..."); struct body *sun = &bods[0]; for (int bIndex=1; bIndex<NUM_BODIES; bIndex++) { singleInteraction(sun, &bods[bIndex]); } //if (DEBUG_INFO) {std::cout << "\nBuilding Octree..." << std::flush;} printf("\nBuilding octree..."); // Build tree vec3 *center = new struct vec3; center->x = 0; center->y = 0; center->z = 0.1374; /// Does this help? Octant *root = new Octant(center, 60*SYSTEM_SIZE); Gtree = new Bhtree(root); for (int bIndex=1; bIndex<NUM_BODIES; bIndex++) { if (contains(root,bods[bIndex].position)) { hipLaunchKernelGGL(( recinsert), dim3(1),dim3(1), 0, 0, Gtree,&bods[bIndex]); hipError_t error = hipGetLastError(); if(error!=hipSuccess) printf("\nCUDA error:%s",hipGetErrorString(error)); } } printf("\ncalculating interactions..."); //if (DEBUG_INFO) {std::cout << "\nCalculating particle interactions..." << std::flush;} // loop through interactions //#pragma omp parallel for for (int bIndex=1; bIndex<NUM_BODIES; bIndex++) { if (contains(root,bods[bIndex].position)) { hipLaunchKernelGGL(( interactInTree), dim3(1),dim3(1), 0, 0, Gtree,&bods[bIndex]); hipError_t error = hipGetLastError(); if(error!=hipSuccess) printf("\nCUDA error:%s",hipGetErrorString(error)); } } // Destroy tree // delete Gtree; // printf("\nupdating particle positions..."); //if (DEBUG_INFO) {std::cout << "\nUpdating particle positions..." 
<< std::flush;} updateBodies(bods); } __global__ void interactInTree(Bhtree *tree, struct body* b) { bool isExternal = tree->UNW==NULL && tree->UNE==NULL && tree->USW==NULL && tree->USE==NULL; isExternal = isExternal && tree->DNW==NULL && tree->DNE==NULL && tree->DSW==NULL && tree->DSE==NULL; const int num_streams=8; hipStream_t streams[num_streams]; for(int i=0; i<num_streams; i++) hipStreamCreateWithFlags(&streams[i],hipStreamNonBlocking); Octant *o = tree->octy; body *myb = tree->myBod; if(isExternal && myb!=b) singleInteraction(myb,b); else if(o->getLength()/magnitude(myb->position.x-b->position.x, myb->position.y-b->position.y, myb->position.z-b->position.z) < MAX_DISTANCE) singleInteraction(myb,b); else { if (tree->UNW!=NULL)hipLaunchKernelGGL(( interactInTree), dim3(1),dim3(1),0,streams[0], tree->UNW,b); if (tree->UNE!=NULL)hipLaunchKernelGGL(( interactInTree), dim3(1),dim3(1),0,streams[1], tree->UNE,b); if (tree->USW!=NULL)hipLaunchKernelGGL(( interactInTree), dim3(1),dim3(1),0,streams[2], tree->USW,b); if (tree->USE!=NULL)hipLaunchKernelGGL(( interactInTree), dim3(1),dim3(1),0,streams[3], tree->USE,b); if (tree->DNW!=NULL)hipLaunchKernelGGL(( interactInTree), dim3(1),dim3(1),0,streams[4], tree->DNW,b); if (tree->DNE!=NULL)hipLaunchKernelGGL(( interactInTree), dim3(1),dim3(1),0,streams[5], tree->DNE,b); if (tree->DSW!=NULL)hipLaunchKernelGGL(( interactInTree), dim3(1),dim3(1),0,streams[6], tree->DSW,b); if (tree->DSE!=NULL)hipLaunchKernelGGL(( interactInTree), dim3(1),dim3(1),0,streams[7], tree->DSE,b); } } __device__ inline bool contains(Octant *root, vec3 p) { double length = root->getLength(); vec3* mid = root->getMid(); return p.x<=mid->x+length/2.0 && p.x>=mid->x-length/2.0 && p.y<=mid->y+length/2.0 && p.y>=mid->y-length/2.0 && p.z<=mid->z+length/2.0 && p.z>=mid->z-length/2.0; } __host__ __device__ double magnitude(float x, float y, float z) { return sqrt(x*x+y*y+z*z); } __device__ void singleInteraction(struct body* a, struct body* b) { vec3 posDiff; posDiff.x = (a->position.x-b->position.x)*TO_METERS; posDiff.y = (a->position.y-b->position.y)*TO_METERS; posDiff.z = (a->position.z-b->position.z)*TO_METERS; double dist = magnitude(posDiff); double F = TIME_STEP*(G*a->mass*b->mass) / ((dist*dist + SOFTENING*SOFTENING) * dist); a->accel.x -= F*posDiff.x/a->mass; a->accel.y -= F*posDiff.y/a->mass; a->accel.z -= F*posDiff.z/a->mass; b->accel.x += F*posDiff.x/b->mass; b->accel.y += F*posDiff.y/b->mass; b->accel.z += F*posDiff.z/b->mass; } __host__ __device__ double magnitude(vec3 v) { return sqrt(v.x*v.x+v.y*v.y+v.z*v.z); } __device__ void updateBodies(struct body* bods) { double mAbove = 0.0; double mBelow = 0.0; for (int bIndex=0; bIndex<NUM_BODIES; bIndex++) { struct body *current = &bods[bIndex]; if (DEBUG_INFO) { if (bIndex==0) { // std::cout << "\nStar x accel: " << current->accel.x // << " Star y accel: " << current->accel.y; } else if (current->position.y > 0.0) { mAbove += current->mass; } else { mBelow += current->mass; } } current->velocity.x += current->accel.x; current->velocity.y += current->accel.y; current->velocity.z += current->accel.z; current->accel.x = 0.0; current->accel.y = 0.0; current->accel.z = 0.0; current->position.x += TIME_STEP*current->velocity.x/TO_METERS; current->position.y += TIME_STEP*current->velocity.y/TO_METERS; current->position.z += TIME_STEP*current->velocity.z/TO_METERS; } if (DEBUG_INFO) { //std::cout << "\nMass below: " << mBelow << " Mass Above: " // << mAbove << " \nRatio: " << mBelow/mAbove; } } void createFrame(char* image, double* 
hdImage, struct body* b, int step) { std::cout << "\nWriting frame " << step; if (DEBUG_INFO) {std::cout << "\nClearing Pixels..." << std::flush;} renderClear(image, hdImage); if (DEBUG_INFO) {std::cout << "\nRendering Particles..." << std::flush;} renderBodies(b, hdImage); if (DEBUG_INFO) {std::cout << "\nWriting frame to file..." << std::flush;} writeRender(image, hdImage, step); } void renderClear(char* image, double* hdImage) { for (int i=0; i<WIDTH*HEIGHT*3; i++) { // char* current = image + i; image[i] = 0; //char(image[i]/1.2); hdImage[i] = 0.0; } } void renderBodies(struct body* b, double* hdImage) { /// ORTHOGONAL PROJECTION for(int index=0; index<NUM_BODIES; index++) { struct body *current = &b[index]; int x = toPixelSpace(current->position.x, WIDTH); int y = toPixelSpace(current->position.y, HEIGHT); if (x>DOT_SIZE && x<WIDTH-DOT_SIZE && y>DOT_SIZE && y<HEIGHT-DOT_SIZE) { double vMag = magnitude(current->velocity); colorDot(current->position.x, current->position.y, vMag, hdImage); } } } double toPixelSpace(double p, int size) { return (size/2.0)*(1.0+p/(SYSTEM_SIZE*RENDER_SCALE)); } void colorDot(double x, double y, double vMag, double* hdImage) { const double velocityMax = MAX_VEL_COLOR; //35000 const double velocityMin = sqrt(0.8*(G*(SOLAR_MASS+EXTRA_MASS*SOLAR_MASS))/ (SYSTEM_SIZE*TO_METERS)); //MIN_VEL_COLOR; const double vPortion = sqrt((vMag-velocityMin) / velocityMax); color c; c.r = clamp(4*(vPortion-0.333)); c.g = clamp(fmin(4*vPortion,4.0*(1.0-vPortion))); c.b = clamp(4*(0.5-vPortion)); for (int i=-DOT_SIZE/2; i<DOT_SIZE/2; i++) { for (int j=-DOT_SIZE/2; j<DOT_SIZE/2; j++) { double xP = floor(toPixelSpace(x, WIDTH)); double yP = floor(toPixelSpace(y, HEIGHT)); double cFactor = PARTICLE_BRIGHTNESS / (pow(exp(pow(PARTICLE_SHARPNESS* (xP+i-toPixelSpace(x, WIDTH)),2.0)) + exp(pow(PARTICLE_SHARPNESS* (yP+j-toPixelSpace(y, HEIGHT)),2.0)),/*1.25*/0.75)+1.0); colorAt(int(xP+i),int(yP+j),c, cFactor, hdImage); } } } void colorAt(int x, int y, struct color c, double f, double* hdImage) { int pix = 3*(x+WIDTH*y); hdImage[pix+0] += c.r*f;//colorDepth(c.r, image[pix+0], f); hdImage[pix+1] += c.g*f;//colorDepth(c.g, image[pix+1], f); hdImage[pix+2] += c.b*f;//colorDepth(c.b, image[pix+2], f); } unsigned char colorDepth(unsigned char x, unsigned char p, double f) { return fmax(fmin((x*f+p),255),0); // unsigned char t = fmax(fmin((x*f+p),255),0); // return 2*t-(t*t)/255; } double clamp(double x) { return fmax(fmin(x,1.0),0.0); } void writeRender(char* data, double* hdImage, int step) { for (int i=0; i<WIDTH*HEIGHT*3; i++) { data[i] = int(255.0*clamp(hdImage[i])); } int frame = step/RENDER_INTERVAL + 1;//RENDER_INTERVAL; std::string name = "images/Step"; int i = 0; if (frame == 1000) i++; // Evil hack to avoid extra 0 at 1000 for (i; i<4-floor(log(frame)/log(10)); i++) { name.append("0"); } name.append(std::to_string(frame)); name.append(".ppm"); std::ofstream file (name, std::ofstream::binary); if (file.is_open()) { // size = file.tellg(); file << "P6\n" << WIDTH << " " << HEIGHT << "\n" << "255\n"; file.write(data, WIDTH*HEIGHT*3); file.close(); } }
aaa1a1fbd76592d16074f242a5c2c2dd1a7fe377.cu
#include <iostream> #include <fstream> #include <stdlib.h> #include <random> #include <omp.h> #include "cudaBhtree.cu" #include "Constants.h" void initializeBodies(struct body* bods); void runSimulation(struct body* b, struct body* db, char* image, double* hdImage); __global__ void interactBodies(struct body* b); __device__ void singleInteraction(struct body* a, struct body* b); __host__ __device__ double magnitude(vec3 v); __device__ void updateBodies(struct body* b); void createFrame(char* image, double* hdImage, struct body* b, int step); double toPixelSpace(double p, int size); void renderClear(char* image, double* hdImage); void renderBodies(struct body* b, double* hdImage); void colorDot(double x, double y, double vMag, double* hdImage); void colorAt(int x, int y, struct color c, double f, double* hdImage); unsigned char colorDepth(unsigned char x, unsigned char p, double f); double clamp(double x); __global__ void recinsert(Bhtree *tree, body* insertBod); void writeRender(char* data, double* hdImage, int step); __device__ inline bool contains(Octant *root, vec3 p); __global__ void interactInTree(Bhtree *tree, struct body* b); __host__ __device__ double magnitude(float x, float y, float z); int main() { std::cout << SYSTEM_THICKNESS << "AU thick disk\n";; char *image = new char[WIDTH*HEIGHT*3]; double *hdImage = new double[WIDTH*HEIGHT*3]; struct body *bodies = new struct body[NUM_BODIES]; struct body *d_bodies; cudaMalloc(&d_bodies,NUM_BODIES*sizeof(struct body)); initializeBodies(bodies); cudaMemcpy(d_bodies,bodies,NUM_BODIES*sizeof(struct body),cudaMemcpyHostToDevice); runSimulation(bodies, d_bodies, image, hdImage); std::cout << "\nwe made it\n"; delete[] bodies; delete[] image; return 0; } void initializeBodies(struct body* bods) { using std::uniform_real_distribution; uniform_real_distribution<double> randAngle (0.0, 200.0*PI); uniform_real_distribution<double> randRadius (INNER_BOUND, SYSTEM_SIZE); uniform_real_distribution<double> randHeight (0.0, SYSTEM_THICKNESS); std::default_random_engine gen (0); double angle; double radius; double velocity; struct body *current; //STARS velocity = 0.67*sqrt((G*SOLAR_MASS)/(4*BINARY_SEPARATION*TO_METERS)); //STAR 1 current = &bods[0]; current->position.x = 0.0;///-BINARY_SEPARATION; current->position.y = 0.0; current->position.z = 0.0; current->velocity.x = 0.0; current->velocity.y = 0.0;//velocity; current->velocity.z = 0.0; current->mass = SOLAR_MASS; ///STARTS AT NUMBER OF STARS/// double totalExtraMass = 0.0; for (int index=1; index<NUM_BODIES; index++) { angle = randAngle(gen); radius = sqrt(SYSTEM_SIZE)*sqrt(randRadius(gen)); velocity = pow(((G*(SOLAR_MASS+((radius-INNER_BOUND)/SYSTEM_SIZE)*EXTRA_MASS*SOLAR_MASS)) / (radius*TO_METERS)), 0.5); current = &bods[index]; current->position.x = radius*cos(angle); current->position.y = radius*sin(angle); current->position.z = randHeight(gen)-SYSTEM_THICKNESS/2; current->velocity.x = velocity*sin(angle); current->velocity.y = -velocity*cos(angle); current->velocity.z = 0.0; current->mass = (EXTRA_MASS*SOLAR_MASS)/NUM_BODIES; totalExtraMass += (EXTRA_MASS*SOLAR_MASS)/NUM_BODIES; } std::cout << "\nTotal Disk Mass: " << totalExtraMass; std::cout << "\nEach Particle weight: " << (EXTRA_MASS*SOLAR_MASS)/NUM_BODIES << "\n______________________________\n"; } void runSimulation(struct body* b, struct body* db, char* image, double* hdImage) { createFrame(image, hdImage, b, 1); printf("here (done with createFrame1)"); for (int step=1; step<STEP_COUNT; step++) { std::cout << "\nBeginning timestep: " 
<< step; interactBodies<<<1,1>>>(db); cudaError_t error = cudaGetLastError(); if(error!=cudaSuccess) printf("\nCUDA error:%s",cudaGetErrorString(error)); cudaMemcpy(b,db,NUM_BODIES*sizeof(struct body),cudaMemcpyDeviceToHost); if (step%RENDER_INTERVAL==0) { createFrame(image, hdImage, b, step + 1); } if (DEBUG_INFO) {std::cout << "\n-------Done------- timestep: " << step << "\n" << std::flush;} } } __device__ Bhtree *Gtree; __global__ void recinsert(Bhtree *tree, body* insertBod) { const int num_streams=8; cudaStream_t streams[num_streams]; for(int i=0; i<num_streams; i++) cudaStreamCreateWithFlags(&streams[i],cudaStreamNonBlocking); if (tree->myBod==NULL) { tree->myBod = insertBod; } else //if (!isExternal()) { bool isExtern = tree->UNW==NULL && tree->UNE==NULL && tree->USW==NULL && tree->USE==NULL; isExtern = isExtern && tree->DNW==NULL && tree->DNE==NULL && tree->DSW==NULL && tree->DSE==NULL; body *updatedBod; if (!isExtern) { updatedBod = new struct body; updatedBod->position.x = (insertBod->position.x*insertBod->mass + tree->myBod->position.x*tree->myBod->mass) / (insertBod->mass+tree->myBod->mass); updatedBod->position.y = (insertBod->position.y*insertBod->mass + tree->myBod->position.y*tree->myBod->mass) / (insertBod->mass+tree->myBod->mass); updatedBod->position.z = (insertBod->position.z*insertBod->mass + tree->myBod->position.z*tree->myBod->mass) / (insertBod->mass+tree->myBod->mass); updatedBod->mass = insertBod->mass+tree->myBod->mass; // delete myBod; if (tree->toDelete!=NULL) delete tree->toDelete; tree->toDelete = updatedBod; tree->myBod = updatedBod; updatedBod = insertBod; } else { updatedBod = tree->myBod; } Octant *unw = tree->octy->mUNW(); if (contains(unw,updatedBod->position)) { if (tree->UNW==NULL) { tree->UNW = new Bhtree(unw); } else { delete unw; } recinsert<<<1,1,0,streams[0]>>>(tree->UNW,updatedBod); } else { delete unw; Octant *une = tree->octy->mUNE(); if (contains(une,updatedBod->position)) { if (tree->UNE==NULL) { tree->UNE = new Bhtree(une); } else { delete une; } recinsert<<<1,1,0,streams[1]>>>(tree->UNE,updatedBod); } else { delete une; Octant *usw = tree->octy->mUSW(); if (contains(usw,updatedBod->position)) { if (tree->USW==NULL) { tree->USW = new Bhtree(usw); } else { delete usw; } recinsert<<<1,1,0,streams[2]>>>(tree->USW,updatedBod); } else { delete usw; Octant *use = tree->octy->mUSE(); if (contains(use,updatedBod->position)) { if (tree->USE==NULL) { tree->USE = new Bhtree(use); } else { delete use; } recinsert<<<1,1,0,streams[3]>>>(tree->USE,updatedBod); } else { delete use; Octant *dnw = tree->octy->mDNW(); if (contains(dnw,updatedBod->position)) { if (tree->DNW==NULL) { tree->DNW = new Bhtree(dnw); } else { delete dnw; } recinsert<<<1,1,0,streams[4]>>>(tree->DNW,updatedBod); } else { delete dnw; Octant *dne = tree->octy->mDNE(); if (contains(dne,updatedBod->position)) { if (tree->DNE==NULL) { tree->DNE = new Bhtree(dne); } else { delete dne; } recinsert<<<1,1,0,streams[5]>>>(tree->DNE,updatedBod); } else { delete dne; Octant *dsw = tree->octy->mDSW(); if (contains(dsw,updatedBod->position)) { if (tree->DSW==NULL) { tree->DSW = new Bhtree(dsw); } else { delete dsw; } recinsert<<<1,1,0,streams[6]>>>(tree->DSW,updatedBod); } else { delete dsw; Octant *dse = tree->octy->mDSE(); if (tree->DSE==NULL) { tree->DSE = new Bhtree(dse); } else { delete dse; } recinsert<<<1,1,0,streams[7]>>>(tree->DSE,updatedBod); } } } } } } } // delete updatedBod; if (isExtern) { recinsert<<<1,1>>>(tree,insertBod); } } } __global__ void interactBodies(struct body* bods) { // 
Sun interacts individually printf("\ncalculating force from star..."); struct body *sun = &bods[0]; for (int bIndex=1; bIndex<NUM_BODIES; bIndex++) { singleInteraction(sun, &bods[bIndex]); } //if (DEBUG_INFO) {std::cout << "\nBuilding Octree..." << std::flush;} printf("\nBuilding octree..."); // Build tree vec3 *center = new struct vec3; center->x = 0; center->y = 0; center->z = 0.1374; /// Does this help? Octant *root = new Octant(center, 60*SYSTEM_SIZE); Gtree = new Bhtree(root); for (int bIndex=1; bIndex<NUM_BODIES; bIndex++) { if (contains(root,bods[bIndex].position)) { recinsert<<<1,1>>>(Gtree,&bods[bIndex]); cudaError_t error = cudaGetLastError(); if(error!=cudaSuccess) printf("\nCUDA error:%s",cudaGetErrorString(error)); } } printf("\ncalculating interactions..."); //if (DEBUG_INFO) {std::cout << "\nCalculating particle interactions..." << std::flush;} // loop through interactions //#pragma omp parallel for for (int bIndex=1; bIndex<NUM_BODIES; bIndex++) { if (contains(root,bods[bIndex].position)) { interactInTree<<<1,1>>>(Gtree,&bods[bIndex]); cudaError_t error = cudaGetLastError(); if(error!=cudaSuccess) printf("\nCUDA error:%s",cudaGetErrorString(error)); } } // Destroy tree // delete Gtree; // printf("\nupdating particle positions..."); //if (DEBUG_INFO) {std::cout << "\nUpdating particle positions..." << std::flush;} updateBodies(bods); } __global__ void interactInTree(Bhtree *tree, struct body* b) { bool isExternal = tree->UNW==NULL && tree->UNE==NULL && tree->USW==NULL && tree->USE==NULL; isExternal = isExternal && tree->DNW==NULL && tree->DNE==NULL && tree->DSW==NULL && tree->DSE==NULL; const int num_streams=8; cudaStream_t streams[num_streams]; for(int i=0; i<num_streams; i++) cudaStreamCreateWithFlags(&streams[i],cudaStreamNonBlocking); Octant *o = tree->octy; body *myb = tree->myBod; if(isExternal && myb!=b) singleInteraction(myb,b); else if(o->getLength()/magnitude(myb->position.x-b->position.x, myb->position.y-b->position.y, myb->position.z-b->position.z) < MAX_DISTANCE) singleInteraction(myb,b); else { if (tree->UNW!=NULL) interactInTree<<<1,1,0,streams[0]>>>(tree->UNW,b); if (tree->UNE!=NULL) interactInTree<<<1,1,0,streams[1]>>>(tree->UNE,b); if (tree->USW!=NULL) interactInTree<<<1,1,0,streams[2]>>>(tree->USW,b); if (tree->USE!=NULL) interactInTree<<<1,1,0,streams[3]>>>(tree->USE,b); if (tree->DNW!=NULL) interactInTree<<<1,1,0,streams[4]>>>(tree->DNW,b); if (tree->DNE!=NULL) interactInTree<<<1,1,0,streams[5]>>>(tree->DNE,b); if (tree->DSW!=NULL) interactInTree<<<1,1,0,streams[6]>>>(tree->DSW,b); if (tree->DSE!=NULL) interactInTree<<<1,1,0,streams[7]>>>(tree->DSE,b); } } __device__ inline bool contains(Octant *root, vec3 p) { double length = root->getLength(); vec3* mid = root->getMid(); return p.x<=mid->x+length/2.0 && p.x>=mid->x-length/2.0 && p.y<=mid->y+length/2.0 && p.y>=mid->y-length/2.0 && p.z<=mid->z+length/2.0 && p.z>=mid->z-length/2.0; } __host__ __device__ double magnitude(float x, float y, float z) { return sqrt(x*x+y*y+z*z); } __device__ void singleInteraction(struct body* a, struct body* b) { vec3 posDiff; posDiff.x = (a->position.x-b->position.x)*TO_METERS; posDiff.y = (a->position.y-b->position.y)*TO_METERS; posDiff.z = (a->position.z-b->position.z)*TO_METERS; double dist = magnitude(posDiff); double F = TIME_STEP*(G*a->mass*b->mass) / ((dist*dist + SOFTENING*SOFTENING) * dist); a->accel.x -= F*posDiff.x/a->mass; a->accel.y -= F*posDiff.y/a->mass; a->accel.z -= F*posDiff.z/a->mass; b->accel.x += F*posDiff.x/b->mass; b->accel.y += F*posDiff.y/b->mass; 
b->accel.z += F*posDiff.z/b->mass; } __host__ __device__ double magnitude(vec3 v) { return sqrt(v.x*v.x+v.y*v.y+v.z*v.z); } __device__ void updateBodies(struct body* bods) { double mAbove = 0.0; double mBelow = 0.0; for (int bIndex=0; bIndex<NUM_BODIES; bIndex++) { struct body *current = &bods[bIndex]; if (DEBUG_INFO) { if (bIndex==0) { // std::cout << "\nStar x accel: " << current->accel.x // << " Star y accel: " << current->accel.y; } else if (current->position.y > 0.0) { mAbove += current->mass; } else { mBelow += current->mass; } } current->velocity.x += current->accel.x; current->velocity.y += current->accel.y; current->velocity.z += current->accel.z; current->accel.x = 0.0; current->accel.y = 0.0; current->accel.z = 0.0; current->position.x += TIME_STEP*current->velocity.x/TO_METERS; current->position.y += TIME_STEP*current->velocity.y/TO_METERS; current->position.z += TIME_STEP*current->velocity.z/TO_METERS; } if (DEBUG_INFO) { //std::cout << "\nMass below: " << mBelow << " Mass Above: " // << mAbove << " \nRatio: " << mBelow/mAbove; } } void createFrame(char* image, double* hdImage, struct body* b, int step) { std::cout << "\nWriting frame " << step; if (DEBUG_INFO) {std::cout << "\nClearing Pixels..." << std::flush;} renderClear(image, hdImage); if (DEBUG_INFO) {std::cout << "\nRendering Particles..." << std::flush;} renderBodies(b, hdImage); if (DEBUG_INFO) {std::cout << "\nWriting frame to file..." << std::flush;} writeRender(image, hdImage, step); } void renderClear(char* image, double* hdImage) { for (int i=0; i<WIDTH*HEIGHT*3; i++) { // char* current = image + i; image[i] = 0; //char(image[i]/1.2); hdImage[i] = 0.0; } } void renderBodies(struct body* b, double* hdImage) { /// ORTHOGONAL PROJECTION for(int index=0; index<NUM_BODIES; index++) { struct body *current = &b[index]; int x = toPixelSpace(current->position.x, WIDTH); int y = toPixelSpace(current->position.y, HEIGHT); if (x>DOT_SIZE && x<WIDTH-DOT_SIZE && y>DOT_SIZE && y<HEIGHT-DOT_SIZE) { double vMag = magnitude(current->velocity); colorDot(current->position.x, current->position.y, vMag, hdImage); } } } double toPixelSpace(double p, int size) { return (size/2.0)*(1.0+p/(SYSTEM_SIZE*RENDER_SCALE)); } void colorDot(double x, double y, double vMag, double* hdImage) { const double velocityMax = MAX_VEL_COLOR; //35000 const double velocityMin = sqrt(0.8*(G*(SOLAR_MASS+EXTRA_MASS*SOLAR_MASS))/ (SYSTEM_SIZE*TO_METERS)); //MIN_VEL_COLOR; const double vPortion = sqrt((vMag-velocityMin) / velocityMax); color c; c.r = clamp(4*(vPortion-0.333)); c.g = clamp(fmin(4*vPortion,4.0*(1.0-vPortion))); c.b = clamp(4*(0.5-vPortion)); for (int i=-DOT_SIZE/2; i<DOT_SIZE/2; i++) { for (int j=-DOT_SIZE/2; j<DOT_SIZE/2; j++) { double xP = floor(toPixelSpace(x, WIDTH)); double yP = floor(toPixelSpace(y, HEIGHT)); double cFactor = PARTICLE_BRIGHTNESS / (pow(exp(pow(PARTICLE_SHARPNESS* (xP+i-toPixelSpace(x, WIDTH)),2.0)) + exp(pow(PARTICLE_SHARPNESS* (yP+j-toPixelSpace(y, HEIGHT)),2.0)),/*1.25*/0.75)+1.0); colorAt(int(xP+i),int(yP+j),c, cFactor, hdImage); } } } void colorAt(int x, int y, struct color c, double f, double* hdImage) { int pix = 3*(x+WIDTH*y); hdImage[pix+0] += c.r*f;//colorDepth(c.r, image[pix+0], f); hdImage[pix+1] += c.g*f;//colorDepth(c.g, image[pix+1], f); hdImage[pix+2] += c.b*f;//colorDepth(c.b, image[pix+2], f); } unsigned char colorDepth(unsigned char x, unsigned char p, double f) { return fmax(fmin((x*f+p),255),0); // unsigned char t = fmax(fmin((x*f+p),255),0); // return 2*t-(t*t)/255; } double clamp(double x) { return 
fmax(fmin(x,1.0),0.0); } void writeRender(char* data, double* hdImage, int step) { for (int i=0; i<WIDTH*HEIGHT*3; i++) { data[i] = int(255.0*clamp(hdImage[i])); } int frame = step/RENDER_INTERVAL + 1;//RENDER_INTERVAL; std::string name = "images/Step"; int i = 0; if (frame == 1000) i++; // guard against floor(log(1000)/log(10)) rounding down at frame 1000, which would add an extra leading 0 for (; i<4-floor(log(frame)/log(10)); i++) { name.append("0"); } name.append(std::to_string(frame)); name.append(".ppm"); std::ofstream file (name, std::ofstream::binary); if (file.is_open()) { // size = file.tellg(); file << "P6\n" << WIDTH << " " << HEIGHT << "\n" << "255\n"; file.write(data, WIDTH*HEIGHT*3); file.close(); } }
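// Hedged sketch, not part of the original simulation source: recinsert() and
// interactInTree() above call cudaStreamCreateWithFlags() eight times on every
// device-side invocation and never release the streams, so deep recursion can exhaust
// the device runtime's stream resources. Assuming the existing streams/num_streams
// names and that device-side cudaStreamDestroy() is available (CUDA dynamic
// parallelism), one possible cleanup pattern after the child launches are enqueued is:
__device__ inline void releaseChildStreams(cudaStream_t* streams, int num_streams) {
    // enqueued child kernels continue to run; only the stream handles are released
    for (int i = 0; i < num_streams; i++) cudaStreamDestroy(streams[i]);
}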
d2fb8293b7e2d09e590ff3bda6bc159d502741dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if GOOGLE_CUDA #include "ew_op_gpu.h" #include <stdio.h> __device__ __forceinline__ uint bfe(uint val, int pos) { uint bit; asm ("bfe.u32 %0, %1, %2, 1;" : "=r"(bit) : "r"(val), "r"(pos) ); return bit; } typedef struct __align__(8) KeyVal { uint key; float val; } KeyVal; template <typename T> __global__ void top_k(T* Y, uint* A, const T* __restrict__ X, uint Exp, uint topK, uint K, uint rect, uint rebase) { extern __shared__ KeyVal data[]; uint tid = threadIdx.x; uint n = blockIdx.x; uint offset = n*K + tid; KeyVal init; init.key = tid; init.val = tid < K ? load(add_ptr_u(X, offset)) : -FLT_MAX; data[tid] = init; __syncthreads(); for (int i = 1; i <= Exp; ++i) { int j; #pragma unroll 1 for (j = i - 1; j >= 5; --j) { // when the comparison stride is 32 or greater, // use half of warps and uniform shared memory access to make comparisons if (tid < blockDim.x/2) { // figure out the a and b indexes for the "butterfly" compare operation uint m = (tid >> j) << (j + 1); uint r = tid & ((1 << j) - 1); uint a = m + r; uint b = a + (1 << j); bool d = bfe(a, i) != 0; KeyVal A = data[a]; KeyVal B = data[b]; if((B.val > A.val) ^ d) { KeyVal t = A; A = B; B = t; } data[a] = A; data[b] = B; } __syncthreads(); } // When the comparison stride is less than 32, // use all warps and shfl_xor operations to make comparisons in registers // Load shared to registers KeyVal A = data[tid]; #pragma unroll 5 while (j >= 0) { KeyVal B; B.val = shfl_xor(A.val, 1 << j); B.key = shfl_xor(A.key, 1 << j); bool d = bfe(tid, i) != bfe(tid, j--); // in the case of equality we want both shuffle lanes to not swap if(((B.val > A.val) ^ d) && B.val != A.val) A = B; } // Load final register values back to shared. data[tid] = A; __syncthreads(); } if (rect) { // avoid extra __syncthreads by coalescing to unused shared float* coalesce = (float*)&data[blockDim.x]; // Output same size as input, with zeros for non-topK values. // rebase sets the zero line to the min value of the topK KeyVal out = data[tid]; float base = rebase ? fmaxf(data[topK-1].val, 0.0f) : 0.0f; float val = tid < topK ? out.val : 0.0f; //if (tid == 0 && n == 0) // printf("base: %f %d\n", base, data[topK-1].key); // apply the rectification and coalesce the output coalesce[out.key] = fmaxf(val, base) - base; __syncthreads(); if (tid < K) store(add_ptr_u(Y, offset), coalesce[tid]); } else { // output just top values and their indicies. 
if (tid < topK) { KeyVal out = data[tid]; offset = n*topK + tid; store(add_ptr_u(Y, offset), out.val); __stg(add_ptr_u(A, offset), out.key); } } } template <typename T> bool TopK(hipStream_t stream, T* y, uint* a, const T* x, uint topK, uint N, uint K, uint rebase) { uint exp; if (K > 512) exp = 10; else if (K > 256) exp = 9; else if (K > 128) exp = 8; else if (K > 64) exp = 7; else if (K > 32) exp = 6; else exp = 5; uint threads = 1 << exp; uint shared = threads * 16; hipLaunchKernelGGL(( top_k<T>), dim3(N),dim3(threads),shared,stream, y, a, x, exp, topK, K, a == NULL, rebase); return true; } template bool TopK<float>(hipStream_t stream, float* y, uint* a, const float* x, uint topK, uint N, uint K, uint rebase); template bool TopK<ehalf>(hipStream_t stream, ehalf* y, uint* a, const ehalf* x, uint topK, uint N, uint K, uint rebase); template bool TopK<bhalf>(hipStream_t stream, bhalf* y, uint* a, const bhalf* x, uint topK, uint N, uint K, uint rebase); template <typename T> __global__ void masked_top_k_softmax(T* Y, const float* __restrict__ M, const T* __restrict__ X, uint Exp, uint topK, uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale) { extern __shared__ KeyVal block[]; extern __shared__ float stage[]; // x: D0, D1, D2, D3 // m: 1, D1, D2, D3 // m: 1, 1, D2, D3 // m: 1, 1, 1, D3 uint tid = threadIdx.x; uint d0 = blockIdx.x; uint d1 = blockIdx.y; uint d2 = blockIdx.z; uint offsetX = d0*D123 + d1*D23 + d2*D3 + tid; uint offsetM = d1*M1 + d2*M2 + tid; M = add_ptr_u(M, offsetM); X = add_ptr_u(X, offsetX); float mask = tid < D3 ? (use_mask ? __ldg(M) : 1.0f) : 0.0f; float xval = mask != 0.0 ? load(X) * mask * scale : -FLT_MAX; KeyVal init; init.key = tid; init.val = xval; block[tid] = init; __syncthreads(); for (int i = 1; i <= Exp; ++i) { int j; #pragma unroll 1 for (j = i - 1; j >= 5; --j) { // when the comparison stride is 32 or greater, // use half of warps and uniform shared memory access to make comparisons if (tid < blockDim.x/2) { // figure out the a and b indexes for the "butterfly" compare operation uint m = (tid >> j) << (j + 1); uint r = tid & ((1 << j) - 1); uint a = m + r; uint b = a + (1 << j); bool d = bfe(a, i) != 0; KeyVal A = block[a]; KeyVal B = block[b]; if((B.val > A.val) ^ d) { KeyVal t = A; A = B; B = t; } block[a] = A; block[b] = B; } __syncthreads(); } // When the comparison stride is less than 32, // use all warps and shfl_xor operations to make comparisons in registers // Load shared to registers KeyVal A = block[tid]; #pragma unroll 5 while (j >= 0) { KeyVal B; B.val = shfl_xor(A.val, 1 << j); B.key = shfl_xor(A.key, 1 << j); bool d = bfe(tid, i) != bfe(tid, j--); // in the case of equality we want both shuffle lanes to not swap if(((B.val > A.val) ^ d) && B.val != A.val) A = B; } // Load final register values back to shared. 
block[tid] = A; __syncthreads(); } float* vals = &stage[blockDim.x*2]; float* reds = &vals[blockDim.x]; KeyVal out = block[tid]; float val = 0.0f; if (tid < topK) val = expf(out.val - block[0].val); vals[out.key] = val; // reduce within warp #pragma unroll for (int i = 16; i > 0; i >>= 1) val += shfl_xor(val, i); // first thread of each warp store to shared if ((tid & 31) == 0) reds[tid/32] = val; __syncthreads(); if (tid < blockDim.x/32) { // first warp loads all prior reductions val = reds[tid]; // reduce within this last warp #pragma unroll 1 for (int i = blockDim.x/64; i > 0; i >>= 1) val += shfl_xor(val, i); // rcp final reduction to shared reds[tid] = 1.0f / val; } __syncthreads(); if (tid < D3) store(add_ptr_u(Y, offsetX), vals[tid] * reds[0]); } template <typename T> bool MaskedTopKSoftmax(hipStream_t stream, T* y, const float* m, const T* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale) { uint exp; if (D3 > 512) exp = 10; else if (D3 > 256) exp = 9; else if (D3 > 128) exp = 8; else if (D3 > 64) exp = 7; else if (D3 > 32) exp = 6; else exp = 5; uint threads = 1 << exp; uint shared = threads * 16; hipLaunchKernelGGL(( masked_top_k_softmax<T>), dim3(dim3(D0,D1,D2)),dim3(threads),shared,stream, y, m, x, exp, topK, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale); return true; } template bool MaskedTopKSoftmax<float>(hipStream_t stream, float* y, const float* m, const float* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); template bool MaskedTopKSoftmax<ehalf>(hipStream_t stream, ehalf* y, const float* m, const ehalf* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); template bool MaskedTopKSoftmax<bhalf>(hipStream_t stream, bhalf* y, const float* m, const bhalf* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); // x *= mask * scale // y = exp(x - max(x)) / sum( exp(x - max(x)) ) template <typename T, int U> __global__ void masked_softmax( T* Y, const T* __restrict__ X, const float* __restrict__ M, uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale, int threads_pow2) { __shared__ float Max[32]; __shared__ float Sum[32]; // x: D0, D1, D2, D3 // m: 1, D1, D2, D3 // m: 1, 1, D2, D3 // m: 1, 1, 1, D3 uint tid = threadIdx.x; uint d0 = blockIdx.x; uint d1 = blockIdx.y; uint d2 = blockIdx.z; if (blockDim.x > 32) { if (tid < 32) { // Allows non-power of 2 threads to work Max[tid] = -FLT_MAX; Sum[tid] = 0.0f; } __syncthreads(); } uint ti = (tid & 0x3fe0)*U + (tid & 31); uint offsetX = d0*D123 + d1*D23 + d2*D3 + ti; uint offsetM = d1*M1 + d2*M2 + ti; M = add_ptr_u(M, offsetM); X = add_ptr_u(X, offsetX); // Load mask float mask[U]; for (int i = 0; i < U; i++) mask[i]= 1.0f; if (use_mask) { for (int i = 0; i < U; i++) { mask[i] = 0.0f; if (ti + i*32 < D3) mask[i] = __ldg(M + i*32); } } // Load X float xval[U]; for (int i = 0; i < U; i++) xval[i] = -FLT_MAX; for (int i = 0; i < U; i++) if (mask[i] != 0.0 && ti + i*32 < D3) xval[i] = load(X, i*32) * mask[i] * scale; // reduce within thread float Xmax[U]; for (int i = 0; i < U; i++) Xmax[i] = xval[i]; for (int j = U >> 1; j > 0; j >>= 1) for (int i = 0; i < j; i++) Xmax[i] = fmaxf(Xmax[i], Xmax[i+j]); float xmax = Xmax[0]; // reduce within warp for (int i = 16; i > 0; i >>= 1) xmax = fmaxf(xmax, shfl_xor(xmax, i)); if (blockDim.x > 32) { // first thread of each warp store to shared if ((tid & 31) == 0) Max[tid/32] = xmax; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions xmax = 
Max[tid]; // reduce within this last warp #pragma unroll 1 for (int i = threads_pow2/64; i > 0; i >>= 1) xmax = fmaxf(xmax, shfl_xor(xmax, i)); // final reduction to shared Max[tid] = xmax; } __syncthreads(); xmax = Max[0]; } // compute exponent of softmax float Xsum[U]; for (int i = 0; i < U; i++) Xsum[i] = xval[i] = expf(xval[i] - xmax); // reduce within thread for (int j = U >> 1; j > 0; j >>= 1) for (int i = 0; i < j; i++) Xsum[i] = Xsum[i] + Xsum[i+j]; float exp_sum = Xsum[0]; // reduce within warp for (int i = 16; i > 0; i >>= 1) exp_sum += shfl_xor(exp_sum, i); if (blockDim.x > 32) { // first thread of each warp store to shared if ((tid & 31) == 0) Sum[tid/32] = exp_sum; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions exp_sum = Sum[tid]; // reduce within this last warp #pragma unroll 1 for (int i = threads_pow2/64; i > 0; i >>= 1) exp_sum += shfl_xor(exp_sum, i); // final reduction to shared Sum[tid] = exp_sum; } __syncthreads(); exp_sum = Sum[0]; } float rcp_exp_sum = 1.0f / exp_sum; Y = add_ptr_u(Y, offsetX); for (int i = 0; i < U; i++) store(Y, xval[i] * rcp_exp_sum, i*32, ti + i*32 < D3); } // x *= mask * scale // y = exp(x - max(x)) / sum( exp(x - max(x)) ) template <typename T> __global__ void __launch_bounds__(32) masked_softmax2( T* Y, const T* __restrict__ X, const float* __restrict__ M, uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale) { // x: D0, D1, D2, D3 // m: 1, D1, D2, D3 // m: 1, 1, D2, D3 // m: 1, 1, 1, D3 uint tid = threadIdx.x; uint d0 = blockIdx.x; uint d1 = blockIdx.y; uint d2 = blockIdx.z; uint offsetX = d0*D123 + d1*D23 + d2*D3 + tid; uint offsetM = d1*M1 + d2*M2 + tid; // max(x, axis-1) float max_x = -FLT_MAX; #pragma unroll 2 for (uint d3 = tid, xi = offsetX, mi = offsetM; d3 < D3; d3 += 32, xi += 32, mi += 32) { float m = use_mask ? __ldg(add_ptr_u(M, mi)) : 1.0f; float x = m != 0.0 ? load(add_ptr_u(X, xi)) * m * scale : -FLT_MAX; max_x = fmaxf(max_x, x); } for (int i = 16; i > 0; i >>= 1) max_x = fmaxf(max_x, shfl_xor(max_x, i)); float exp_sum = 0.0f; #pragma unroll 2 for (uint d3 = tid, xi = offsetX, mi = offsetM; d3 < D3; d3 += 32, xi += 32, mi += 32) { float m = use_mask ? __ldg(add_ptr_u(M, mi)) : 1.0f; float x = m != 0.0 ? load(add_ptr_u(X, xi)) * m * scale : -FLT_MAX; exp_sum += expf(x - max_x); } for (int i = 16; i > 0; i >>= 1) exp_sum += shfl_xor(exp_sum, i); float rcp_exp_sum = 1.0f / exp_sum; #pragma unroll 2 for (uint d3 = tid, xi = offsetX, mi = offsetM; d3 < D3; d3 += 32, xi += 32, mi += 32) { float m = use_mask ? __ldg(add_ptr_u(M, mi)) : 1.0f; float x = m != 0.0 ? 
load(add_ptr_u(X, xi)) * m * scale : -FLT_MAX; float y = expf(x - max_x) * rcp_exp_sum; store(add_ptr_u(Y, xi), y); } } // dx = (dy - sum(dy * y, axis=-1)) * y * m * scale template <typename T, int U> __global__ void masked_softmax_grad( T* DX, const T* __restrict__ DY, const T* __restrict__ Y, const float* __restrict__ M, uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale, int threads_pow2) { __shared__ float Sum[32]; // x: D0, D1, D2, D3 // m: 1, D1, D2, D3 // m: 1, 1, D2, D3 // m: 1, 1, 1, D3 uint tid = threadIdx.x; uint d0 = blockIdx.x; uint d1 = blockIdx.y; uint d2 = blockIdx.z; if (blockDim.x > 32) { // Allows non-power of 2 threads to work if (tid < 32) Sum[tid] = 0.0f; __syncthreads(); } uint ti = (tid & 0x3fe0)*U + (tid & 31); uint offsetY = d0*D123 + d1*D23 + d2*D3 + ti; uint offsetM = d1*M1 + d2*M2 + ti; DY = add_ptr_u(DY, offsetY); Y = add_ptr_u( Y, offsetY); M = add_ptr_u( M, offsetM); // Load mask float mask[U]; for (int i = 0; i < U; i++) mask[i]= 1.0f; if (use_mask) { for (int i = 0; i < U; i++) { mask[i] = 0.0f; if (ti + i*32 < D3) mask[i] = __ldg(M + i*32); } } // Load DY float dy[U]; for (int i = 0; i < U; i++) dy[i]= 0.0f; for (int i = 0; i < U; i++) if (mask[i] != 0.0 && ti + i*32 < D3) dy[i] = load(DY, i*32); // Load Y float y[U]; for (int i = 0; i < U; i++) y[i]= 0.0f; for (int i = 0; i < U; i++) if (mask[i] != 0.0 && ti + i*32 < D3) y[i] = load(Y, i*32); // compute dy * y and y * mask * scale float dyy[U]; for (int i = 0; i < U; i++) { dyy[i] = dy[i] * y[i]; y[i] *= mask[i] * scale; } // reduce within thread for (int j = U >> 1; j > 0; j >>= 1) for (int i = 0; i < j; i++) dyy[i] = dyy[i] + dyy[i+j]; float sum_dyy = dyy[0]; // reduce within warp for (int i = 16; i > 0; i >>= 1) sum_dyy += shfl_xor(sum_dyy, i); if (blockDim.x > 32) { // first thread of each warp store to shared if ((tid & 31) == 0) Sum[tid/32] = sum_dyy; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions sum_dyy = Sum[tid]; // reduce within this last warp #pragma unroll 1 for (int i = threads_pow2/64; i > 0; i >>= 1) sum_dyy += shfl_xor(sum_dyy, i); // final reduction to shared Sum[tid] = sum_dyy; } __syncthreads(); sum_dyy = Sum[0]; } // dx = (dy - sum_dyy) * y * mask* scale DX = add_ptr_u(DX, offsetY); for (int i = 0; i < U; i++) store(DX, (dy[i] - sum_dyy) * y[i], i*32, ti + i*32 < D3); } // dx = (dy - sum(dy * y, axis=-1)) * y * m * scale template <typename T> __global__ void __launch_bounds__(32) masked_softmax_grad2( T* DX, const T* __restrict__ DY, const T* __restrict__ Y, const float* __restrict__ M, uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale) { // x: D0, D1, D2, D3 // m: 1, D1, D2, D3 // m: 1, 1, D2, D3 // m: 1, 1, 1, D3 uint tid = threadIdx.x; uint d0 = blockIdx.x; uint d1 = blockIdx.y; uint d2 = blockIdx.z; uint offsetY = d0*D123 + d1*D23 + d2*D3 + tid; uint offsetM = d1*M1 + d2*M2 + tid; // sum(dy * y, axis=-1)) float sum_dy_y = 0.0f; #pragma unroll 2 for (uint d3 = tid, offset = offsetY; d3 < D3; d3 += 32, offset += 32) { float dy = load(add_ptr_u(DY, offset)); float y = load(add_ptr_u(Y, offset)); sum_dy_y += dy * y; } for (int i = 16; i > 0; i >>= 1) sum_dy_y += shfl_xor(sum_dy_y, i); #pragma unroll 2 for (uint d3 = tid; d3 < D3; d3 += 32, offsetY += 32, offsetM += 32) { float dy = load(add_ptr_u(DY, offsetY)); float y = load(add_ptr_u(Y, offsetY)); float m = use_mask ? 
__ldg(add_ptr_u(M, offsetM)) : 1.0f; float dx = (dy - sum_dy_y) * y * m * scale; store(add_ptr_u(DX, offsetY), dx); } } template <typename T> bool MaskedSoftmax(hipStream_t stream, T* y, const T* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale) { if (D3 > 1024*8) hipLaunchKernelGGL(( masked_softmax2<T>), dim3(dim3(D0,D1,D2)),dim3(32),0,stream, y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale); else { if (D3 > 32*4) { uint threads = CEIL_DIV(D3, 32*8) * 32; int thread2 = THREAD_POW2(threads); hipLaunchKernelGGL(( masked_softmax<T,8>), dim3(dim3(D0,D1,D2)),dim3(threads),0,stream, y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale, thread2); } else if (D3 > 32*2) hipLaunchKernelGGL(( masked_softmax<T,4>), dim3(dim3(D0,D1,D2)),dim3(32),0,stream, y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32); else if (D3 > 32*1) hipLaunchKernelGGL(( masked_softmax<T,2>), dim3(dim3(D0,D1,D2)),dim3(32),0,stream, y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32); else hipLaunchKernelGGL(( masked_softmax<T,1>), dim3(dim3(D0,D1,D2)),dim3(32),0,stream, y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32); } return true; } template <typename T> bool MaskedSoftmaxGrad(hipStream_t stream, T* dx, const T* dy, const T* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale) { if (D3 > 1024*4) hipLaunchKernelGGL(( masked_softmax_grad2<T>), dim3(dim3(D0,D1,D2)),dim3(32),0,stream, dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale); else { if (D3 > 32*2) { uint threads = CEIL_DIV(D3, 32*4) * 32; int thread2 = THREAD_POW2(threads); hipLaunchKernelGGL(( masked_softmax_grad<T,4>), dim3(dim3(D0,D1,D2)),dim3(threads),0,stream, dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale, thread2); } else if (D3 > 32*1) hipLaunchKernelGGL(( masked_softmax_grad<T,2>), dim3(dim3(D0,D1,D2)),dim3(32),0,stream, dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32); else hipLaunchKernelGGL(( masked_softmax_grad<T,1>), dim3(dim3(D0,D1,D2)),dim3(32),0,stream, dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32); } return true; } template bool MaskedSoftmax<float>(hipStream_t stream, float* y, const float* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); template bool MaskedSoftmax<ehalf>(hipStream_t stream, ehalf* y, const ehalf* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); template bool MaskedSoftmax<bhalf>(hipStream_t stream, bhalf* y, const bhalf* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); template bool MaskedSoftmaxGrad<float>(hipStream_t stream, float* dx, const float* dy, const float* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); template bool MaskedSoftmaxGrad<ehalf>(hipStream_t stream, ehalf* dx, const ehalf* dy, const ehalf* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); template bool MaskedSoftmaxGrad<bhalf>(hipStream_t stream, bhalf* dx, const bhalf* dy, const bhalf* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); // split_heads: (batch, pixel, head, state) -> (batch, head, pixel, state) // merge_heads: (batch, head, pixel, state) -> (batch, pixel, head, state) template <typename T, uint U> __global__ void __launch_bounds__(32) transpose_0213(T* Y, const T* X, uint D123, uint D23, uint D13, uint D2, uint D3) { uint tid = threadIdx.x; uint d2 = blockIdx.x; 
uint d1 = blockIdx.y; uint d0 = blockIdx.z; uint offset = d0*D123 + tid; uint offsetX = d1*D23 + d2*D3 + offset; uint offsetY = d2*D13 + d1*D3 + offset; #pragma unroll 1 while (d2 < D2) { #pragma unroll 1 for (uint d3 = tid, xi = offsetX, yi = offsetY; d3 < D3; d3 += U*32, xi += U*32, yi += U*32) { const T* Xi = add_ptr_u(X, xi); T* Yi = add_ptr_u(Y, yi); float x[U]; for (uint i = 0; i < U; i++) x[i] = load(Xi, i*32, d3 + i*32 < D3); for (uint i = 0; i < U; i++) store(Yi, x[i], i*32, d3 + i*32 < D3); } offsetX += gridDim.x*D3; offsetY += gridDim.x*D13; d2 += gridDim.x; } } template <typename T> bool Transpose_0213(hipStream_t stream, T* y, const T* x, uint D0, uint D1, uint D2, uint D3) { // make sure each block has enough work to cover launch overhead uint gridX = CEIL_DIV(D2, 4); if (D3 <= 64) hipLaunchKernelGGL(( transpose_0213<T,2>), dim3(dim3(gridX,D1,D0)),dim3(32),0,stream, y, x, D1*D2*D3, D2*D3, D1*D3, D2, D3); else hipLaunchKernelGGL(( transpose_0213<T,4>), dim3(dim3(gridX,D1,D0)),dim3(32),0,stream, y, x, D1*D2*D3, D2*D3, D1*D3, D2, D3); return true; } template bool Transpose_0213<float>(hipStream_t stream, float* y, const float* x, uint D0, uint D1, uint D2, uint D3); template bool Transpose_0213<ehalf>(hipStream_t stream, ehalf* y, const ehalf* x, uint D0, uint D1, uint D2, uint D3); template bool Transpose_0213<bhalf>(hipStream_t stream, bhalf* y, const bhalf* x, uint D0, uint D1, uint D2, uint D3); #endif
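// Hypothetical usage sketch, not part of the original kernel file: a minimal host-side
// call of the TopK<float> entry point defined above. The input pointer and sizes are
// placeholders and error checking is omitted; only the launch pattern is shown. One
// block sorts one row, so K is assumed to be at most 1024. Passing a non-NULL index
// buffer selects the "values + indices" output path (N x topK each).
#if GOOGLE_CUDA
static void example_topk_call(hipStream_t stream, const float* d_x, uint N, uint K, uint topK)
{
    float* d_vals = NULL;  // N x topK top values
    uint*  d_idx  = NULL;  // N x topK column indices of those values
    hipMalloc((void**)&d_vals, (size_t)N * topK * sizeof(float));
    hipMalloc((void**)&d_idx,  (size_t)N * topK * sizeof(uint));
    TopK<float>(stream, d_vals, d_idx, d_x, topK, N, K, /*rebase=*/0);
    hipStreamSynchronize(stream);  // ensure the kernel finished before freeing
    hipFree(d_vals);
    hipFree(d_idx);
}
#endif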
d2fb8293b7e2d09e590ff3bda6bc159d502741dc.cu
#if GOOGLE_CUDA #include "ew_op_gpu.h" #include <stdio.h> __device__ __forceinline__ uint bfe(uint val, int pos) { uint bit; asm ("bfe.u32 %0, %1, %2, 1;" : "=r"(bit) : "r"(val), "r"(pos) ); return bit; } typedef struct __align__(8) KeyVal { uint key; float val; } KeyVal; template <typename T> __global__ void top_k(T* Y, uint* A, const T* __restrict__ X, uint Exp, uint topK, uint K, uint rect, uint rebase) { extern __shared__ KeyVal data[]; uint tid = threadIdx.x; uint n = blockIdx.x; uint offset = n*K + tid; KeyVal init; init.key = tid; init.val = tid < K ? load(add_ptr_u(X, offset)) : -FLT_MAX; data[tid] = init; __syncthreads(); for (int i = 1; i <= Exp; ++i) { int j; #pragma unroll 1 for (j = i - 1; j >= 5; --j) { // when the comparison stride is 32 or greater, // use half of warps and uniform shared memory access to make comparisons if (tid < blockDim.x/2) { // figure out the a and b indexes for the "butterfly" compare operation uint m = (tid >> j) << (j + 1); uint r = tid & ((1 << j) - 1); uint a = m + r; uint b = a + (1 << j); bool d = bfe(a, i) != 0; KeyVal A = data[a]; KeyVal B = data[b]; if((B.val > A.val) ^ d) { KeyVal t = A; A = B; B = t; } data[a] = A; data[b] = B; } __syncthreads(); } // When the comparison stride is less than 32, // use all warps and shfl_xor operations to make comparisons in registers // Load shared to registers KeyVal A = data[tid]; #pragma unroll 5 while (j >= 0) { KeyVal B; B.val = shfl_xor(A.val, 1 << j); B.key = shfl_xor(A.key, 1 << j); bool d = bfe(tid, i) != bfe(tid, j--); // in the case of equality we want both shuffle lanes to not swap if(((B.val > A.val) ^ d) && B.val != A.val) A = B; } // Load final register values back to shared. data[tid] = A; __syncthreads(); } if (rect) { // avoid extra __syncthreads by coalescing to unused shared float* coalesce = (float*)&data[blockDim.x]; // Output same size as input, with zeros for non-topK values. // rebase sets the zero line to the min value of the topK KeyVal out = data[tid]; float base = rebase ? fmaxf(data[topK-1].val, 0.0f) : 0.0f; float val = tid < topK ? out.val : 0.0f; //if (tid == 0 && n == 0) // printf("base: %f %d\n", base, data[topK-1].key); // apply the rectification and coalesce the output coalesce[out.key] = fmaxf(val, base) - base; __syncthreads(); if (tid < K) store(add_ptr_u(Y, offset), coalesce[tid]); } else { // output just top values and their indicies. 
if (tid < topK) { KeyVal out = data[tid]; offset = n*topK + tid; store(add_ptr_u(Y, offset), out.val); __stg(add_ptr_u(A, offset), out.key); } } } template <typename T> bool TopK(CUstream stream, T* y, uint* a, const T* x, uint topK, uint N, uint K, uint rebase) { uint exp; if (K > 512) exp = 10; else if (K > 256) exp = 9; else if (K > 128) exp = 8; else if (K > 64) exp = 7; else if (K > 32) exp = 6; else exp = 5; uint threads = 1 << exp; uint shared = threads * 16; top_k<T><<<N,threads,shared,stream>>>(y, a, x, exp, topK, K, a == NULL, rebase); return true; } template bool TopK<float>(CUstream stream, float* y, uint* a, const float* x, uint topK, uint N, uint K, uint rebase); template bool TopK<ehalf>(CUstream stream, ehalf* y, uint* a, const ehalf* x, uint topK, uint N, uint K, uint rebase); template bool TopK<bhalf>(CUstream stream, bhalf* y, uint* a, const bhalf* x, uint topK, uint N, uint K, uint rebase); template <typename T> __global__ void masked_top_k_softmax(T* Y, const float* __restrict__ M, const T* __restrict__ X, uint Exp, uint topK, uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale) { extern __shared__ KeyVal block[]; extern __shared__ float stage[]; // x: D0, D1, D2, D3 // m: 1, D1, D2, D3 // m: 1, 1, D2, D3 // m: 1, 1, 1, D3 uint tid = threadIdx.x; uint d0 = blockIdx.x; uint d1 = blockIdx.y; uint d2 = blockIdx.z; uint offsetX = d0*D123 + d1*D23 + d2*D3 + tid; uint offsetM = d1*M1 + d2*M2 + tid; M = add_ptr_u(M, offsetM); X = add_ptr_u(X, offsetX); float mask = tid < D3 ? (use_mask ? __ldg(M) : 1.0f) : 0.0f; float xval = mask != 0.0 ? load(X) * mask * scale : -FLT_MAX; KeyVal init; init.key = tid; init.val = xval; block[tid] = init; __syncthreads(); for (int i = 1; i <= Exp; ++i) { int j; #pragma unroll 1 for (j = i - 1; j >= 5; --j) { // when the comparison stride is 32 or greater, // use half of warps and uniform shared memory access to make comparisons if (tid < blockDim.x/2) { // figure out the a and b indexes for the "butterfly" compare operation uint m = (tid >> j) << (j + 1); uint r = tid & ((1 << j) - 1); uint a = m + r; uint b = a + (1 << j); bool d = bfe(a, i) != 0; KeyVal A = block[a]; KeyVal B = block[b]; if((B.val > A.val) ^ d) { KeyVal t = A; A = B; B = t; } block[a] = A; block[b] = B; } __syncthreads(); } // When the comparison stride is less than 32, // use all warps and shfl_xor operations to make comparisons in registers // Load shared to registers KeyVal A = block[tid]; #pragma unroll 5 while (j >= 0) { KeyVal B; B.val = shfl_xor(A.val, 1 << j); B.key = shfl_xor(A.key, 1 << j); bool d = bfe(tid, i) != bfe(tid, j--); // in the case of equality we want both shuffle lanes to not swap if(((B.val > A.val) ^ d) && B.val != A.val) A = B; } // Load final register values back to shared. 
block[tid] = A; __syncthreads(); } float* vals = &stage[blockDim.x*2]; float* reds = &vals[blockDim.x]; KeyVal out = block[tid]; float val = 0.0f; if (tid < topK) val = expf(out.val - block[0].val); vals[out.key] = val; // reduce within warp #pragma unroll for (int i = 16; i > 0; i >>= 1) val += shfl_xor(val, i); // first thread of each warp store to shared if ((tid & 31) == 0) reds[tid/32] = val; __syncthreads(); if (tid < blockDim.x/32) { // first warp loads all prior reductions val = reds[tid]; // reduce within this last warp #pragma unroll 1 for (int i = blockDim.x/64; i > 0; i >>= 1) val += shfl_xor(val, i); // rcp final reduction to shared reds[tid] = 1.0f / val; } __syncthreads(); if (tid < D3) store(add_ptr_u(Y, offsetX), vals[tid] * reds[0]); } template <typename T> bool MaskedTopKSoftmax(CUstream stream, T* y, const float* m, const T* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale) { uint exp; if (D3 > 512) exp = 10; else if (D3 > 256) exp = 9; else if (D3 > 128) exp = 8; else if (D3 > 64) exp = 7; else if (D3 > 32) exp = 6; else exp = 5; uint threads = 1 << exp; uint shared = threads * 16; masked_top_k_softmax<T><<<dim3(D0,D1,D2),threads,shared,stream>>>(y, m, x, exp, topK, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale); return true; } template bool MaskedTopKSoftmax<float>(CUstream stream, float* y, const float* m, const float* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); template bool MaskedTopKSoftmax<ehalf>(CUstream stream, ehalf* y, const float* m, const ehalf* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); template bool MaskedTopKSoftmax<bhalf>(CUstream stream, bhalf* y, const float* m, const bhalf* x, uint topK, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); // x *= mask * scale // y = exp(x - max(x)) / sum( exp(x - max(x)) ) template <typename T, int U> __global__ void masked_softmax( T* Y, const T* __restrict__ X, const float* __restrict__ M, uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale, int threads_pow2) { __shared__ float Max[32]; __shared__ float Sum[32]; // x: D0, D1, D2, D3 // m: 1, D1, D2, D3 // m: 1, 1, D2, D3 // m: 1, 1, 1, D3 uint tid = threadIdx.x; uint d0 = blockIdx.x; uint d1 = blockIdx.y; uint d2 = blockIdx.z; if (blockDim.x > 32) { if (tid < 32) { // Allows non-power of 2 threads to work Max[tid] = -FLT_MAX; Sum[tid] = 0.0f; } __syncthreads(); } uint ti = (tid & 0x3fe0)*U + (tid & 31); uint offsetX = d0*D123 + d1*D23 + d2*D3 + ti; uint offsetM = d1*M1 + d2*M2 + ti; M = add_ptr_u(M, offsetM); X = add_ptr_u(X, offsetX); // Load mask float mask[U]; for (int i = 0; i < U; i++) mask[i]= 1.0f; if (use_mask) { for (int i = 0; i < U; i++) { mask[i] = 0.0f; if (ti + i*32 < D3) mask[i] = __ldg(M + i*32); } } // Load X float xval[U]; for (int i = 0; i < U; i++) xval[i] = -FLT_MAX; for (int i = 0; i < U; i++) if (mask[i] != 0.0 && ti + i*32 < D3) xval[i] = load(X, i*32) * mask[i] * scale; // reduce within thread float Xmax[U]; for (int i = 0; i < U; i++) Xmax[i] = xval[i]; for (int j = U >> 1; j > 0; j >>= 1) for (int i = 0; i < j; i++) Xmax[i] = fmaxf(Xmax[i], Xmax[i+j]); float xmax = Xmax[0]; // reduce within warp for (int i = 16; i > 0; i >>= 1) xmax = fmaxf(xmax, shfl_xor(xmax, i)); if (blockDim.x > 32) { // first thread of each warp store to shared if ((tid & 31) == 0) Max[tid/32] = xmax; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions xmax = Max[tid]; // reduce within this last warp 
#pragma unroll 1 for (int i = threads_pow2/64; i > 0; i >>= 1) xmax = fmaxf(xmax, shfl_xor(xmax, i)); // final reduction to shared Max[tid] = xmax; } __syncthreads(); xmax = Max[0]; } // compute exponent of softmax float Xsum[U]; for (int i = 0; i < U; i++) Xsum[i] = xval[i] = expf(xval[i] - xmax); // reduce within thread for (int j = U >> 1; j > 0; j >>= 1) for (int i = 0; i < j; i++) Xsum[i] = Xsum[i] + Xsum[i+j]; float exp_sum = Xsum[0]; // reduce within warp for (int i = 16; i > 0; i >>= 1) exp_sum += shfl_xor(exp_sum, i); if (blockDim.x > 32) { // first thread of each warp store to shared if ((tid & 31) == 0) Sum[tid/32] = exp_sum; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions exp_sum = Sum[tid]; // reduce within this last warp #pragma unroll 1 for (int i = threads_pow2/64; i > 0; i >>= 1) exp_sum += shfl_xor(exp_sum, i); // final reduction to shared Sum[tid] = exp_sum; } __syncthreads(); exp_sum = Sum[0]; } float rcp_exp_sum = 1.0f / exp_sum; Y = add_ptr_u(Y, offsetX); for (int i = 0; i < U; i++) store(Y, xval[i] * rcp_exp_sum, i*32, ti + i*32 < D3); } // x *= mask * scale // y = exp(x - max(x)) / sum( exp(x - max(x)) ) template <typename T> __global__ void __launch_bounds__(32) masked_softmax2( T* Y, const T* __restrict__ X, const float* __restrict__ M, uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale) { // x: D0, D1, D2, D3 // m: 1, D1, D2, D3 // m: 1, 1, D2, D3 // m: 1, 1, 1, D3 uint tid = threadIdx.x; uint d0 = blockIdx.x; uint d1 = blockIdx.y; uint d2 = blockIdx.z; uint offsetX = d0*D123 + d1*D23 + d2*D3 + tid; uint offsetM = d1*M1 + d2*M2 + tid; // max(x, axis-1) float max_x = -FLT_MAX; #pragma unroll 2 for (uint d3 = tid, xi = offsetX, mi = offsetM; d3 < D3; d3 += 32, xi += 32, mi += 32) { float m = use_mask ? __ldg(add_ptr_u(M, mi)) : 1.0f; float x = m != 0.0 ? load(add_ptr_u(X, xi)) * m * scale : -FLT_MAX; max_x = fmaxf(max_x, x); } for (int i = 16; i > 0; i >>= 1) max_x = fmaxf(max_x, shfl_xor(max_x, i)); float exp_sum = 0.0f; #pragma unroll 2 for (uint d3 = tid, xi = offsetX, mi = offsetM; d3 < D3; d3 += 32, xi += 32, mi += 32) { float m = use_mask ? __ldg(add_ptr_u(M, mi)) : 1.0f; float x = m != 0.0 ? load(add_ptr_u(X, xi)) * m * scale : -FLT_MAX; exp_sum += expf(x - max_x); } for (int i = 16; i > 0; i >>= 1) exp_sum += shfl_xor(exp_sum, i); float rcp_exp_sum = 1.0f / exp_sum; #pragma unroll 2 for (uint d3 = tid, xi = offsetX, mi = offsetM; d3 < D3; d3 += 32, xi += 32, mi += 32) { float m = use_mask ? __ldg(add_ptr_u(M, mi)) : 1.0f; float x = m != 0.0 ? 
load(add_ptr_u(X, xi)) * m * scale : -FLT_MAX; float y = expf(x - max_x) * rcp_exp_sum; store(add_ptr_u(Y, xi), y); } } // dx = (dy - sum(dy * y, axis=-1)) * y * m * scale template <typename T, int U> __global__ void masked_softmax_grad( T* DX, const T* __restrict__ DY, const T* __restrict__ Y, const float* __restrict__ M, uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale, int threads_pow2) { __shared__ float Sum[32]; // x: D0, D1, D2, D3 // m: 1, D1, D2, D3 // m: 1, 1, D2, D3 // m: 1, 1, 1, D3 uint tid = threadIdx.x; uint d0 = blockIdx.x; uint d1 = blockIdx.y; uint d2 = blockIdx.z; if (blockDim.x > 32) { // Allows non-power of 2 threads to work if (tid < 32) Sum[tid] = 0.0f; __syncthreads(); } uint ti = (tid & 0x3fe0)*U + (tid & 31); uint offsetY = d0*D123 + d1*D23 + d2*D3 + ti; uint offsetM = d1*M1 + d2*M2 + ti; DY = add_ptr_u(DY, offsetY); Y = add_ptr_u( Y, offsetY); M = add_ptr_u( M, offsetM); // Load mask float mask[U]; for (int i = 0; i < U; i++) mask[i]= 1.0f; if (use_mask) { for (int i = 0; i < U; i++) { mask[i] = 0.0f; if (ti + i*32 < D3) mask[i] = __ldg(M + i*32); } } // Load DY float dy[U]; for (int i = 0; i < U; i++) dy[i]= 0.0f; for (int i = 0; i < U; i++) if (mask[i] != 0.0 && ti + i*32 < D3) dy[i] = load(DY, i*32); // Load Y float y[U]; for (int i = 0; i < U; i++) y[i]= 0.0f; for (int i = 0; i < U; i++) if (mask[i] != 0.0 && ti + i*32 < D3) y[i] = load(Y, i*32); // compute dy * y and y * mask * scale float dyy[U]; for (int i = 0; i < U; i++) { dyy[i] = dy[i] * y[i]; y[i] *= mask[i] * scale; } // reduce within thread for (int j = U >> 1; j > 0; j >>= 1) for (int i = 0; i < j; i++) dyy[i] = dyy[i] + dyy[i+j]; float sum_dyy = dyy[0]; // reduce within warp for (int i = 16; i > 0; i >>= 1) sum_dyy += shfl_xor(sum_dyy, i); if (blockDim.x > 32) { // first thread of each warp store to shared if ((tid & 31) == 0) Sum[tid/32] = sum_dyy; __syncthreads(); if (tid < 32) { // first warp loads all prior reductions sum_dyy = Sum[tid]; // reduce within this last warp #pragma unroll 1 for (int i = threads_pow2/64; i > 0; i >>= 1) sum_dyy += shfl_xor(sum_dyy, i); // final reduction to shared Sum[tid] = sum_dyy; } __syncthreads(); sum_dyy = Sum[0]; } // dx = (dy - sum_dyy) * y * mask* scale DX = add_ptr_u(DX, offsetY); for (int i = 0; i < U; i++) store(DX, (dy[i] - sum_dyy) * y[i], i*32, ti + i*32 < D3); } // dx = (dy - sum(dy * y, axis=-1)) * y * m * scale template <typename T> __global__ void __launch_bounds__(32) masked_softmax_grad2( T* DX, const T* __restrict__ DY, const T* __restrict__ Y, const float* __restrict__ M, uint D123, uint D23, uint D3, uint M1, uint M2, uint use_mask, float scale) { // x: D0, D1, D2, D3 // m: 1, D1, D2, D3 // m: 1, 1, D2, D3 // m: 1, 1, 1, D3 uint tid = threadIdx.x; uint d0 = blockIdx.x; uint d1 = blockIdx.y; uint d2 = blockIdx.z; uint offsetY = d0*D123 + d1*D23 + d2*D3 + tid; uint offsetM = d1*M1 + d2*M2 + tid; // sum(dy * y, axis=-1)) float sum_dy_y = 0.0f; #pragma unroll 2 for (uint d3 = tid, offset = offsetY; d3 < D3; d3 += 32, offset += 32) { float dy = load(add_ptr_u(DY, offset)); float y = load(add_ptr_u(Y, offset)); sum_dy_y += dy * y; } for (int i = 16; i > 0; i >>= 1) sum_dy_y += shfl_xor(sum_dy_y, i); #pragma unroll 2 for (uint d3 = tid; d3 < D3; d3 += 32, offsetY += 32, offsetM += 32) { float dy = load(add_ptr_u(DY, offsetY)); float y = load(add_ptr_u(Y, offsetY)); float m = use_mask ? 
__ldg(add_ptr_u(M, offsetM)) : 1.0f; float dx = (dy - sum_dy_y) * y * m * scale; store(add_ptr_u(DX, offsetY), dx); } } template <typename T> bool MaskedSoftmax(CUstream stream, T* y, const T* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale) { if (D3 > 1024*8) masked_softmax2<T><<<dim3(D0,D1,D2),32,0,stream>>>(y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale); else { if (D3 > 32*4) { uint threads = CEIL_DIV(D3, 32*8) * 32; int thread2 = THREAD_POW2(threads); masked_softmax<T,8><<<dim3(D0,D1,D2),threads,0,stream>>>(y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale, thread2); } else if (D3 > 32*2) masked_softmax<T,4><<<dim3(D0,D1,D2),32,0,stream>>>(y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32); else if (D3 > 32*1) masked_softmax<T,2><<<dim3(D0,D1,D2),32,0,stream>>>(y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32); else masked_softmax<T,1><<<dim3(D0,D1,D2),32,0,stream>>>(y, x, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32); } return true; } template <typename T> bool MaskedSoftmaxGrad(CUstream stream, T* dx, const T* dy, const T* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale) { if (D3 > 1024*4) masked_softmax_grad2<T><<<dim3(D0,D1,D2),32,0,stream>>>(dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale); else { if (D3 > 32*2) { uint threads = CEIL_DIV(D3, 32*4) * 32; int thread2 = THREAD_POW2(threads); masked_softmax_grad<T,4><<<dim3(D0,D1,D2),threads,0,stream>>>(dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale, thread2); } else if (D3 > 32*1) masked_softmax_grad<T,2><<<dim3(D0,D1,D2),32,0,stream>>>(dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32); else masked_softmax_grad<T,1><<<dim3(D0,D1,D2),32,0,stream>>>(dx, dy, y, m, D1*D2*D3, D2*D3, D3, M1, M2, m != NULL, scale,32); } return true; } template bool MaskedSoftmax<float>(CUstream stream, float* y, const float* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); template bool MaskedSoftmax<ehalf>(CUstream stream, ehalf* y, const ehalf* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); template bool MaskedSoftmax<bhalf>(CUstream stream, bhalf* y, const bhalf* x, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); template bool MaskedSoftmaxGrad<float>(CUstream stream, float* dx, const float* dy, const float* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); template bool MaskedSoftmaxGrad<ehalf>(CUstream stream, ehalf* dx, const ehalf* dy, const ehalf* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); template bool MaskedSoftmaxGrad<bhalf>(CUstream stream, bhalf* dx, const bhalf* dy, const bhalf* y, const float* m, uint D0, uint D1, uint D2, uint D3, uint M1, uint M2, float scale); // split_heads: (batch, pixel, head, state) -> (batch, head, pixel, state) // merge_heads: (batch, head, pixel, state) -> (batch, pixel, head, state) template <typename T, uint U> __global__ void __launch_bounds__(32) transpose_0213(T* Y, const T* X, uint D123, uint D23, uint D13, uint D2, uint D3) { uint tid = threadIdx.x; uint d2 = blockIdx.x; uint d1 = blockIdx.y; uint d0 = blockIdx.z; uint offset = d0*D123 + tid; uint offsetX = d1*D23 + d2*D3 + offset; uint offsetY = d2*D13 + d1*D3 + offset; #pragma unroll 1 while (d2 < D2) { #pragma unroll 1 for (uint d3 = tid, xi = offsetX, yi = offsetY; d3 < D3; d3 += U*32, xi += U*32, yi += U*32) { 
const T* Xi = add_ptr_u(X, xi); T* Yi = add_ptr_u(Y, yi); float x[U]; for (uint i = 0; i < U; i++) x[i] = load(Xi, i*32, d3 + i*32 < D3); for (uint i = 0; i < U; i++) store(Yi, x[i], i*32, d3 + i*32 < D3); } offsetX += gridDim.x*D3; offsetY += gridDim.x*D13; d2 += gridDim.x; } } template <typename T> bool Transpose_0213(CUstream stream, T* y, const T* x, uint D0, uint D1, uint D2, uint D3) { // make sure each block has enough work to cover launch overhead uint gridX = CEIL_DIV(D2, 4); if (D3 <= 64) transpose_0213<T,2><<<dim3(gridX,D1,D0),32,0,stream>>>(y, x, D1*D2*D3, D2*D3, D1*D3, D2, D3); else transpose_0213<T,4><<<dim3(gridX,D1,D0),32,0,stream>>>(y, x, D1*D2*D3, D2*D3, D1*D3, D2, D3); return true; } template bool Transpose_0213<float>(CUstream stream, float* y, const float* x, uint D0, uint D1, uint D2, uint D3); template bool Transpose_0213<ehalf>(CUstream stream, ehalf* y, const ehalf* x, uint D0, uint D1, uint D2, uint D3); template bool Transpose_0213<bhalf>(CUstream stream, bhalf* y, const bhalf* x, uint D0, uint D1, uint D2, uint D3); #endif
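// Worked example, not part of the original file: index arithmetic for the shared-memory
// "butterfly" stage in top_k above, with comparison stride 1 << j, j = 5, tid = 37:
//   m = (37 >> 5) << 6 = 64,   r = 37 & 31 = 5,   a = m + r = 69,   b = a + 32 = 101
// so thread 37 compares elements 69 and 101, exactly one stride apart. This is why only
// blockDim.x/2 threads participate in these stages, while strides below 32 are handled
// with shfl_xor in registers.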
66817708e2dea2d49e399b12c033f3598c7a830e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <math.h> // Includes #include <stdio.h> #include "../include/ContAcq-IntClk.h" //#include "REPEATL.h" #include "../include/REPEATW.h" // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <hip/hip_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 60 #define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS #define LINE_SIZE 24 #define SETS 64 #define ASSOC 6 #define SIMD_WIDTH 32 #define ITERATIONS REPLACE_ITERATIONS // Variables int* h_A; int* h_B; int* h_C; int* d_A; int* d_B; int* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(int*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){ if(hipSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling hipGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ hipError_t err = hipGetLastError(); if (hipSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal(int* A, int* C, int N){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation int size = (400*max_tid*LINE_SIZE)/sizeof(int); unsigned j=0, k=0; int sum=0; // Fill the L1 cache, Miss on every iteration for (int i=0; i<ITERATIONS ; i++){ REPEAT_L6(0); //REPLACE_ITERATIONS } /* // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs for(k=0; k<ITERATIONS; ++k){ for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){ C[tid+j] = A[tid+j]; } } */ C[0]=sum; __syncthreads(); } // Host code int main(){ printf("Power Microbenchmarks\n"); int N = (400*max_tid*LINE_SIZE); size_t size = N * sizeof(int) ; // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); if (h_A == 0) CleanupResources(); //h_B = (float*)malloc(size); //if (h_B == 0) CleanupResources(); h_C = (int*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); //RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( hipMalloc((void**)&d_A, size) ); //checkCudaErrors( hipMalloc((void**)&d_B, size) ); checkCudaErrors( hipMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) ); //checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); hipLaunchKernelGGL(( PowerKernal), 
dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N); CUDA_SAFE_CALL( hipDeviceSynchronize() ); printf("execution time = %f\n", cutGetTimerValue(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutStopTimer(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); getLastCudaError("kernel launch failure"); #ifdef _DEBUG checkCudaErrors( hipDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void){ // Free device memory if (d_A) hipFree(d_A); //if (d_B) // hipFree(d_B); if (d_C) hipFree(d_C); // Free host memory if (h_A) free(h_A); // if (h_B) // free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(int* data, int n){ for (int i = 0; i < n; ++i) data[i] = (int)(rand() / RAND_MAX); }
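// Hedged alternative, not part of the original benchmark: the cutCreateTimer /
// cutStartTimer helpers above come from the legacy cutil library, which is typically
// unavailable in HIP/ROCm builds. Assuming only the HIP runtime, the kernel can be
// timed with hipEvent_t instead:
static float timePowerKernalMs(dim3 grid, dim3 block, int* dA, int* dC, int n)
{
    hipEvent_t beg, end;
    hipEventCreate(&beg);
    hipEventCreate(&end);
    hipEventRecord(beg, 0);
    hipLaunchKernelGGL(PowerKernal, grid, block, 0, 0, dA, dC, n);
    hipEventRecord(end, 0);
    hipEventSynchronize(end);
    float ms = 0.0f;
    hipEventElapsedTime(&ms, beg, end);
    hipEventDestroy(beg);
    hipEventDestroy(end);
    return ms;
}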
66817708e2dea2d49e399b12c033f3598c7a830e.cu
#include <stdio.h> #include <stdlib.h> #include <cutil.h> #include <math.h> // Includes #include <stdio.h> #include "../include/ContAcq-IntClk.h" //#include "REPEATL.h" #include "../include/REPEATW.h" // includes, project #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples //#include <shrQATest.h> //#include <shrUtils.h> // includes CUDA #include <cuda_runtime.h> #define THREADS_PER_BLOCK 256 #define NUM_OF_BLOCKS 60 #define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS #define LINE_SIZE 24 #define SETS 64 #define ASSOC 6 #define SIMD_WIDTH 32 #define ITERATIONS REPLACE_ITERATIONS // Variables int* h_A; int* h_B; int* h_C; int* d_A; int* d_B; int* d_C; bool noprompt = false; unsigned int my_timer; // Functions void CleanupResources(void); void RandomInit(int*, int); void ParseArguments(int, char**); //////////////////////////////////////////////////////////////////////////////// // These are CUDA Helper functions // This will output the proper CUDA error strings in the event that a CUDA host call returns an error #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line ){ if(cudaSuccess != err){ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // This will output the proper error string when calling cudaGetLastError #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__) inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){ cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) ); exit(-1); } } // end of CUDA Helper Functions // Device code __global__ void PowerKernal(int* A, int* C, int N){ int tid = blockDim.x * blockIdx.x + threadIdx.x; //Do Some Computation int size = (400*max_tid*LINE_SIZE)/sizeof(int); unsigned j=0, k=0; int sum=0; // Fill the L1 cache, Miss on every iteration for (int i=0; i<ITERATIONS ; i++){ REPEAT_L6(0); //REPLACE_ITERATIONS } /* // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs for(k=0; k<ITERATIONS; ++k){ for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){ C[tid+j] = A[tid+j]; } } */ C[0]=sum; __syncthreads(); } // Host code int main(){ printf("Power Microbenchmarks\n"); int N = (400*max_tid*LINE_SIZE); size_t size = N * sizeof(int) ; // Allocate input vectors h_A and h_B in host memory h_A = (int*)malloc(size); if (h_A == 0) CleanupResources(); //h_B = (float*)malloc(size); //if (h_B == 0) CleanupResources(); h_C = (int*)malloc(size); if (h_C == 0) CleanupResources(); // Initialize input vectors RandomInit(h_A, N); //RandomInit(h_B, N); // Allocate vectors in device memory checkCudaErrors( cudaMalloc((void**)&d_A, size) ); //checkCudaErrors( cudaMalloc((void**)&d_B, size) ); checkCudaErrors( cudaMalloc((void**)&d_C, size) ); // Copy vectors from host memory to device memory checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) ); //checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) ); //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N); dim3 dimGrid(NUM_OF_BLOCKS,1); dim3 dimBlock(THREADS_PER_BLOCK,1); CUT_SAFE_CALL(cutCreateTimer(&my_timer)); TaskHandle taskhandle = LaunchDAQ(); CUT_SAFE_CALL(cutStartTimer(my_timer)); PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N); CUDA_SAFE_CALL( cudaThreadSynchronize() ); 
printf("execution time = %f\n", cutGetTimerValue(my_timer)); TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer)); CUT_SAFE_CALL(cutStopTimer(my_timer)); CUT_SAFE_CALL(cutDeleteTimer(my_timer)); getLastCudaError("kernel launch failure"); #ifdef _DEBUG checkCudaErrors( cudaDeviceSynchronize() ); #endif // Copy result from device memory to host memory // h_C contains the result in host memory checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) ); CleanupResources(); return 0; } void CleanupResources(void){ // Free device memory if (d_A) cudaFree(d_A); //if (d_B) // cudaFree(d_B); if (d_C) cudaFree(d_C); // Free host memory if (h_A) free(h_A); // if (h_B) // free(h_B); if (h_C) free(h_C); } // Allocates an array with random float entries. void RandomInit(int* data, int n){ for (int i = 0; i < n; ++i) data[i] = (int)(rand() / RAND_MAX); }
33b38117433e5dc0fc81165c269baeecdaa9e03c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // sweep-tt-multistart.c - using VELOCITYBOX and FLOATBOX // vim: set tabstop=2 softtabstop=2 shiftwidth=2 expandtab : //////////////////////////////////////////////////////////////////////////////// /********************************************************************************/ /* Given a velocity field v[nx][ny][nz] for a set of points (i,j,k) (where */ /* 0 <= i < nx, 0 <= j < ny, 0 <= k < nz) layed out on a grid with delta unit */ /* distance, compute the minimum travel time, tt[nx][ny][nz][numstart], for all */ /* points to the numstart starting points. The program is called as follows: */ /* */ /* sweep-tt-multistart vfile fsfile startfile */ /* */ // vfile is the velocity field file and has the .vbox format. /* */ /* fsfile is the forward star offset file and has the format: */ /* */ /* starsize */ /* oi oj ok for every forward star offset (oi,oj,ok) */ /* */ /* startfile contains starting points and has the format: */ /* */ /* numstart */ /* si sj sk for every starting point */ /* */ /* The program writes to "output.tt" the following: */ /* */ /* nx ny nz */ /* tt[i][j][k] for every point (i,j,k) in row-major order */ /* */ /* for every starting point. */ /* (Note, the program currently exits before this is done.) */ /********************************************************************************/ #include "iovelocity.h" #include "timing.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #define FSRADIUSMAX 7 /* maximum radius forward star */ #define FSMAX 818 /* maximum # of points in a forward star */ #define MODELMAX 250 /* maximum model dimension in X,Y,Z */ #define STARTMAX 4 /* maximum starting points */ #define GRIDX 256 #define GRIDY 256 #define GRIDZ 1 #define BLOCKX 1 #define BLOCKY 1 #define BLOCKZ 64 struct FS { /* forward start offset */ int i, j, k; /* point coordinates */ float d; /* distance to star center (0,0,0)*/ }; struct MODEL { /* model point */ float v; /* velocity */ float tt[STARTMAX]; /* travel time for starting points */ }; struct START { /* starting point */ int i, j , k; /* point coordinates */ }; int changed[STARTMAX]; struct FS fs[FSMAX]; __constant__ struct FS dc_fs[FSMAX]; struct START start[STARTMAX]; __constant__ struct START dc_start[STARTMAX]; struct VELOCITYBOX vbox; // stores JUST velocities __constant__ struct VELOCITYBOX dc_vbox; struct FLOATBOX ttboxes[STARTMAX]; // stores JUST travel times, one volume per starting point __constant__ struct FLOATBOX dc_ttboxes[STARTMAX]; void cudaRun(int, int); __global__ void cudaWorker(int, int, int, int, int, int, struct FS *, struct START *, struct VELOCITYBOX *, struct FLOATBOX *,long *); __device__ int sweepXYZ(int, int, int, int, int, int, int, int, struct FS *, float *, float *); int main(int argc, char* argv[]) { int i, j, k, nx, ny, nz, s; int numradius, starsize, numstart; int fsindex[FSRADIUSMAX]; float delta; FILE *fsfile, *ttfile, *startfile; const char *velocity_model_file = argv[1]; /* open velocity model file */ printf( "Loading velocity model file: %s...", velocity_model_file ); fflush( stdout ); //if( !vboxloadbinary( &vbox, velocity_model_file ) ) { if( !vboxloadtext( &vbox, velocity_model_file ) ) { printf( "Cannot open velocity model file: %s\n", velocity_model_file ); exit(1); } nx = vbox.box.nx; ny = vbox.box.ny; nz = vbox.box.nz; printf( " done.\n" ); fflush( stdout ); printf( "Velocity model dimensions: 
%d x %d x %d\n", nx, ny, nz ); /* open forward star offset file */ fsfile = fopen(argv[2],"r"); if(fsfile == NULL) { printf("Cannot open forward star offset file: %s\n", argv[2]); exit(1); } printf("Forward star offset file: %s\n", argv[2]); /* open file with starting points */ startfile = fopen(argv[3],"r"); if(startfile == NULL) { printf("Cannot open starting points file: %s\n", argv[4]); exit(1); } printf("Starting points file: %s\n", argv[3]); /* get delta */ delta = 10.0; printf("Delta: %f\n", delta); /* read forward star offsets */ starsize = 0; fscanf(fsfile, "%i", &starsize); printf("Forward star size: %d\n", starsize); for (i=0; i<FSRADIUSMAX; i++) { fsindex[i] = 0; } numradius = 0; for (i=0; i<starsize; i++) { fscanf(fsfile, "%i %i %i", &fs[i].i, &fs[i].j, &fs[i].k); fs[i].d = sqrt(fs[i].i*fs[i].i + fs[i].j*fs[i].j + fs[i].k*fs[i].k); if ((numradius+1) < fs[i].d) { fsindex[numradius] = i; numradius++; } fs[i].d = delta * fs[i].d; } printf("Forward star offsets read\n"); for (i=0; i<FSRADIUSMAX; i++) { printf("numradius: %d, fsindex[%d]: %d\n", numradius, i, fsindex[i]); } /* read starting points */ fscanf(startfile, "%i", &numstart); // initialize travel times for all starting points for( s = 0; s < numstart; s++ ) { // prepare travel time volumes boxalloc( &ttboxes[s], nx, ny, nz ); boxsetall( ttboxes[s], INFINITY ); // set the starting point to have a travel time of 0 fscanf( startfile, "%i %i %i", &i, &j, &k ); boxput( ttboxes[s], i, j, k, 0 ); printf( "starting point %d: %d %d %d\n", s, i, j, k ); start[s].i = i; start[s].j = j; start[s].k = k; } printf("Starting points read\n"); hipSetDevice(0); cudaRun(numstart, starsize); // /* print travel times */ ttfile = fopen("output.tt","w"); if(ttfile == NULL) { printf("Can not open travel time output file: %s\n", "output.tt"); exit(1); } fprintf(ttfile, "%d %d %d\n", nx, ny, nz); for (s=0; s<numstart; s++) { fprintf(ttfile, "starting point: %d\n", s); for (i=0; i<nx; i++) { for (j=0; j<ny; j++) { for (k=0; k<nz; k++) { /* use %g for doubles */ fprintf(ttfile, "travel time for (%d,%d,%d): %f %d %d %d\n", i, j, k, boxget( ttboxes[s], i, j, k ), 0, 0, 0 ); } } } } } /* main */ void cudaRun( int numstart, int starsize ) { struct FS *pd_fs; struct START *pd_start; struct VELOCITYBOX *pd_vbox; // stores JUST velocities struct FLOATBOX *pd_ttboxes; // stores JUST travel times, one volume per starting point int i, j, nx = vbox.box.nx, ny = vbox.box.ny, nz = vbox.box.nz; hipError_t err; //copy fs to device hipMemcpyToSymbol(dc_fs, fs, sizeof(fs)); size_t fssize = sizeof(struct FS)*FSMAX; err = hipMalloc( (void**)&pd_fs, fssize ); if(err != hipSuccess) printf("fs malloc error\n"); err = hipMemcpy( pd_fs, fs, fssize, hipMemcpyHostToDevice ); if(err != hipSuccess) printf("fs copy error: %d\n", (int)fssize); printf("1\n"); //copy start points to device hipMemcpyToSymbol(dc_start, start, sizeof(start)); size_t startsize = sizeof(struct START)*STARTMAX; err = hipMalloc( (void**)&pd_start, startsize ); if(err != hipSuccess) printf("start malloc error\n"); err = hipMemcpy( pd_start, start, startsize, hipMemcpyHostToDevice ); if(err != hipSuccess) printf("start copy error\n"); printf("2\n"); //copy velosity box to device size_t vboxsize = sizeof(struct VELOCITYBOX); size_t flatbytes = (size_t)nx * ny * nz * sizeof(float); float *pd_vboxflat; err = hipMalloc( (void **)&pd_vbox, vboxsize ); if(err != hipSuccess) printf("vbox malloc error\n"); err = hipMalloc( (void **)&pd_vboxflat, flatbytes ); if(err != hipSuccess) printf("pd_vboxflat malloc 
error\n"); struct VELOCITYBOX dummyvbox; memcpy( &dummyvbox, &vbox, sizeof(struct VELOCITYBOX) ); dummyvbox.box.flat = pd_vboxflat; err = hipMemcpy( dummyvbox.box.flat, vbox.box.flat, flatbytes, hipMemcpyHostToDevice ); if(err != hipSuccess) printf( "pd_vboxflat copy error\n" ); err = hipMemcpy( pd_vbox, &dummyvbox, vboxsize, hipMemcpyHostToDevice ); if(err != hipSuccess) printf( "vbox copy error\n" ); hipMemcpyToSymbol(dc_vbox, &dummyvbox, sizeof(dummyvbox)); printf( "3\n" ); //copy travel time boxes to device size_t boxessize = sizeof(struct FLOATBOX)*STARTMAX; err = hipMalloc( (void **)&pd_ttboxes, boxessize ); if(err != hipSuccess) printf("boxes malloc error\n"); struct FLOATBOX dummybox[STARTMAX]; for(i=0; i<STARTMAX; i++){ float *pd_boxflat; err = hipMalloc( (void **)&pd_boxflat, flatbytes ); if(err != hipSuccess) printf("pd_boxflat malloc error\n"); memcpy(dummybox+i, ttboxes+i, sizeof(struct FLOATBOX)); dummybox[i].flat = pd_boxflat; err = hipMemcpy( dummybox[i].flat, ttboxes[i].flat, flatbytes, hipMemcpyHostToDevice ); if(err != hipSuccess) printf( "boxflat %d copy error\n", i ); } err = hipMemcpy( pd_ttboxes, dummybox, sizeof(struct FLOATBOX) * STARTMAX, hipMemcpyHostToDevice ); if(err != hipSuccess) printf( "box %d copy error\n", i ); hipMemcpyToSymbol(dc_ttboxes, dummybox, sizeof(dummybox)); printf("4\n"); const int tNum = GRIDX * BLOCKX * GRIDY * BLOCKY * GRIDZ * BLOCKZ ; //const int blkNum = GRIDX * GRIDY * GRIDZ; //const int blkSize = BLOCKX * BLOCKY * BLOCKZ; long *pd_anychange, *anychange; double sweepTime = 0, dataTransTime = 0; err = hipMalloc(&pd_anychange, sizeof(long) * tNum); if(err != hipSuccess) printf( "pd_anychange malloc error\n"); anychange = (long*)malloc(sizeof(long) * tNum); printf("5\n"); int nDevices; hipGetDeviceCount(&nDevices); printf("device: %d\n", nDevices); for (int i = 0; i < nDevices; i++) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } for(i=0; i<numstart; i++){ long sweepNum = 0, changeSum = 1; while (changeSum) { sweepTime = 0; dataTransTime = 0; changeSum = 0; sweepNum++; err = hipMemset(pd_anychange, 0, sizeof(long) * tNum); if(err != hipSuccess) printf( "pd_anychange memset error\n"); reset_and_start_timer(); dim3 gridDim(GRIDX,GRIDY,GRIDZ); dim3 blockDim(BLOCKX,BLOCKY,BLOCKZ); hipLaunchKernelGGL(( cudaWorker), dim3(gridDim),dim3(blockDim), 0, 0, nx, ny, nz, i, 0, starsize-1, //Note: change the range to the original starsize only reduce 5ms time. 
dc_fs, dc_start, pd_vbox, pd_ttboxes, pd_anychange ); hipDeviceSynchronize(); sweepTime = get_elapsed_msec(); if(err != hipSuccess) printf(" hipGetLastError() returned %d: %s\n", err, hipGetErrorString(err)); reset_and_start_timer(); err = hipMemcpy( anychange, pd_anychange, sizeof(long) * tNum, hipMemcpyDeviceToHost ); if(err != hipSuccess) printf( "anychange copy error: %d\n", err); dataTransTime = get_elapsed_msec(); for(j = 0; j < tNum; j++){ changeSum += anychange[j]; } printf(" start point: %d, sweep %d: %d changes, sweep %f, data trans %f\n", i, sweepNum, changeSum, sweepTime, dataTransTime); } } printf("6\n"); for(i=0; i<STARTMAX; i++){ struct FLOATBOX ttboxbuff; err = hipMemcpy( &ttboxbuff, pd_ttboxes+i, sizeof(struct FLOATBOX), hipMemcpyDeviceToHost ); if(err != hipSuccess) printf( "box %d copy error\n", i ); err = hipMemcpy( ttboxes[i].flat, ttboxbuff.flat, flatbytes, hipMemcpyDeviceToHost ); if(err != hipSuccess) printf( "boxflat %d copy error\n", i ); } printf("7\n"); hipFree(pd_fs); hipFree(pd_start); hipFree(pd_vbox); hipFree(pd_vboxflat); hipFree(pd_ttboxes); hipFree(dummybox[i].flat); hipFree(pd_anychange); free(anychange); } __global__ void cudaWorker( int d_nx, int d_ny, int d_nz, int d_s, int d_starstart, int d_starend, struct FS *pd_fs, struct START *pd_start, struct VELOCITYBOX *pd_vbox, struct FLOATBOX *pd_ttboxes, long *pd_anychange ) { //int d_blktid = threadIdx.z + threadIdx.y * blockDim.z + threadIdx.x * blockDim.z * blockDim.y; int d_blkid = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int d_glbtid = d_blkid * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; //int blkSize = blockDim.x * blockDim.y * blockDim.z; pd_anychange[d_glbtid] = sweepXYZ( d_nx, d_ny, d_nz, dc_start[d_s].i, dc_start[d_s].j, dc_start[d_s].k, d_starstart, d_starend, dc_fs, pd_vbox->box.flat, pd_ttboxes[d_s].flat ); } __device__ int sweepXYZ( int nx, int ny, int nz, int startx, int starty, int startz, int starstart, int starstop, struct FS *fs, float *vboxflat, float *ttboxflat ) { int i, j, k, l, oi, oj, ok; float delay = 0.0, tt = 0.0, tto = 0.0, ttd = 0.0, ttod = 0.0; int sx = nz * ny; int d_blktid = threadIdx.z + threadIdx.y * blockDim.z + threadIdx.x * blockDim.z * blockDim.y; __shared__ int change; if(d_blktid == 0) change = 0; __syncthreads(); i = blockIdx.x * blockDim.x + threadIdx.x; j = blockIdx.y * blockDim.y + threadIdx.y; k = blockIdx.z * blockDim.z + threadIdx.z; if(i >= nx || j >= ny || k >= nz) return 0; for (l=starstart; l<starstop; l++) { /* find point in forward star based on offsets */ oi = i+fs[l].i; oj = j+fs[l].j; ok = k+fs[l].k; /* if (oi,oj,ok) is outside the boundaries, then skip */ if ((oi < 0) || (oi > nx-1) || (oj < 0) || (oj > ny-1) || (ok < 0) || (ok > nz-1)) { continue; } /* compute delay from (i,j,k) to (oi,oj,ok) with end point average */ int iIdx = k+nz*j+i*sx; int oIdx = ok+nz*oj+oi*sx; delay = dc_fs[l].d * (vboxflat[iIdx] + vboxflat[oIdx]) / 2.0; tt = ttboxflat[iIdx]; tto = ttboxflat[oIdx]; /* if a shorter travel time through (oi,oj,ok), update (i,j,k) */ if ((delay + tto) < tt) { ttboxflat[iIdx] = delay + tto; if(change == 0) change = 1; } /* if a shorter travel time through (i,j,k), update (oi,oj,ok) */ else if ((delay + tt) < tto) { ttboxflat[oIdx] = delay + tt; if(change == 0) change = 1; } } return(change); } /* end sweepXYZ */
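// --- Editor's sketch (not part of the original hipify pair above) ---------------------
// The launch in cudaRun() uses a fixed grid of GRIDX x GRIDY x GRIDZ blocks with
// BLOCKX x BLOCKY x BLOCKZ threads, so a thread only ever computes indices
// i < GRIDX*BLOCKX, j < GRIDY*BLOCKY, k < GRIDZ*BLOCKZ (256 x 256 x 64 with the
// #defines above); model points beyond that extent would never be visited by any
// thread. A small host-side guard one could call before cudaRun(); the name
// coversModel is illustrative only and does not appear in the original source.
static int coversModel(int nx, int ny, int nz)
{
    return nx <= GRIDX * BLOCKX &&
           ny <= GRIDY * BLOCKY &&
           nz <= GRIDZ * BLOCKZ;   // 256, 256 and 64 respectively with the defines above
}
// --------------------------------------------------------------------------------------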
33b38117433e5dc0fc81165c269baeecdaa9e03c.cu
//////////////////////////////////////////////////////////////////////////////// // sweep-tt-multistart.c - using VELOCITYBOX and FLOATBOX // vim: set tabstop=2 softtabstop=2 shiftwidth=2 expandtab : //////////////////////////////////////////////////////////////////////////////// /********************************************************************************/ /* Given a velocity field v[nx][ny][nz] for a set of points (i,j,k) (where */ /* 0 <= i < nx, 0 <= j < ny, 0 <= k < nz) layed out on a grid with delta unit */ /* distance, compute the minimum travel time, tt[nx][ny][nz][numstart], for all */ /* points to the numstart starting points. The program is called as follows: */ /* */ /* sweep-tt-multistart vfile fsfile startfile */ /* */ // vfile is the velocity field file and has the .vbox format. /* */ /* fsfile is the forward star offset file and has the format: */ /* */ /* starsize */ /* oi oj ok for every forward star offset (oi,oj,ok) */ /* */ /* startfile contains starting points and has the format: */ /* */ /* numstart */ /* si sj sk for every starting point */ /* */ /* The program writes to "output.tt" the following: */ /* */ /* nx ny nz */ /* tt[i][j][k] for every point (i,j,k) in row-major order */ /* */ /* for every starting point. */ /* (Note, the program currently exits before this is done.) */ /********************************************************************************/ #include "iovelocity.h" #include "timing.h" #include <stdlib.h> #include <stdio.h> #include <math.h> #define FSRADIUSMAX 7 /* maximum radius forward star */ #define FSMAX 818 /* maximum # of points in a forward star */ #define MODELMAX 250 /* maximum model dimension in X,Y,Z */ #define STARTMAX 4 /* maximum starting points */ #define GRIDX 256 #define GRIDY 256 #define GRIDZ 1 #define BLOCKX 1 #define BLOCKY 1 #define BLOCKZ 64 struct FS { /* forward start offset */ int i, j, k; /* point coordinates */ float d; /* distance to star center (0,0,0)*/ }; struct MODEL { /* model point */ float v; /* velocity */ float tt[STARTMAX]; /* travel time for starting points */ }; struct START { /* starting point */ int i, j , k; /* point coordinates */ }; int changed[STARTMAX]; struct FS fs[FSMAX]; __constant__ struct FS dc_fs[FSMAX]; struct START start[STARTMAX]; __constant__ struct START dc_start[STARTMAX]; struct VELOCITYBOX vbox; // stores JUST velocities __constant__ struct VELOCITYBOX dc_vbox; struct FLOATBOX ttboxes[STARTMAX]; // stores JUST travel times, one volume per starting point __constant__ struct FLOATBOX dc_ttboxes[STARTMAX]; void cudaRun(int, int); __global__ void cudaWorker(int, int, int, int, int, int, struct FS *, struct START *, struct VELOCITYBOX *, struct FLOATBOX *,long *); __device__ int sweepXYZ(int, int, int, int, int, int, int, int, struct FS *, float *, float *); int main(int argc, char* argv[]) { int i, j, k, nx, ny, nz, s; int numradius, starsize, numstart; int fsindex[FSRADIUSMAX]; float delta; FILE *fsfile, *ttfile, *startfile; const char *velocity_model_file = argv[1]; /* open velocity model file */ printf( "Loading velocity model file: %s...", velocity_model_file ); fflush( stdout ); //if( !vboxloadbinary( &vbox, velocity_model_file ) ) { if( !vboxloadtext( &vbox, velocity_model_file ) ) { printf( "Cannot open velocity model file: %s\n", velocity_model_file ); exit(1); } nx = vbox.box.nx; ny = vbox.box.ny; nz = vbox.box.nz; printf( " done.\n" ); fflush( stdout ); printf( "Velocity model dimensions: %d x %d x %d\n", nx, ny, nz ); /* open forward star offset file */ fsfile = 
fopen(argv[2],"r"); if(fsfile == NULL) { printf("Cannot open forward star offset file: %s\n", argv[2]); exit(1); } printf("Forward star offset file: %s\n", argv[2]); /* open file with starting points */ startfile = fopen(argv[3],"r"); if(startfile == NULL) { printf("Cannot open starting points file: %s\n", argv[4]); exit(1); } printf("Starting points file: %s\n", argv[3]); /* get delta */ delta = 10.0; printf("Delta: %f\n", delta); /* read forward star offsets */ starsize = 0; fscanf(fsfile, "%i", &starsize); printf("Forward star size: %d\n", starsize); for (i=0; i<FSRADIUSMAX; i++) { fsindex[i] = 0; } numradius = 0; for (i=0; i<starsize; i++) { fscanf(fsfile, "%i %i %i", &fs[i].i, &fs[i].j, &fs[i].k); fs[i].d = sqrt(fs[i].i*fs[i].i + fs[i].j*fs[i].j + fs[i].k*fs[i].k); if ((numradius+1) < fs[i].d) { fsindex[numradius] = i; numradius++; } fs[i].d = delta * fs[i].d; } printf("Forward star offsets read\n"); for (i=0; i<FSRADIUSMAX; i++) { printf("numradius: %d, fsindex[%d]: %d\n", numradius, i, fsindex[i]); } /* read starting points */ fscanf(startfile, "%i", &numstart); // initialize travel times for all starting points for( s = 0; s < numstart; s++ ) { // prepare travel time volumes boxalloc( &ttboxes[s], nx, ny, nz ); boxsetall( ttboxes[s], INFINITY ); // set the starting point to have a travel time of 0 fscanf( startfile, "%i %i %i", &i, &j, &k ); boxput( ttboxes[s], i, j, k, 0 ); printf( "starting point %d: %d %d %d\n", s, i, j, k ); start[s].i = i; start[s].j = j; start[s].k = k; } printf("Starting points read\n"); cudaSetDevice(0); cudaRun(numstart, starsize); // /* print travel times */ ttfile = fopen("output.tt","w"); if(ttfile == NULL) { printf("Can not open travel time output file: %s\n", "output.tt"); exit(1); } fprintf(ttfile, "%d %d %d\n", nx, ny, nz); for (s=0; s<numstart; s++) { fprintf(ttfile, "starting point: %d\n", s); for (i=0; i<nx; i++) { for (j=0; j<ny; j++) { for (k=0; k<nz; k++) { /* use %g for doubles */ fprintf(ttfile, "travel time for (%d,%d,%d): %f %d %d %d\n", i, j, k, boxget( ttboxes[s], i, j, k ), 0, 0, 0 ); } } } } } /* main */ void cudaRun( int numstart, int starsize ) { struct FS *pd_fs; struct START *pd_start; struct VELOCITYBOX *pd_vbox; // stores JUST velocities struct FLOATBOX *pd_ttboxes; // stores JUST travel times, one volume per starting point int i, j, nx = vbox.box.nx, ny = vbox.box.ny, nz = vbox.box.nz; cudaError_t err; //copy fs to device cudaMemcpyToSymbol(dc_fs, fs, sizeof(fs)); size_t fssize = sizeof(struct FS)*FSMAX; err = cudaMalloc( (void**)&pd_fs, fssize ); if(err != cudaSuccess) printf("fs malloc error\n"); err = cudaMemcpy( pd_fs, fs, fssize, cudaMemcpyHostToDevice ); if(err != cudaSuccess) printf("fs copy error: %d\n", (int)fssize); printf("1\n"); //copy start points to device cudaMemcpyToSymbol(dc_start, start, sizeof(start)); size_t startsize = sizeof(struct START)*STARTMAX; err = cudaMalloc( (void**)&pd_start, startsize ); if(err != cudaSuccess) printf("start malloc error\n"); err = cudaMemcpy( pd_start, start, startsize, cudaMemcpyHostToDevice ); if(err != cudaSuccess) printf("start copy error\n"); printf("2\n"); //copy velosity box to device size_t vboxsize = sizeof(struct VELOCITYBOX); size_t flatbytes = (size_t)nx * ny * nz * sizeof(float); float *pd_vboxflat; err = cudaMalloc( (void **)&pd_vbox, vboxsize ); if(err != cudaSuccess) printf("vbox malloc error\n"); err = cudaMalloc( (void **)&pd_vboxflat, flatbytes ); if(err != cudaSuccess) printf("pd_vboxflat malloc error\n"); struct VELOCITYBOX dummyvbox; memcpy( &dummyvbox, 
&vbox, sizeof(struct VELOCITYBOX) ); dummyvbox.box.flat = pd_vboxflat; err = cudaMemcpy( dummyvbox.box.flat, vbox.box.flat, flatbytes, cudaMemcpyHostToDevice ); if(err != cudaSuccess) printf( "pd_vboxflat copy error\n" ); err = cudaMemcpy( pd_vbox, &dummyvbox, vboxsize, cudaMemcpyHostToDevice ); if(err != cudaSuccess) printf( "vbox copy error\n" ); cudaMemcpyToSymbol(dc_vbox, &dummyvbox, sizeof(dummyvbox)); printf( "3\n" ); //copy travel time boxes to device size_t boxessize = sizeof(struct FLOATBOX)*STARTMAX; err = cudaMalloc( (void **)&pd_ttboxes, boxessize ); if(err != cudaSuccess) printf("boxes malloc error\n"); struct FLOATBOX dummybox[STARTMAX]; for(i=0; i<STARTMAX; i++){ float *pd_boxflat; err = cudaMalloc( (void **)&pd_boxflat, flatbytes ); if(err != cudaSuccess) printf("pd_boxflat malloc error\n"); memcpy(dummybox+i, ttboxes+i, sizeof(struct FLOATBOX)); dummybox[i].flat = pd_boxflat; err = cudaMemcpy( dummybox[i].flat, ttboxes[i].flat, flatbytes, cudaMemcpyHostToDevice ); if(err != cudaSuccess) printf( "boxflat %d copy error\n", i ); } err = cudaMemcpy( pd_ttboxes, dummybox, sizeof(struct FLOATBOX) * STARTMAX, cudaMemcpyHostToDevice ); if(err != cudaSuccess) printf( "box %d copy error\n", i ); cudaMemcpyToSymbol(dc_ttboxes, dummybox, sizeof(dummybox)); printf("4\n"); const int tNum = GRIDX * BLOCKX * GRIDY * BLOCKY * GRIDZ * BLOCKZ ; //const int blkNum = GRIDX * GRIDY * GRIDZ; //const int blkSize = BLOCKX * BLOCKY * BLOCKZ; long *pd_anychange, *anychange; double sweepTime = 0, dataTransTime = 0; err = cudaMalloc(&pd_anychange, sizeof(long) * tNum); if(err != cudaSuccess) printf( "pd_anychange malloc error\n"); anychange = (long*)malloc(sizeof(long) * tNum); printf("5\n"); int nDevices; cudaGetDeviceCount(&nDevices); printf("device: %d\n", nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } for(i=0; i<numstart; i++){ long sweepNum = 0, changeSum = 1; while (changeSum) { sweepTime = 0; dataTransTime = 0; changeSum = 0; sweepNum++; err = cudaMemset(pd_anychange, 0, sizeof(long) * tNum); if(err != cudaSuccess) printf( "pd_anychange memset error\n"); reset_and_start_timer(); dim3 gridDim(GRIDX,GRIDY,GRIDZ); dim3 blockDim(BLOCKX,BLOCKY,BLOCKZ); cudaWorker<<<gridDim,blockDim>>>( nx, ny, nz, i, 0, starsize-1, //Note: change the range to the original starsize only reduce 5ms time. 
dc_fs, dc_start, pd_vbox, pd_ttboxes, pd_anychange ); cudaDeviceSynchronize(); sweepTime = get_elapsed_msec(); if(err != cudaSuccess) printf(" cudaGetLastError() returned %d: %s\n", err, cudaGetErrorString(err)); reset_and_start_timer(); err = cudaMemcpy( anychange, pd_anychange, sizeof(long) * tNum, cudaMemcpyDeviceToHost ); if(err != cudaSuccess) printf( "anychange copy error: %d\n", err); dataTransTime = get_elapsed_msec(); for(j = 0; j < tNum; j++){ changeSum += anychange[j]; } printf(" start point: %d, sweep %d: %d changes, sweep %f, data trans %f\n", i, sweepNum, changeSum, sweepTime, dataTransTime); } } printf("6\n"); for(i=0; i<STARTMAX; i++){ struct FLOATBOX ttboxbuff; err = cudaMemcpy( &ttboxbuff, pd_ttboxes+i, sizeof(struct FLOATBOX), cudaMemcpyDeviceToHost ); if(err != cudaSuccess) printf( "box %d copy error\n", i ); err = cudaMemcpy( ttboxes[i].flat, ttboxbuff.flat, flatbytes, cudaMemcpyDeviceToHost ); if(err != cudaSuccess) printf( "boxflat %d copy error\n", i ); } printf("7\n"); cudaFree(pd_fs); cudaFree(pd_start); cudaFree(pd_vbox); cudaFree(pd_vboxflat); cudaFree(pd_ttboxes); cudaFree(dummybox[i].flat); cudaFree(pd_anychange); free(anychange); } __global__ void cudaWorker( int d_nx, int d_ny, int d_nz, int d_s, int d_starstart, int d_starend, struct FS *pd_fs, struct START *pd_start, struct VELOCITYBOX *pd_vbox, struct FLOATBOX *pd_ttboxes, long *pd_anychange ) { //int d_blktid = threadIdx.z + threadIdx.y * blockDim.z + threadIdx.x * blockDim.z * blockDim.y; int d_blkid = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z; int d_glbtid = d_blkid * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x; //int blkSize = blockDim.x * blockDim.y * blockDim.z; pd_anychange[d_glbtid] = sweepXYZ( d_nx, d_ny, d_nz, dc_start[d_s].i, dc_start[d_s].j, dc_start[d_s].k, d_starstart, d_starend, dc_fs, pd_vbox->box.flat, pd_ttboxes[d_s].flat ); } __device__ int sweepXYZ( int nx, int ny, int nz, int startx, int starty, int startz, int starstart, int starstop, struct FS *fs, float *vboxflat, float *ttboxflat ) { int i, j, k, l, oi, oj, ok; float delay = 0.0, tt = 0.0, tto = 0.0, ttd = 0.0, ttod = 0.0; int sx = nz * ny; int d_blktid = threadIdx.z + threadIdx.y * blockDim.z + threadIdx.x * blockDim.z * blockDim.y; __shared__ int change; if(d_blktid == 0) change = 0; __syncthreads(); i = blockIdx.x * blockDim.x + threadIdx.x; j = blockIdx.y * blockDim.y + threadIdx.y; k = blockIdx.z * blockDim.z + threadIdx.z; if(i >= nx || j >= ny || k >= nz) return 0; for (l=starstart; l<starstop; l++) { /* find point in forward star based on offsets */ oi = i+fs[l].i; oj = j+fs[l].j; ok = k+fs[l].k; /* if (oi,oj,ok) is outside the boundaries, then skip */ if ((oi < 0) || (oi > nx-1) || (oj < 0) || (oj > ny-1) || (ok < 0) || (ok > nz-1)) { continue; } /* compute delay from (i,j,k) to (oi,oj,ok) with end point average */ int iIdx = k+nz*j+i*sx; int oIdx = ok+nz*oj+oi*sx; delay = dc_fs[l].d * (vboxflat[iIdx] + vboxflat[oIdx]) / 2.0; tt = ttboxflat[iIdx]; tto = ttboxflat[oIdx]; /* if a shorter travel time through (oi,oj,ok), update (i,j,k) */ if ((delay + tto) < tt) { ttboxflat[iIdx] = delay + tto; if(change == 0) change = 1; } /* if a shorter travel time through (i,j,k), update (oi,oj,ok) */ else if ((delay + tt) < tto) { ttboxflat[oIdx] = delay + tt; if(change == 0) change = 1; } } return(change); } /* end sweepXYZ */
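// --- Editor's sketch (not part of the original pair above) ----------------------------
// sweepXYZ() flattens a model point (i, j, k) as k + nz*j + i*(nz*ny), i.e. row-major
// with i slowest-varying and k fastest. A host-side mirror of that mapping, handy when
// checking the flat travel-time arrays copied back from the device against output.tt;
// flatIndex is an illustrative name, and it is assumed (not shown here) that FLOATBOX
// stores its .flat array in the same order.
static inline size_t flatIndex(int i, int j, int k, int ny, int nz)
{
    return (size_t)k + (size_t)nz * (size_t)j + (size_t)i * (size_t)nz * (size_t)ny;
}
// --------------------------------------------------------------------------------------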
32c647bec7836e56697f58a6692d5f772622f138.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "csutil.h" inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess){ fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } // Kernel functions __global__ void reduce(int *g_idata, int *g_out) { __shared__ int sdata[BLOCKSIZE]; // each thread loads one element from global to shared mem int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[threadIdx.x] = g_idata[i]; __syncthreads(); // do reduction in shared mem for (int s=1; s < blockDim.x; s *=2) { int index = 2 * s * threadIdx.x; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (threadIdx.x == 0){ atomicAdd(g_out, sdata[0]); } } __device__ bool is_equal(float a, float b, float eps) { return fabs(a - b) < eps ? true : false; } __global__ void findvec(float *base_vec, float *in, int *out) { int index = blockIdx.x * blockDim.x + threadIdx.x; int ok = true; for (int i = 0; i < d_m; i++){ if (!is_equal(in[index + i], base_vec[i], d_r)){ ok = false; break; } } if (ok) out[index] = 1; } /************************** not used kernel functions ***************************/ __global__ void czek(int n, int *idx, int *str, int *bI, int *bD, int *tI, int *gD) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride){ idx[i] = index; str[i] = stride; bI[i] = blockIdx.x; bD[i] = blockDim.x; tI[i] = threadIdx.x; gD[i] = gridDim.x; } }
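// --- Editor's sketch (illustrative, not part of the original hipify pair above) -------
// The shared-memory loop in reduce() is the classic interleaved tree reduction: at
// stride s, the thread with index t adds sdata[2*s*t + s] into sdata[2*s*t]. A plain
// host-side model of what one block computes, assuming the block size is a power of
// two (as the BLOCKSIZE constant from csutil.h is expected to be).
static void blockReduceModel(int *sdata, int blockSize)
{
    for (int s = 1; s < blockSize; s *= 2)
        for (int t = 0; 2 * s * t + s < blockSize; t++)
            sdata[2 * s * t] += sdata[2 * s * t + s];
    // sdata[0] now holds the sum of all blockSize elements, matching the value the
    // kernel atomically adds into *g_out.
}
// --------------------------------------------------------------------------------------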
32c647bec7836e56697f58a6692d5f772622f138.cu
#include <stdio.h> #include "csutil.h" inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess){ fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } // Kernel functions __global__ void reduce(int *g_idata, int *g_out) { __shared__ int sdata[BLOCKSIZE]; // each thread loads one element from global to shared mem int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[threadIdx.x] = g_idata[i]; __syncthreads(); // do reduction in shared mem for (int s=1; s < blockDim.x; s *=2) { int index = 2 * s * threadIdx.x; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (threadIdx.x == 0){ atomicAdd(g_out, sdata[0]); } } __device__ bool is_equal(float a, float b, float eps) { return fabs(a - b) < eps ? true : false; } __global__ void findvec(float *base_vec, float *in, int *out) { int index = blockIdx.x * blockDim.x + threadIdx.x; int ok = true; for (int i = 0; i < d_m; i++){ if (!is_equal(in[index + i], base_vec[i], d_r)){ ok = false; break; } } if (ok) out[index] = 1; } /************************** not used kernel functions ***************************/ __global__ void czek(int n, int *idx, int *str, int *bI, int *bD, int *tI, int *gD) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride){ idx[i] = index; str[i] = stride; bI[i] = blockIdx.x; bD[i] = blockDim.x; tI[i] = threadIdx.x; gD[i] = gridDim.x; } }
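// --- Editor's sketch (illustrative host launcher, not part of the original file) ------
// reduce() above has no bounds check and indexes shared memory with threadIdx.x, so it
// must be launched with exactly BLOCKSIZE threads per block and an input length that is
// a multiple of BLOCKSIZE, and *g_out must start at zero because each block accumulates
// into it with atomicAdd(). A minimal launcher under those assumptions; launchReduce is
// a hypothetical name, BLOCKSIZE is taken from csutil.h, and gpuAssert is the helper
// defined above.
void launchReduce(const int *h_in, int n, int *h_sum)
{
    int *d_in = NULL, *d_out = NULL;
    gpuAssert(cudaMalloc((void **)&d_in, n * sizeof(int)), __FILE__, __LINE__);
    gpuAssert(cudaMalloc((void **)&d_out, sizeof(int)), __FILE__, __LINE__);
    gpuAssert(cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice), __FILE__, __LINE__);
    gpuAssert(cudaMemset(d_out, 0, sizeof(int)), __FILE__, __LINE__);  // kernel accumulates into this
    reduce<<<n / BLOCKSIZE, BLOCKSIZE>>>(d_in, d_out);
    gpuAssert(cudaGetLastError(), __FILE__, __LINE__);
    gpuAssert(cudaMemcpy(h_sum, d_out, sizeof(int), cudaMemcpyDeviceToHost), __FILE__, __LINE__);
    cudaFree(d_in);
    cudaFree(d_out);
}
// --------------------------------------------------------------------------------------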
06ad4b21f3c29884a81b8379e5f4f05d57849ac4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <fstream> #include "SocialForceGPU.h" #include <omp.h> __global__ void testFunc() { } namespace NeighborModule { __device__ int zcode(int x, int y) { //return x * NUM_CELL + y; x &= 0x0000ffff; // x = ---- ---- ---- ---- fedc ba98 7654 3210 y &= 0x0000ffff; // x = ---- ---- ---- ---- fedc ba98 7654 3210 x = (x ^ (x << 8)) & 0x00ff00ff; // x = ---- ---- fedc ba98 ---- ---- 7654 3210 y = (y ^ (y << 8)) & 0x00ff00ff; // x = ---- ---- fedc ba98 ---- ---- 7654 3210 y = (y ^ (y << 4)) & 0x0f0f0f0f; // x = ---- fedc ---- ba98 ---- 7654 ---- 3210 x = (x ^ (x << 4)) & 0x0f0f0f0f; // x = ---- fedc ---- ba98 ---- 7654 ---- 3210 y = (y ^ (y << 2)) & 0x33333333; // x = --fe --dc --ba --98 --76 --54 --32 --10 x = (x ^ (x << 2)) & 0x33333333; // x = --fe --dc --ba --98 --76 --54 --32 --10 y = (y ^ (y << 1)) & 0x55555555; // x = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0 x = (x ^ (x << 1)) & 0x55555555; // x = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0 return x | (y << 1); } __device__ int zcode(const double2 &loc) { int ix = loc.x / (ENV_DIM / NUM_CELL); int iy = loc.y / (ENV_DIM / NUM_CELL); return zcode(ix, iy); } __device__ int zcode(SocialForceAgent *agent) { return zcode(agent->data.loc); } __device__ void swap(SocialForceAgent** agentPtrs, int a, int b) { SocialForceAgent* temp = agentPtrs[a]; agentPtrs[a] = agentPtrs[b]; agentPtrs[b] = temp; } __device__ void quickSortByAgentLoc(SocialForceAgent** agentPtrs, hiprandState_t &rState, int l, int r) { if (l == r) return; int pi = l + hiprand(&rState) % (r - l); swap(agentPtrs, l, pi); SocialForceAgent* pivot = agentPtrs[l]; int i = l + 1, j = l + 1; for (; j < r; j++) { if (zcode(agentPtrs[j]) < zcode(pivot)) { swap(agentPtrs, i, j); i++; } } swap(agentPtrs, l, i - 1); quickSortByAgentLoc(agentPtrs, rState, l, i - 1); quickSortByAgentLoc(agentPtrs, rState, i, r); } __global__ void sortAgentByLocKernel(SocialForceAgent** agentPtrsToSort, hiprandState_t *rState, int numCap) { int idx = threadIdx.x + blockIdx.x * blockDim.x; hiprandState_t &rStateLocal = *rState; if (idx == 0) quickSortByAgentLoc(agentPtrsToSort, rStateLocal, 0, numCap); } __global__ void setCidStartEndKernel(SocialForceAgent** contextSorted, int* cidStarts, int* cidEnds, int numCap) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numCap && idx > 0) { int cid = zcode(contextSorted[idx]); int cidPrev = zcode(contextSorted[idx - 1]); if (cid != cidPrev) { cidStarts[cid] = idx; cidEnds[cidPrev] = idx; } } if (idx == 0) { int cid = zcode(contextSorted[0]); cidStarts[cid] = 0; cid = zcode(contextSorted[numCap - 1]); cidEnds[cid] = numCap; } } } extern "C" void runTest() { testFunc << <32, 32 >> >(); } /* helper functions and data structures*/ #define checkCudaErrors(err) __checkCudaErrors(err, __FILE__, __LINE__) inline void __checkCudaErrors(hipError_t err, const char *file, const int line) { if (hipSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, hipGetErrorString(err)); exit(-1); } } namespace APUtil { __global__ void hookPointerAndDataKernel(SocialForceAgent** agentPtrArray, SocialForceAgent* agentArray, int numCap) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < numCap) agentPtrArray[index] = &agentArray[index]; } }; extern "C" void hookPointerAndData(SocialForceAgent** agentPtrArray, SocialForceAgent* agentArray, int numCap) { int gSize = GRID_SIZE(numCap); APUtil::hookPointerAndDataKernel << <gSize, BLOCK_SIZE 
>> >(agentPtrArray, agentArray, numCap); } __device__ double SocialForceAgent::correctCrossBoader(double val, double limit) { if (val >= limit) return limit - 0.001; else if (val < 0) return 0; return val; } void SocialForceAgent::computeIndivSocialForceRoom(const SocialForceAgentData &myData, const SocialForceAgentData &otherData, double2 &fSum){ double cMass = 100; //my data const double2& loc = myData.loc; const double2& goal = myData.goal; const double2& velo = myData.velocity; const double& v0 = myData.v0; const double& mass = myData.mass; //other's data const double2& locOther = otherData.loc; const double2& goalOther = otherData.goal; const double2& veloOther = otherData.velocity; const double& v0Other = otherData.v0; const double& massOther = otherData.mass; double d = 1e-15 + sqrt((loc.x - locOther.x) * (loc.x - locOther.x) + (loc.y - locOther.y) * (loc.y - locOther.y)); double dDelta = mass / cMass + massOther / cMass - d; double fExp = A * exp(dDelta / B); double fKg = dDelta < 0 ? 0 : k1 *dDelta; double nijx = (loc.x - locOther.x) / d; double nijy = (loc.y - locOther.y) / d; double fnijx = (fExp + fKg) * nijx; double fnijy = (fExp + fKg) * nijy; double fkgx = 0; double fkgy = 0; if (dDelta > 0) { double tix = -nijy; double tiy = nijx; fkgx = k2 * dDelta; fkgy = k2 * dDelta; double vijDelta = (veloOther.x - velo.x) * tix + (veloOther.y - velo.y) * tiy; fkgx = fkgx * vijDelta * tix; fkgy = fkgy * vijDelta * tiy; } fSum.x += fnijx + fkgx; fSum.y += fnijy + fkgy; } __device__ void SocialForceAgent::computeForceWithWall(const SocialForceAgentData &dataLocal, obstacleLine &wall, const int &cMass, double2 &fSum) { double2 wl = make_double2(wall.ex - wall.sx, wall.ey - wall.sy); if (length(wl) == 0) return; double diw, crx, cry; const double2 &loc = dataLocal.loc; diw = wall.pointToLineDist(loc, crx, cry); double virDiw = DIST(loc.x, loc.y, crx, cry); if (virDiw == 0) return; double niwx = (loc.x - crx) / virDiw; double niwy = (loc.y - cry) / virDiw; double drw = dataLocal.mass / cMass - diw; double fiw1 = A * exp(drw / B); if (drw > 0) fiw1 += k1 * drw; double fniwx = fiw1 * niwx; double fniwy = fiw1 * niwy; double fiwKgx = 0, fiwKgy = 0; if (drw > 0) { double fiwKg = k2 * drw * (dataLocal.velocity.x * (-niwy) + dataLocal.velocity.y * niwx); fiwKgx = fiwKg * (-niwy); fiwKgy = fiwKg * niwx; } fSum.x += fniwx - fiwKgx; fSum.y += fniwy - fiwKgy; } __device__ void SocialForceAgent::computeWallImpaction(const SocialForceAgentData &dataLocal, obstacleLine &wall, const double2 &newVelo, const double &tick, double &mint){ double crx, cry, tt; const double2 &loc = dataLocal.loc; int ret = wall.intersection2LineSeg( loc.x, loc.y, loc.x + 0.5 * newVelo.x * tick, loc.y + 0.5 * newVelo.y * tick, crx, cry ); if (ret == 1) { if (fabs(crx - loc.x) > 0) tt = (crx - loc.x) / (newVelo.x * tick); else tt = (crx - loc.y) / (newVelo.y * tick + 1e-20); if (tt < mint) mint = tt; } } __device__ void SocialForceAgent::computeDirection(const SocialForceAgentData &dataLocal, double2 &dvt) { //my data const double2& loc = dataLocal.loc; const double2& goal = dataLocal.goal; const double2& velo = dataLocal.velocity; const double& v0 = dataLocal.v0; const double& mass = dataLocal.mass; dvt.x = 0; dvt.y = 0; double2 diff; diff.x = 0; diff.y = 0; double d0 = sqrt((loc.x - goal.x) * (loc.x - goal.x) + (loc.y - goal.y) * (loc.y - goal.y)); diff.x = v0 * (goal.x - loc.x) / d0; diff.y = v0 * (goal.y - loc.y) / d0; dvt.x = (diff.x - velo.x) / tao; dvt.y = (diff.y - velo.y) / tao; } __device__ int sharedMinAndMax(int 
value, bool minFlag) { for (int i = 16; i >= 1; i /= 2) { if (minFlag) value = min(value, __shfl_xor(value, i, 32)); else value = max(value, __shfl_xor(value, i, 32)); } return value; } __device__ void SocialForceAgent::computeSocialForceRoom(SocialForceAgentData &dataLocal, double2 &fSum) { //__shared__ SocialForceAgentData sdata[BLOCK_SIZE]; fSum.x = 0; fSum.y = 0; double ds = 0; int neighborCount = 0; for (int i = 0; i < NUM_CAP; i++) { SocialForceAgent *other = myClone->context[i]; SocialForceAgentData otherData = other->data; ds = length(otherData.loc - dataLocal.loc); if (ds < 6 && ds > 0) { neighborCount++; computeIndivSocialForceRoom(dataLocal, otherData, fSum); for (int i = 0; i < NUM_PARAM; i++) this->flagCloning[i] |= other->flagCloning[i]; } } dataLocal.numNeighbor = neighborCount; } __device__ void SocialForceAgent::chooseNewGoal(const double2 &newLoc, double epsilon, double2 &newGoal) { double2 oldGoal = newGoal; double2 center = make_double2(ENV_DIM / 2, ENV_DIM / 2); if (newLoc.x < center.x && newLoc.y <= center.y) { newGoal.x = 0.5 * ENV_DIM; newGoal.y = 0.3 * ENV_DIM; } else if (newLoc.x <= center.x && newLoc.y > center.y) { newGoal.x = 0.3 * ENV_DIM; newGoal.y = 0.5 * ENV_DIM; } else if (newLoc.x > center.x && newLoc.y > center.y) { newGoal.x = 0.5 * ENV_DIM; newGoal.y = 0.7 * ENV_DIM; } else if (newLoc.x >= center.x && newLoc.y < center.y){ newGoal.x = 0.9 * ENV_DIM; newGoal.y = 0.3 * ENV_DIM; } } __device__ void SocialForceAgent::step(){ double cMass = 100; const double2& loc = data.loc; const double2& goal = data.goal; const double2& velo = data.velocity; const double& v0 = data.v0; const double& mass = data.mass; //compute the direction double2 dvt; computeDirection(data, dvt); //compute force with other agents double2 fSum; computeSocialForceRoom(data, fSum); //compute force with walls and gates for (int i = 0; i < NUM_WALLS; i++) { obstacleLine wall = myClone->walls[i]; computeForceWithWall(data, wall, cMass, fSum); } for (int i = 0; i < NUM_PARAM; i++) { obstacleLine &gate = myClone->gates[i]; if (gate.pointToLineDist(loc) < 6) { // ideally, parent clone agent should compare against all child clone parameter configuration this->flagCloning[i] = -1; } } //sum up dvt.x += fSum.x / mass; dvt.y += fSum.y / mass; double2 newVelo = data.velocity; double2 newLoc = data.loc; double2 newGoal = data.goal; double tick = 0.1; newVelo.x += dvt.x * tick * (1);// + this->random->gaussian() * 0.1); newVelo.y += dvt.y * tick * (1);// + this->random->gaussian() * 0.1); double dv = sqrt(newVelo.x * newVelo.x + newVelo.y * newVelo.y); if (dv > maxv) { newVelo.x = newVelo.x * maxv / dv; newVelo.y = newVelo.y * maxv / dv; } double mint = 1; for (int i = 0; i < NUM_WALLS; i++) { obstacleLine wall = myClone->walls[i]; computeWallImpaction(data, wall, newVelo, tick, mint); } newVelo.x *= mint; newVelo.y *= mint; newLoc.x += newVelo.x * tick; newLoc.y += newVelo.y * tick; double goalTemp = goal.x; chooseNewGoal(newLoc, mass / cMass, newGoal); newLoc.x = correctCrossBoader(newLoc.x, ENV_DIM); newLoc.y = correctCrossBoader(newLoc.y, ENV_DIM); dataCopy = data; dataCopy.loc = newLoc; dataCopy.velocity = newVelo; dataCopy.goal = newGoal; } __device__ void SocialForceAgent::init(SocialForceClone* c, int idx) { this->contextId = idx; //this->myOrigin = NULL; this->goalIdx = 0; this->myClone = c; for (int i = 0; i < NUM_PARAM; i++) { this->flagCloning[i] = 0; this->flagCloned[i] = 0; } hiprandState_t rStateLocal = c->rState[idx]; this->color.x = hiprand(&rStateLocal) % 256; this->color.y = 
hiprand(&rStateLocal) % 256; this->color.z = hiprand(&rStateLocal) % 256; this->color.w = hiprand(&rStateLocal) % 256; SocialForceAgentData & dataLocal = this->data; //= &sfModel->originalAgents->dataArray[dataSlot]; float rx = (float)(idx / 32) / (float)32; float ry = (float)(idx % 32) / (float)32; dataLocal.loc.x = (0.6 + 0.1 * hiprand_uniform(&rStateLocal)) * ENV_DIM; dataLocal.loc.y = (0.5 + 0.4 * hiprand_uniform(&rStateLocal)) * ENV_DIM; dataLocal.velocity.x = 2;//4 * (this->random->uniform()-0.5); dataLocal.velocity.y = 2;//4 * (this->random->uniform()-0.5); dataLocal.v0 = 2; dataLocal.mass = 50; dataLocal.numNeighbor = 0; //chooseNewGoal(dataLocal.loc, 0, dataLocal.goal); dataLocal.goal = make_double2(0.5 * ENV_DIM, 0.7 * ENV_DIM); this->dataCopy = dataLocal; } __device__ void SocialForceAgent::initNewClone(SocialForceAgent *parent, SocialForceClone *childClone) { this->color = childClone->color; this->contextId = parent->contextId; //this->myOrigin = parent; this->myClone = childClone; this->goalIdx = parent->goalIdx; for (int i = 0; i < NUM_GOAL; i++) this->goalSeq[i] = parent->goalSeq[i]; for (int i = 0; i < NUM_PARAM; i++) { this->flagCloning[i] = 0; this->flagCloned[i] = 0; } this->data = parent->data; this->dataCopy = parent->dataCopy; } namespace clone { __global__ void stepKernel(SocialForceClone *c, int numElemLocal) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < numElemLocal) c->ap->agentPtrArray[index]->step(); } __global__ void swapKernel(SocialForceClone *c, int numElemLocal) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElemLocal) { SocialForceAgent &agent = *c->ap->agentPtrArray[idx]; agent.data = agent.dataCopy; } } } void SocialForceClone::step(int stepCount) { if (numElem == 0) return; int gSize; //alterGate(stepCount); /* hipMemcpyAsync(contextSorted, context, sizeof(SocialForceAgent*) * NUM_CAP, hipMemcpyDeviceToDevice, myStream); hipStreamSynchronize(myStream); NeighborModule::sortAgentByLocKernel << <1, 1, 0, myStream >> >(this->contextSorted, this->rState, NUM_CAP); hipMemsetAsync(cidStarts, 0xff, sizeof(int) * NUM_CELL * NUM_CELL, myStream); hipMemsetAsync(cidEnds, 0xff, sizeof(int) * NUM_CELL * NUM_CELL, myStream); hipStreamSynchronize(myStream); gSize = GRID_SIZE(NUM_CAP); NeighborModule::setCidStartEndKernel<<<gSize, BLOCK_SIZE, 0, myStream>>>(contextSorted, cidStarts, cidEnds, NUM_CAP); NeighborModule::sortAgentByLocKernel << <1, 1, 0, myStream >> >(this->apHost->agentPtrArray, this->rState, this->numElem); */ gSize = GRID_SIZE(numElem); size_t smemSize = sizeof(SocialForceAgentData) * BLOCK_SIZE; clone::stepKernel << <gSize, BLOCK_SIZE, smemSize, myStream >> >(selfDev, numElem); //clone::stepKernel << <gSize, BLOCK_SIZE >> >(selfDev, numElem); } void SocialForceClone::swap() { if (numElem == 0) return; int gSize = GRID_SIZE(numElem); clone::swapKernel << <gSize, BLOCK_SIZE >> >(selfDev, numElem); } void SocialForceClone::alterGate(int stepCount) { bool changed = false; for (int i = 0; i < NUM_PARAM; i++) { if (cloneParams[i] == stepCount) { changed = true; gates[i].init(0, 0, 0, 0); //hipMemcpyAsync(&selfDev->gates[i], &gates[i], sizeof(obstacleLine), hipMemcpyHostToDevice, myStream); hipMemcpy(&selfDev->gates[i], &gates[i], sizeof(obstacleLine), hipMemcpyHostToDevice); } } } namespace AppUtil { __device__ bool cloningCondition(SocialForceAgent *agent, SocialForceClone *parentClone, SocialForceClone *childClone) { // if agent has been cloned? 
if (childClone->cloneFlags[agent->contextId] == true) return false; // active cloning condition double2 &loc = agent->data.loc; for (int i = 0; i < NUM_PARAM; i++) { int param1 = parentClone->cloneParams[i]; int param2 = childClone->cloneParams[i]; if (param1 != param2) { obstacleLine g1 = parentClone->gates[i]; obstacleLine g2 = childClone->gates[i]; if (g1.pointToLineDist(loc) < 6) return true; if (g2.pointToLineDist(loc) < 6) return true; } } // passive cloning condition #define MY_MAX(a, b) (a > b ? a : b) #define MY_MIN(a, b) (a < b ? a : b) int minx = MY_MAX((loc.x - RADIUS_I) / CELL_DIM, 0); int miny = MY_MAX((loc.y - RADIUS_I) / CELL_DIM, 0); int maxx = MY_MIN((loc.x + RADIUS_I) / CELL_DIM, NUM_CELL - 1); int maxy = MY_MIN((loc.y + RADIUS_I) / CELL_DIM, NUM_CELL - 1); for (int i = minx; i <= maxx; i++) for (int j = miny; j <= maxy; j++) if (childClone->takenMap[i * NUM_CELL + j]) return true; // pass all the check, don't need to be cloned return false; } __global__ void updateContextKernel(SocialForceClone *c, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElem) { SocialForceAgent *agent = c->ap->agentPtrArray[idx]; c->context[agent->contextId] = agent; } } __global__ void constructPassiveMap(SocialForceClone *c, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElem) { SocialForceAgent &agent = *c->ap->agentPtrArray[idx]; int takenId = agent.data.loc.x / CELL_DIM; takenId = takenId * NUM_CELL + agent.data.loc.y / CELL_DIM; c->takenMap[takenId] = true; } } __global__ void performCloningKernel(SocialForceClone *p, SocialForceClone *c, int numCap) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numCap) { SocialForceAgent *agent = p->context[idx]; if (cloningCondition(agent, p, c)) { uint lastNum = atomicInc(&c->numElem, numCap); SocialForceAgent& childAgent = *c->ap->agentPtrArray[lastNum]; c->ap->takenFlags[lastNum] = true; childAgent.initNewClone(agent, c); c->context[childAgent.contextId] = &childAgent; c->cloneFlags[childAgent.contextId] = true; //c->numElem++; /* not written back */ } } } __global__ void performCloningOldKernel(SocialForceClone *p, SocialForceClone *c, int numParent) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numParent) { SocialForceAgent *agent = p->ap->agentPtrArray[idx]; int cloneLevelLocal = c->cloneLevel; int cloneMaskLocal = c->cloneMasks[cloneLevelLocal]; int cloneDecision = ~agent->flagCloned[cloneLevelLocal] & cloneMaskLocal & agent->flagCloning[cloneLevelLocal]; if (cloneDecision > 0) { agent->flagCloned[cloneLevelLocal] |= cloneMaskLocal; agent->flagCloning[cloneLevelLocal] &= ~cloneMaskLocal; uint lastNum = atomicInc(&c->numElem, NUM_CAP); SocialForceAgent &childAgent = *c->ap->agentPtrArray[lastNum]; c->ap->takenFlags[lastNum] = true; childAgent.initNewClone(agent, c); c->context[childAgent.contextId] = &childAgent; c->cloneFlags[childAgent.contextId] = true; } } } __global__ void compareAndEliminateKernel(SocialForceClone *p, SocialForceClone *c, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElem) { SocialForceAgent &childAgent = *c->ap->agentPtrArray[idx]; SocialForceAgent &parentAgent = *p->context[childAgent.contextId]; // *(SocialForceAgent*)childAgent.myOrigin; double velDiff = length(childAgent.dataCopy.velocity - parentAgent.dataCopy.velocity); double locDiff = length(childAgent.dataCopy.loc - parentAgent.dataCopy.loc); if (locDiff == 0 && velDiff == 0) { c->ap->takenFlags[idx] = false; c->cloneFlags[childAgent.contextId] = 
false; } } } __global__ void compareAndEliminateOldKernel(SocialForceClone *p, SocialForceClone *c, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElem) { SocialForceAgent &childAgent = *c->ap->agentPtrArray[idx]; SocialForceAgent &parentAgent = *p->context[childAgent.contextId]; double velDiff = length(childAgent.dataCopy.velocity - parentAgent.dataCopy.velocity); double locDiff = length(childAgent.dataCopy.loc - parentAgent.dataCopy.loc); if (locDiff == 0 && velDiff == 0) { c->ap->takenFlags[idx] = false; c->cloneFlags[childAgent.contextId] = false; int cloneLevelLocal = c->cloneLevel; int cloneMaskLocal = c->cloneMasks[cloneLevelLocal]; parentAgent.flagCloned[cloneLevelLocal] &= ~cloneMaskLocal; parentAgent.flagCloning[cloneLevelLocal] &= ~cloneMaskLocal; } } } template<class T> __device__ void swap(T * ar, int a, int b) { T t1 = ar[a]; ar[a] = ar[b]; ar[b] = t1; } __global__ void reorderKernel(SocialForceClone *c, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx == 0) { int l = 0; int r = numElem; int i = l, j = l; for (; j < r; j++) { if (c->ap->takenFlags[j] == true) { swap<SocialForceAgent*>(c->ap->agentPtrArray, i, j); swap<bool>(c->ap->takenFlags, i, j); i++; } } c->numElem = i; } } }; void SocialForceSimApp::performClone(SocialForceClone *parentClone, SocialForceClone *childClone) { childClone->parentCloneid = parentClone->cloneid; // 1. copy the context of parent clone hipMemcpyAsync(childClone->context, parentClone->context, NUM_CAP * sizeof(SocialForceAgent*), hipMemcpyDeviceToDevice, childClone->myStream); hipStreamSynchronize(childClone->myStream); //hipMemcpy(childClone->context, parentClone->context, NUM_CAP * sizeof(SocialForceAgent*), hipMemcpyDeviceToDevice); getLastCudaError("perform clone"); // 2. update the context with agents of its own if (childClone->numElem > 0) { int gSize = GRID_SIZE(childClone->numElem); AppUtil::updateContextKernel << <gSize, BLOCK_SIZE, 0, childClone->myStream >> >(childClone->selfDev, childClone->numElem); //AppUtil::updateContextKernel << <gSize, BLOCK_SIZE >> >(childClone->selfDev, childClone->numElem); getLastCudaError("perform clone"); } // 4. 
perform active and passive cloning (in cloningCondition checking) if (parentClone->numElem > 0) { int gSize = GRID_SIZE(parentClone->numElem); //AppUtil::performCloningKernel << <gSize, BLOCK_SIZE >> >(parentClone->selfDev, childClone->selfDev, NUM_CAP); AppUtil::performCloningOldKernel << <gSize, BLOCK_SIZE, 0, childClone->myStream >> >(parentClone->selfDev, childClone->selfDev, parentClone->numElem); hipMemcpyAsync(childClone, childClone->selfDev, sizeof(SocialForceClone), hipMemcpyDeviceToHost, childClone->myStream); hipStreamSynchronize(childClone->myStream); getLastCudaError("perform clone"); } } void compareAndEliminateCPU(SocialForceClone *parentClone, SocialForceClone *childClone) { wchar_t message[20]; for (int i = 0; i < childClone->numElem; i++) { SocialForceAgent &childAgent = *childClone->ap->agentPtrArray[i]; SocialForceAgent parentAgent; // *(SocialForceAgent*)childAgent.myOrigin; if (length(childAgent.dataCopy.velocity - parentAgent.dataCopy.velocity) == 0 && length(childAgent.dataCopy.loc - parentAgent.dataCopy.loc) == 0) { childClone->ap->takenFlags[i] = false; childClone->cloneFlags[childAgent.contextId] = false; } /*else { if (childClone->cloneid == 4) { swprintf_s(message, 20, L"not false: %d\n", i); OutputDebugString(message); } }*/ } childClone->numElem = childClone->ap->reorder(childClone->numElem); } void SocialForceSimApp::compareAndEliminate(SocialForceClone *parentClone, SocialForceClone *childClone) { if (childClone->numElem == 0) return; int gSize = GRID_SIZE(childClone->numElem); AppUtil::compareAndEliminateOldKernel << <gSize, BLOCK_SIZE, 0, childClone->myStream >> >(parentClone->selfDev, childClone->selfDev, childClone->numElem); //AppUtil::compareAndEliminateKernel << <gSize, BLOCK_SIZE>> >(parentClone->selfDev, childClone->selfDev, childClone->numElem); gSize = GRID_SIZE(NUM_CAP); AppUtil::reorderKernel << <1, 1, 0, childClone->myStream >> >(childClone->selfDev, childClone->numElem); //AppUtil::reorderKernel << <1, 1 >> >(childClone->selfDev, childClone->numElem); hipMemcpyAsync(childClone, childClone->selfDev, sizeof(SocialForceClone), hipMemcpyDeviceToHost, childClone->myStream); hipStreamSynchronize(childClone->myStream); } void SocialForceSimApp::proc(int p, int c, bool o, char *s) { performClone(cAll[p], cAll[c]); cAll[c]->step(stepCount); if (o) { if (stepCount < 800) cAll[c]->output(stepCount, s); } compareAndEliminate(cAll[p], cAll[c]); } void swap(int **cloneTree, int a, int b) { int t1 = cloneTree[0][a]; cloneTree[0][a] = cloneTree[0][b]; cloneTree[0][b] = t1; t1 = cloneTree[1][a]; cloneTree[1][a] = cloneTree[1][b]; cloneTree[1][b] = t1; } void quickSort(int **cloneTree, int l, int r) { if (l == r) return; int pi = l + rand() % (r - l); swap(cloneTree, l, pi); int pivot = cloneTree[0][l]; int i = l + 1, j = l + 1; for (; j < r; j++) { if (cloneTree[0][j] < pivot) { swap(cloneTree, i, j); i++; } } swap(cloneTree, l, i - 1); quickSort(cloneTree, l, i - 1); quickSort(cloneTree, i, r); } void SocialForceSimApp::mst() { // clone diff matrix int **cloneDiff = new int*[totalClone]; for (int i = 0; i < totalClone; i++) { cloneDiff[i] = new int[totalClone]; for (int j = 0; j < totalClone; j++) cloneDiff[i][j] = 0; } for (int i = 0; i < totalClone; i++) { for (int j = 0; j < totalClone; j++) { for (int k = 0; k < NUM_PARAM; k++) { if (cAll[i]->cloneParams[k] != cAll[j]->cloneParams[k]) cloneDiff[i][j]++; } wchar_t message[20]; swprintf_s(message, 20, L"%d ", cloneDiff[i][j]); OutputDebugString(message); } OutputDebugString(L"\n"); } int *parent = 
cloneTree[0] = new int[totalClone]; int *child = cloneTree[1] = new int[totalClone]; int *key = new int[totalClone]; bool *mstSet = new bool[totalClone]; for (int i = 0; i < totalClone; i++) child[i] = i, key[i] = INT_MAX, mstSet[i] = false; key[0] = 0; parent[0] = -1; child[0] = 0; int count = 0; while (count++ < totalClone - 1) { int minKey = INT_MAX; int minIdx; for (int j = 0; j < totalClone; j++) if (mstSet[j] == false && key[j] < minKey) minKey = key[j], minIdx = j; mstSet[minIdx] = true; for (int j = 0; j < totalClone; j++) if (cloneDiff[minIdx][j] && mstSet[j] == false && cloneDiff[minIdx][j] < key[j]) parent[j] = minIdx, key[j] = cloneDiff[minIdx][j]; } quickSort(cloneTree, 0, totalClone); for (int i = 1; i < totalClone; i++) { wchar_t message[20]; swprintf_s(message, 20, L"%d - %d: %d\n", cloneTree[0][i], cloneTree[1][i], cloneDiff[i][parent[i]]); OutputDebugString(message); } delete mstSet; delete key; } __global__ void getLocAndColorKernel(SocialForceClone *c, double2 *loc, uchar4 *color, int *contextId, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElem) { loc[idx] = c->context[idx]->data.loc; color[idx] = c->context[idx]->color; contextId[idx] = c->context[idx]->contextId; } } void SocialForceSimApp::getLocAndColorFromDevice(){ SocialForceClone *c = cAll[paintId]; int gSize = GRID_SIZE(NUM_CAP); getLocAndColorKernel << <gSize, BLOCK_SIZE >> >(c->selfDev, debugLocDev, debugColorDev, debugContextIdDev, NUM_CAP); hipMemcpy(debugLocHost, debugLocDev, sizeof(double2) * NUM_CAP, hipMemcpyDeviceToHost); hipMemcpy(debugColorHost, debugColorDev, sizeof(uchar4) * NUM_CAP, hipMemcpyDeviceToHost); hipMemcpy(debugContextIdHost, debugContextIdDev, sizeof(int) * NUM_CAP, hipMemcpyDeviceToHost); hipMemcpy(c->takenMap, c->selfDev->takenMap, sizeof(bool) * NUM_CELL * NUM_CELL, hipMemcpyDeviceToHost); //hipMemcpy(debugCidStartsHost, c->cidStarts, sizeof(int) * NUM_CELL * NUM_CELL, hipMemcpyDeviceToHost); //hipMemcpy(debugCidEndsHost, c->cidEnds, sizeof(int) * NUM_CELL * NUM_CELL, hipMemcpyDeviceToHost); //wchar_t message[128]; //for (int i = 0; i < NUM_CELL * NUM_CELL; i++) { // swprintf_s(message, L"(%d, %d) ", debugCidStartsHost[i], debugCidEndsHost[i]); // OutputDebugString(message); //} //OutputDebugString(L"\n"); } __global__ void initRandomKernel(SocialForceClone* c, int numElemLocal) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElemLocal) { hiprand_init(1234, idx, 0, &c->rState[idx]); } } __global__ void initRootCloneKernel(SocialForceClone* c, int numElemLocal) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElemLocal) { c->ap->agentArray[idx].init(c, idx); c->context[idx] = &c->ap->agentArray[idx]; c->cloneFlags[idx] = false; } if (idx == 0) c->numElem = numElemLocal; } void SocialForceSimApp::initRootClone(SocialForceClone* cHost, SocialForceClone* cDev) { cHost->numElem = NUM_CAP; int gSize = GRID_SIZE(NUM_CAP); initRandomKernel << <gSize, BLOCK_SIZE >> >(cDev, NUM_CAP); initRootCloneKernel << <gSize, BLOCK_SIZE >> >(cDev, NUM_CAP); }
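// --- Editor's sketch (not part of the original hipify pair above) ---------------------
// zcode() near the top of the file interleaves the bits of the cell coordinates (x, y)
// into a Morton / Z-order code, so agents that are close in 2-D sort close together.
// A straightforward host-side reference version for sanity checking the bit tricks;
// mortonRef is an illustrative name and does not appear in the original source.
static unsigned int mortonRef(unsigned int x, unsigned int y)
{
    unsigned int code = 0;
    for (int b = 0; b < 16; b++) {
        code |= ((x >> b) & 1u) << (2 * b);       // even bit positions hold x's bits
        code |= ((y >> b) & 1u) << (2 * b + 1);   // odd bit positions hold y's bits
    }
    return code;   // e.g. mortonRef(3, 5) == 0b100111 == 39
}
// --------------------------------------------------------------------------------------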
06ad4b21f3c29884a81b8379e5f4f05d57849ac4.cu
#include "cuda_runtime.h" #include <fstream> #include "SocialForceGPU.h" #include <omp.h> __global__ void testFunc() { } namespace NeighborModule { __device__ int zcode(int x, int y) { //return x * NUM_CELL + y; x &= 0x0000ffff; // x = ---- ---- ---- ---- fedc ba98 7654 3210 y &= 0x0000ffff; // x = ---- ---- ---- ---- fedc ba98 7654 3210 x = (x ^ (x << 8)) & 0x00ff00ff; // x = ---- ---- fedc ba98 ---- ---- 7654 3210 y = (y ^ (y << 8)) & 0x00ff00ff; // x = ---- ---- fedc ba98 ---- ---- 7654 3210 y = (y ^ (y << 4)) & 0x0f0f0f0f; // x = ---- fedc ---- ba98 ---- 7654 ---- 3210 x = (x ^ (x << 4)) & 0x0f0f0f0f; // x = ---- fedc ---- ba98 ---- 7654 ---- 3210 y = (y ^ (y << 2)) & 0x33333333; // x = --fe --dc --ba --98 --76 --54 --32 --10 x = (x ^ (x << 2)) & 0x33333333; // x = --fe --dc --ba --98 --76 --54 --32 --10 y = (y ^ (y << 1)) & 0x55555555; // x = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0 x = (x ^ (x << 1)) & 0x55555555; // x = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0 return x | (y << 1); } __device__ int zcode(const double2 &loc) { int ix = loc.x / (ENV_DIM / NUM_CELL); int iy = loc.y / (ENV_DIM / NUM_CELL); return zcode(ix, iy); } __device__ int zcode(SocialForceAgent *agent) { return zcode(agent->data.loc); } __device__ void swap(SocialForceAgent** agentPtrs, int a, int b) { SocialForceAgent* temp = agentPtrs[a]; agentPtrs[a] = agentPtrs[b]; agentPtrs[b] = temp; } __device__ void quickSortByAgentLoc(SocialForceAgent** agentPtrs, curandState &rState, int l, int r) { if (l == r) return; int pi = l + curand(&rState) % (r - l); swap(agentPtrs, l, pi); SocialForceAgent* pivot = agentPtrs[l]; int i = l + 1, j = l + 1; for (; j < r; j++) { if (zcode(agentPtrs[j]) < zcode(pivot)) { swap(agentPtrs, i, j); i++; } } swap(agentPtrs, l, i - 1); quickSortByAgentLoc(agentPtrs, rState, l, i - 1); quickSortByAgentLoc(agentPtrs, rState, i, r); } __global__ void sortAgentByLocKernel(SocialForceAgent** agentPtrsToSort, curandState *rState, int numCap) { int idx = threadIdx.x + blockIdx.x * blockDim.x; curandState &rStateLocal = *rState; if (idx == 0) quickSortByAgentLoc(agentPtrsToSort, rStateLocal, 0, numCap); } __global__ void setCidStartEndKernel(SocialForceAgent** contextSorted, int* cidStarts, int* cidEnds, int numCap) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numCap && idx > 0) { int cid = zcode(contextSorted[idx]); int cidPrev = zcode(contextSorted[idx - 1]); if (cid != cidPrev) { cidStarts[cid] = idx; cidEnds[cidPrev] = idx; } } if (idx == 0) { int cid = zcode(contextSorted[0]); cidStarts[cid] = 0; cid = zcode(contextSorted[numCap - 1]); cidEnds[cid] = numCap; } } } extern "C" void runTest() { testFunc << <32, 32 >> >(); } /* helper functions and data structures*/ #define checkCudaErrors(err) __checkCudaErrors(err, __FILE__, __LINE__) inline void __checkCudaErrors(cudaError err, const char *file, const int line) { if (cudaSuccess != err) { fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString(err)); exit(-1); } } namespace APUtil { __global__ void hookPointerAndDataKernel(SocialForceAgent** agentPtrArray, SocialForceAgent* agentArray, int numCap) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < numCap) agentPtrArray[index] = &agentArray[index]; } }; extern "C" void hookPointerAndData(SocialForceAgent** agentPtrArray, SocialForceAgent* agentArray, int numCap) { int gSize = GRID_SIZE(numCap); APUtil::hookPointerAndDataKernel << <gSize, BLOCK_SIZE >> >(agentPtrArray, agentArray, numCap); } __device__ double 
SocialForceAgent::correctCrossBoader(double val, double limit) { if (val >= limit) return limit - 0.001; else if (val < 0) return 0; return val; } void SocialForceAgent::computeIndivSocialForceRoom(const SocialForceAgentData &myData, const SocialForceAgentData &otherData, double2 &fSum){ double cMass = 100; //my data const double2& loc = myData.loc; const double2& goal = myData.goal; const double2& velo = myData.velocity; const double& v0 = myData.v0; const double& mass = myData.mass; //other's data const double2& locOther = otherData.loc; const double2& goalOther = otherData.goal; const double2& veloOther = otherData.velocity; const double& v0Other = otherData.v0; const double& massOther = otherData.mass; double d = 1e-15 + sqrt((loc.x - locOther.x) * (loc.x - locOther.x) + (loc.y - locOther.y) * (loc.y - locOther.y)); double dDelta = mass / cMass + massOther / cMass - d; double fExp = A * exp(dDelta / B); double fKg = dDelta < 0 ? 0 : k1 *dDelta; double nijx = (loc.x - locOther.x) / d; double nijy = (loc.y - locOther.y) / d; double fnijx = (fExp + fKg) * nijx; double fnijy = (fExp + fKg) * nijy; double fkgx = 0; double fkgy = 0; if (dDelta > 0) { double tix = -nijy; double tiy = nijx; fkgx = k2 * dDelta; fkgy = k2 * dDelta; double vijDelta = (veloOther.x - velo.x) * tix + (veloOther.y - velo.y) * tiy; fkgx = fkgx * vijDelta * tix; fkgy = fkgy * vijDelta * tiy; } fSum.x += fnijx + fkgx; fSum.y += fnijy + fkgy; } __device__ void SocialForceAgent::computeForceWithWall(const SocialForceAgentData &dataLocal, obstacleLine &wall, const int &cMass, double2 &fSum) { double2 wl = make_double2(wall.ex - wall.sx, wall.ey - wall.sy); if (length(wl) == 0) return; double diw, crx, cry; const double2 &loc = dataLocal.loc; diw = wall.pointToLineDist(loc, crx, cry); double virDiw = DIST(loc.x, loc.y, crx, cry); if (virDiw == 0) return; double niwx = (loc.x - crx) / virDiw; double niwy = (loc.y - cry) / virDiw; double drw = dataLocal.mass / cMass - diw; double fiw1 = A * exp(drw / B); if (drw > 0) fiw1 += k1 * drw; double fniwx = fiw1 * niwx; double fniwy = fiw1 * niwy; double fiwKgx = 0, fiwKgy = 0; if (drw > 0) { double fiwKg = k2 * drw * (dataLocal.velocity.x * (-niwy) + dataLocal.velocity.y * niwx); fiwKgx = fiwKg * (-niwy); fiwKgy = fiwKg * niwx; } fSum.x += fniwx - fiwKgx; fSum.y += fniwy - fiwKgy; } __device__ void SocialForceAgent::computeWallImpaction(const SocialForceAgentData &dataLocal, obstacleLine &wall, const double2 &newVelo, const double &tick, double &mint){ double crx, cry, tt; const double2 &loc = dataLocal.loc; int ret = wall.intersection2LineSeg( loc.x, loc.y, loc.x + 0.5 * newVelo.x * tick, loc.y + 0.5 * newVelo.y * tick, crx, cry ); if (ret == 1) { if (fabs(crx - loc.x) > 0) tt = (crx - loc.x) / (newVelo.x * tick); else tt = (crx - loc.y) / (newVelo.y * tick + 1e-20); if (tt < mint) mint = tt; } } __device__ void SocialForceAgent::computeDirection(const SocialForceAgentData &dataLocal, double2 &dvt) { //my data const double2& loc = dataLocal.loc; const double2& goal = dataLocal.goal; const double2& velo = dataLocal.velocity; const double& v0 = dataLocal.v0; const double& mass = dataLocal.mass; dvt.x = 0; dvt.y = 0; double2 diff; diff.x = 0; diff.y = 0; double d0 = sqrt((loc.x - goal.x) * (loc.x - goal.x) + (loc.y - goal.y) * (loc.y - goal.y)); diff.x = v0 * (goal.x - loc.x) / d0; diff.y = v0 * (goal.y - loc.y) / d0; dvt.x = (diff.x - velo.x) / tao; dvt.y = (diff.y - velo.y) / tao; } __device__ int sharedMinAndMax(int value, bool minFlag) { for (int i = 16; i >= 1; i /= 2) { if 
(minFlag) value = min(value, __shfl_xor(value, i, 32)); else value = max(value, __shfl_xor(value, i, 32)); } return value; } __device__ void SocialForceAgent::computeSocialForceRoom(SocialForceAgentData &dataLocal, double2 &fSum) { //__shared__ SocialForceAgentData sdata[BLOCK_SIZE]; fSum.x = 0; fSum.y = 0; double ds = 0; int neighborCount = 0; for (int i = 0; i < NUM_CAP; i++) { SocialForceAgent *other = myClone->context[i]; SocialForceAgentData otherData = other->data; ds = length(otherData.loc - dataLocal.loc); if (ds < 6 && ds > 0) { neighborCount++; computeIndivSocialForceRoom(dataLocal, otherData, fSum); for (int i = 0; i < NUM_PARAM; i++) this->flagCloning[i] |= other->flagCloning[i]; } } dataLocal.numNeighbor = neighborCount; } __device__ void SocialForceAgent::chooseNewGoal(const double2 &newLoc, double epsilon, double2 &newGoal) { double2 oldGoal = newGoal; double2 center = make_double2(ENV_DIM / 2, ENV_DIM / 2); if (newLoc.x < center.x && newLoc.y <= center.y) { newGoal.x = 0.5 * ENV_DIM; newGoal.y = 0.3 * ENV_DIM; } else if (newLoc.x <= center.x && newLoc.y > center.y) { newGoal.x = 0.3 * ENV_DIM; newGoal.y = 0.5 * ENV_DIM; } else if (newLoc.x > center.x && newLoc.y > center.y) { newGoal.x = 0.5 * ENV_DIM; newGoal.y = 0.7 * ENV_DIM; } else if (newLoc.x >= center.x && newLoc.y < center.y){ newGoal.x = 0.9 * ENV_DIM; newGoal.y = 0.3 * ENV_DIM; } } __device__ void SocialForceAgent::step(){ double cMass = 100; const double2& loc = data.loc; const double2& goal = data.goal; const double2& velo = data.velocity; const double& v0 = data.v0; const double& mass = data.mass; //compute the direction double2 dvt; computeDirection(data, dvt); //compute force with other agents double2 fSum; computeSocialForceRoom(data, fSum); //compute force with walls and gates for (int i = 0; i < NUM_WALLS; i++) { obstacleLine wall = myClone->walls[i]; computeForceWithWall(data, wall, cMass, fSum); } for (int i = 0; i < NUM_PARAM; i++) { obstacleLine &gate = myClone->gates[i]; if (gate.pointToLineDist(loc) < 6) { // ideally, parent clone agent should compare against all child clone parameter configuration this->flagCloning[i] = -1; } } //sum up dvt.x += fSum.x / mass; dvt.y += fSum.y / mass; double2 newVelo = data.velocity; double2 newLoc = data.loc; double2 newGoal = data.goal; double tick = 0.1; newVelo.x += dvt.x * tick * (1);// + this->random->gaussian() * 0.1); newVelo.y += dvt.y * tick * (1);// + this->random->gaussian() * 0.1); double dv = sqrt(newVelo.x * newVelo.x + newVelo.y * newVelo.y); if (dv > maxv) { newVelo.x = newVelo.x * maxv / dv; newVelo.y = newVelo.y * maxv / dv; } double mint = 1; for (int i = 0; i < NUM_WALLS; i++) { obstacleLine wall = myClone->walls[i]; computeWallImpaction(data, wall, newVelo, tick, mint); } newVelo.x *= mint; newVelo.y *= mint; newLoc.x += newVelo.x * tick; newLoc.y += newVelo.y * tick; double goalTemp = goal.x; chooseNewGoal(newLoc, mass / cMass, newGoal); newLoc.x = correctCrossBoader(newLoc.x, ENV_DIM); newLoc.y = correctCrossBoader(newLoc.y, ENV_DIM); dataCopy = data; dataCopy.loc = newLoc; dataCopy.velocity = newVelo; dataCopy.goal = newGoal; } __device__ void SocialForceAgent::init(SocialForceClone* c, int idx) { this->contextId = idx; //this->myOrigin = NULL; this->goalIdx = 0; this->myClone = c; for (int i = 0; i < NUM_PARAM; i++) { this->flagCloning[i] = 0; this->flagCloned[i] = 0; } curandState_t rStateLocal = c->rState[idx]; this->color.x = curand(&rStateLocal) % 256; this->color.y = curand(&rStateLocal) % 256; this->color.z = curand(&rStateLocal) % 
256; this->color.w = curand(&rStateLocal) % 256; SocialForceAgentData & dataLocal = this->data; //= &sfModel->originalAgents->dataArray[dataSlot]; float rx = (float)(idx / 32) / (float)32; float ry = (float)(idx % 32) / (float)32; dataLocal.loc.x = (0.6 + 0.1 * curand_uniform(&rStateLocal)) * ENV_DIM; dataLocal.loc.y = (0.5 + 0.4 * curand_uniform(&rStateLocal)) * ENV_DIM; dataLocal.velocity.x = 2;//4 * (this->random->uniform()-0.5); dataLocal.velocity.y = 2;//4 * (this->random->uniform()-0.5); dataLocal.v0 = 2; dataLocal.mass = 50; dataLocal.numNeighbor = 0; //chooseNewGoal(dataLocal.loc, 0, dataLocal.goal); dataLocal.goal = make_double2(0.5 * ENV_DIM, 0.7 * ENV_DIM); this->dataCopy = dataLocal; } __device__ void SocialForceAgent::initNewClone(SocialForceAgent *parent, SocialForceClone *childClone) { this->color = childClone->color; this->contextId = parent->contextId; //this->myOrigin = parent; this->myClone = childClone; this->goalIdx = parent->goalIdx; for (int i = 0; i < NUM_GOAL; i++) this->goalSeq[i] = parent->goalSeq[i]; for (int i = 0; i < NUM_PARAM; i++) { this->flagCloning[i] = 0; this->flagCloned[i] = 0; } this->data = parent->data; this->dataCopy = parent->dataCopy; } namespace clone { __global__ void stepKernel(SocialForceClone *c, int numElemLocal) { int index = threadIdx.x + blockIdx.x * blockDim.x; if (index < numElemLocal) c->ap->agentPtrArray[index]->step(); } __global__ void swapKernel(SocialForceClone *c, int numElemLocal) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElemLocal) { SocialForceAgent &agent = *c->ap->agentPtrArray[idx]; agent.data = agent.dataCopy; } } } void SocialForceClone::step(int stepCount) { if (numElem == 0) return; int gSize; //alterGate(stepCount); /* cudaMemcpyAsync(contextSorted, context, sizeof(SocialForceAgent*) * NUM_CAP, cudaMemcpyDeviceToDevice, myStream); cudaStreamSynchronize(myStream); NeighborModule::sortAgentByLocKernel << <1, 1, 0, myStream >> >(this->contextSorted, this->rState, NUM_CAP); cudaMemsetAsync(cidStarts, 0xff, sizeof(int) * NUM_CELL * NUM_CELL, myStream); cudaMemsetAsync(cidEnds, 0xff, sizeof(int) * NUM_CELL * NUM_CELL, myStream); cudaStreamSynchronize(myStream); gSize = GRID_SIZE(NUM_CAP); NeighborModule::setCidStartEndKernel<<<gSize, BLOCK_SIZE, 0, myStream>>>(contextSorted, cidStarts, cidEnds, NUM_CAP); NeighborModule::sortAgentByLocKernel << <1, 1, 0, myStream >> >(this->apHost->agentPtrArray, this->rState, this->numElem); */ gSize = GRID_SIZE(numElem); size_t smemSize = sizeof(SocialForceAgentData) * BLOCK_SIZE; clone::stepKernel << <gSize, BLOCK_SIZE, smemSize, myStream >> >(selfDev, numElem); //clone::stepKernel << <gSize, BLOCK_SIZE >> >(selfDev, numElem); } void SocialForceClone::swap() { if (numElem == 0) return; int gSize = GRID_SIZE(numElem); clone::swapKernel << <gSize, BLOCK_SIZE >> >(selfDev, numElem); } void SocialForceClone::alterGate(int stepCount) { bool changed = false; for (int i = 0; i < NUM_PARAM; i++) { if (cloneParams[i] == stepCount) { changed = true; gates[i].init(0, 0, 0, 0); //cudaMemcpyAsync(&selfDev->gates[i], &gates[i], sizeof(obstacleLine), cudaMemcpyHostToDevice, myStream); cudaMemcpy(&selfDev->gates[i], &gates[i], sizeof(obstacleLine), cudaMemcpyHostToDevice); } } } namespace AppUtil { __device__ bool cloningCondition(SocialForceAgent *agent, SocialForceClone *parentClone, SocialForceClone *childClone) { // if agent has been cloned? 
if (childClone->cloneFlags[agent->contextId] == true) return false; // active cloning condition double2 &loc = agent->data.loc; for (int i = 0; i < NUM_PARAM; i++) { int param1 = parentClone->cloneParams[i]; int param2 = childClone->cloneParams[i]; if (param1 != param2) { obstacleLine g1 = parentClone->gates[i]; obstacleLine g2 = childClone->gates[i]; if (g1.pointToLineDist(loc) < 6) return true; if (g2.pointToLineDist(loc) < 6) return true; } } // passive cloning condition #define MY_MAX(a, b) (a > b ? a : b) #define MY_MIN(a, b) (a < b ? a : b) int minx = MY_MAX((loc.x - RADIUS_I) / CELL_DIM, 0); int miny = MY_MAX((loc.y - RADIUS_I) / CELL_DIM, 0); int maxx = MY_MIN((loc.x + RADIUS_I) / CELL_DIM, NUM_CELL - 1); int maxy = MY_MIN((loc.y + RADIUS_I) / CELL_DIM, NUM_CELL - 1); for (int i = minx; i <= maxx; i++) for (int j = miny; j <= maxy; j++) if (childClone->takenMap[i * NUM_CELL + j]) return true; // pass all the check, don't need to be cloned return false; } __global__ void updateContextKernel(SocialForceClone *c, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElem) { SocialForceAgent *agent = c->ap->agentPtrArray[idx]; c->context[agent->contextId] = agent; } } __global__ void constructPassiveMap(SocialForceClone *c, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElem) { SocialForceAgent &agent = *c->ap->agentPtrArray[idx]; int takenId = agent.data.loc.x / CELL_DIM; takenId = takenId * NUM_CELL + agent.data.loc.y / CELL_DIM; c->takenMap[takenId] = true; } } __global__ void performCloningKernel(SocialForceClone *p, SocialForceClone *c, int numCap) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numCap) { SocialForceAgent *agent = p->context[idx]; if (cloningCondition(agent, p, c)) { uint lastNum = atomicInc(&c->numElem, numCap); SocialForceAgent& childAgent = *c->ap->agentPtrArray[lastNum]; c->ap->takenFlags[lastNum] = true; childAgent.initNewClone(agent, c); c->context[childAgent.contextId] = &childAgent; c->cloneFlags[childAgent.contextId] = true; //c->numElem++; /* not written back */ } } } __global__ void performCloningOldKernel(SocialForceClone *p, SocialForceClone *c, int numParent) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numParent) { SocialForceAgent *agent = p->ap->agentPtrArray[idx]; int cloneLevelLocal = c->cloneLevel; int cloneMaskLocal = c->cloneMasks[cloneLevelLocal]; int cloneDecision = ~agent->flagCloned[cloneLevelLocal] & cloneMaskLocal & agent->flagCloning[cloneLevelLocal]; if (cloneDecision > 0) { agent->flagCloned[cloneLevelLocal] |= cloneMaskLocal; agent->flagCloning[cloneLevelLocal] &= ~cloneMaskLocal; uint lastNum = atomicInc(&c->numElem, NUM_CAP); SocialForceAgent &childAgent = *c->ap->agentPtrArray[lastNum]; c->ap->takenFlags[lastNum] = true; childAgent.initNewClone(agent, c); c->context[childAgent.contextId] = &childAgent; c->cloneFlags[childAgent.contextId] = true; } } } __global__ void compareAndEliminateKernel(SocialForceClone *p, SocialForceClone *c, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElem) { SocialForceAgent &childAgent = *c->ap->agentPtrArray[idx]; SocialForceAgent &parentAgent = *p->context[childAgent.contextId]; // *(SocialForceAgent*)childAgent.myOrigin; double velDiff = length(childAgent.dataCopy.velocity - parentAgent.dataCopy.velocity); double locDiff = length(childAgent.dataCopy.loc - parentAgent.dataCopy.loc); if (locDiff == 0 && velDiff == 0) { c->ap->takenFlags[idx] = false; c->cloneFlags[childAgent.contextId] = 
false; } } } __global__ void compareAndEliminateOldKernel(SocialForceClone *p, SocialForceClone *c, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElem) { SocialForceAgent &childAgent = *c->ap->agentPtrArray[idx]; SocialForceAgent &parentAgent = *p->context[childAgent.contextId]; double velDiff = length(childAgent.dataCopy.velocity - parentAgent.dataCopy.velocity); double locDiff = length(childAgent.dataCopy.loc - parentAgent.dataCopy.loc); if (locDiff == 0 && velDiff == 0) { c->ap->takenFlags[idx] = false; c->cloneFlags[childAgent.contextId] = false; int cloneLevelLocal = c->cloneLevel; int cloneMaskLocal = c->cloneMasks[cloneLevelLocal]; parentAgent.flagCloned[cloneLevelLocal] &= ~cloneMaskLocal; parentAgent.flagCloning[cloneLevelLocal] &= ~cloneMaskLocal; } } } template<class T> __device__ void swap(T * ar, int a, int b) { T t1 = ar[a]; ar[a] = ar[b]; ar[b] = t1; } __global__ void reorderKernel(SocialForceClone *c, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx == 0) { int l = 0; int r = numElem; int i = l, j = l; for (; j < r; j++) { if (c->ap->takenFlags[j] == true) { swap<SocialForceAgent*>(c->ap->agentPtrArray, i, j); swap<bool>(c->ap->takenFlags, i, j); i++; } } c->numElem = i; } } }; void SocialForceSimApp::performClone(SocialForceClone *parentClone, SocialForceClone *childClone) { childClone->parentCloneid = parentClone->cloneid; // 1. copy the context of parent clone cudaMemcpyAsync(childClone->context, parentClone->context, NUM_CAP * sizeof(SocialForceAgent*), cudaMemcpyDeviceToDevice, childClone->myStream); cudaStreamSynchronize(childClone->myStream); //cudaMemcpy(childClone->context, parentClone->context, NUM_CAP * sizeof(SocialForceAgent*), cudaMemcpyDeviceToDevice); getLastCudaError("perform clone"); // 2. update the context with agents of its own if (childClone->numElem > 0) { int gSize = GRID_SIZE(childClone->numElem); AppUtil::updateContextKernel << <gSize, BLOCK_SIZE, 0, childClone->myStream >> >(childClone->selfDev, childClone->numElem); //AppUtil::updateContextKernel << <gSize, BLOCK_SIZE >> >(childClone->selfDev, childClone->numElem); getLastCudaError("perform clone"); } // 4. 
perform active and passive cloning (in cloningCondition checking) if (parentClone->numElem > 0) { int gSize = GRID_SIZE(parentClone->numElem); //AppUtil::performCloningKernel << <gSize, BLOCK_SIZE >> >(parentClone->selfDev, childClone->selfDev, NUM_CAP); AppUtil::performCloningOldKernel << <gSize, BLOCK_SIZE, 0, childClone->myStream >> >(parentClone->selfDev, childClone->selfDev, parentClone->numElem); cudaMemcpyAsync(childClone, childClone->selfDev, sizeof(SocialForceClone), cudaMemcpyDeviceToHost, childClone->myStream); cudaStreamSynchronize(childClone->myStream); getLastCudaError("perform clone"); } } void compareAndEliminateCPU(SocialForceClone *parentClone, SocialForceClone *childClone) { wchar_t message[20]; for (int i = 0; i < childClone->numElem; i++) { SocialForceAgent &childAgent = *childClone->ap->agentPtrArray[i]; SocialForceAgent parentAgent; // *(SocialForceAgent*)childAgent.myOrigin; if (length(childAgent.dataCopy.velocity - parentAgent.dataCopy.velocity) == 0 && length(childAgent.dataCopy.loc - parentAgent.dataCopy.loc) == 0) { childClone->ap->takenFlags[i] = false; childClone->cloneFlags[childAgent.contextId] = false; } /*else { if (childClone->cloneid == 4) { swprintf_s(message, 20, L"not false: %d\n", i); OutputDebugString(message); } }*/ } childClone->numElem = childClone->ap->reorder(childClone->numElem); } void SocialForceSimApp::compareAndEliminate(SocialForceClone *parentClone, SocialForceClone *childClone) { if (childClone->numElem == 0) return; int gSize = GRID_SIZE(childClone->numElem); AppUtil::compareAndEliminateOldKernel << <gSize, BLOCK_SIZE, 0, childClone->myStream >> >(parentClone->selfDev, childClone->selfDev, childClone->numElem); //AppUtil::compareAndEliminateKernel << <gSize, BLOCK_SIZE>> >(parentClone->selfDev, childClone->selfDev, childClone->numElem); gSize = GRID_SIZE(NUM_CAP); AppUtil::reorderKernel << <1, 1, 0, childClone->myStream >> >(childClone->selfDev, childClone->numElem); //AppUtil::reorderKernel << <1, 1 >> >(childClone->selfDev, childClone->numElem); cudaMemcpyAsync(childClone, childClone->selfDev, sizeof(SocialForceClone), cudaMemcpyDeviceToHost, childClone->myStream); cudaStreamSynchronize(childClone->myStream); } void SocialForceSimApp::proc(int p, int c, bool o, char *s) { performClone(cAll[p], cAll[c]); cAll[c]->step(stepCount); if (o) { if (stepCount < 800) cAll[c]->output(stepCount, s); } compareAndEliminate(cAll[p], cAll[c]); } void swap(int **cloneTree, int a, int b) { int t1 = cloneTree[0][a]; cloneTree[0][a] = cloneTree[0][b]; cloneTree[0][b] = t1; t1 = cloneTree[1][a]; cloneTree[1][a] = cloneTree[1][b]; cloneTree[1][b] = t1; } void quickSort(int **cloneTree, int l, int r) { if (l == r) return; int pi = l + rand() % (r - l); swap(cloneTree, l, pi); int pivot = cloneTree[0][l]; int i = l + 1, j = l + 1; for (; j < r; j++) { if (cloneTree[0][j] < pivot) { swap(cloneTree, i, j); i++; } } swap(cloneTree, l, i - 1); quickSort(cloneTree, l, i - 1); quickSort(cloneTree, i, r); } void SocialForceSimApp::mst() { // clone diff matrix int **cloneDiff = new int*[totalClone]; for (int i = 0; i < totalClone; i++) { cloneDiff[i] = new int[totalClone]; for (int j = 0; j < totalClone; j++) cloneDiff[i][j] = 0; } for (int i = 0; i < totalClone; i++) { for (int j = 0; j < totalClone; j++) { for (int k = 0; k < NUM_PARAM; k++) { if (cAll[i]->cloneParams[k] != cAll[j]->cloneParams[k]) cloneDiff[i][j]++; } wchar_t message[20]; swprintf_s(message, 20, L"%d ", cloneDiff[i][j]); OutputDebugString(message); } OutputDebugString(L"\n"); } int *parent = 
cloneTree[0] = new int[totalClone]; int *child = cloneTree[1] = new int[totalClone]; int *key = new int[totalClone]; bool *mstSet = new bool[totalClone]; for (int i = 0; i < totalClone; i++) child[i] = i, key[i] = INT_MAX, mstSet[i] = false; key[0] = 0; parent[0] = -1; child[0] = 0; int count = 0; while (count++ < totalClone - 1) { int minKey = INT_MAX; int minIdx; for (int j = 0; j < totalClone; j++) if (mstSet[j] == false && key[j] < minKey) minKey = key[j], minIdx = j; mstSet[minIdx] = true; for (int j = 0; j < totalClone; j++) if (cloneDiff[minIdx][j] && mstSet[j] == false && cloneDiff[minIdx][j] < key[j]) parent[j] = minIdx, key[j] = cloneDiff[minIdx][j]; } quickSort(cloneTree, 0, totalClone); for (int i = 1; i < totalClone; i++) { wchar_t message[20]; swprintf_s(message, 20, L"%d - %d: %d\n", cloneTree[0][i], cloneTree[1][i], cloneDiff[i][parent[i]]); OutputDebugString(message); } delete mstSet; delete key; } __global__ void getLocAndColorKernel(SocialForceClone *c, double2 *loc, uchar4 *color, int *contextId, int numElem) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElem) { loc[idx] = c->context[idx]->data.loc; color[idx] = c->context[idx]->color; contextId[idx] = c->context[idx]->contextId; } } void SocialForceSimApp::getLocAndColorFromDevice(){ SocialForceClone *c = cAll[paintId]; int gSize = GRID_SIZE(NUM_CAP); getLocAndColorKernel << <gSize, BLOCK_SIZE >> >(c->selfDev, debugLocDev, debugColorDev, debugContextIdDev, NUM_CAP); cudaMemcpy(debugLocHost, debugLocDev, sizeof(double2) * NUM_CAP, cudaMemcpyDeviceToHost); cudaMemcpy(debugColorHost, debugColorDev, sizeof(uchar4) * NUM_CAP, cudaMemcpyDeviceToHost); cudaMemcpy(debugContextIdHost, debugContextIdDev, sizeof(int) * NUM_CAP, cudaMemcpyDeviceToHost); cudaMemcpy(c->takenMap, c->selfDev->takenMap, sizeof(bool) * NUM_CELL * NUM_CELL, cudaMemcpyDeviceToHost); //cudaMemcpy(debugCidStartsHost, c->cidStarts, sizeof(int) * NUM_CELL * NUM_CELL, cudaMemcpyDeviceToHost); //cudaMemcpy(debugCidEndsHost, c->cidEnds, sizeof(int) * NUM_CELL * NUM_CELL, cudaMemcpyDeviceToHost); //wchar_t message[128]; //for (int i = 0; i < NUM_CELL * NUM_CELL; i++) { // swprintf_s(message, L"(%d, %d) ", debugCidStartsHost[i], debugCidEndsHost[i]); // OutputDebugString(message); //} //OutputDebugString(L"\n"); } __global__ void initRandomKernel(SocialForceClone* c, int numElemLocal) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElemLocal) { curand_init(1234, idx, 0, &c->rState[idx]); } } __global__ void initRootCloneKernel(SocialForceClone* c, int numElemLocal) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < numElemLocal) { c->ap->agentArray[idx].init(c, idx); c->context[idx] = &c->ap->agentArray[idx]; c->cloneFlags[idx] = false; } if (idx == 0) c->numElem = numElemLocal; } void SocialForceSimApp::initRootClone(SocialForceClone* cHost, SocialForceClone* cDev) { cHost->numElem = NUM_CAP; int gSize = GRID_SIZE(NUM_CAP); initRandomKernel << <gSize, BLOCK_SIZE >> >(cDev, NUM_CAP); initRootCloneKernel << <gSize, BLOCK_SIZE >> >(cDev, NUM_CAP); }
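// The sharedMinAndMax() helper defined earlier in this file does a warp-wide butterfly
// reduction with __shfl_xor, the shuffle form that predates the mask-taking *_sync
// intrinsics. Below is a minimal sketch (not taken from the original source) of the same
// reduction written with __shfl_xor_sync; the full mask 0xffffffff assumes all 32 lanes
// of the warp are active, which the surrounding kernels do not explicitly guarantee.
__device__ int warpMinOrMaxSync(int value, bool minFlag) {
    for (int i = 16; i >= 1; i /= 2) {
        int other = __shfl_xor_sync(0xffffffffu, value, i, 32);
        value = minFlag ? min(value, other) : max(value, other);
    }
    return value; // every lane ends up holding the warp-wide min or max
}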
730db65281cf2b4bea8fce69fe1424e975f00f51.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <cassert> #include <cstdio> #include <iostream> #include <fstream> #include <string> #include <vector> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "definitions.h" #include "config.h" #include "particle.h" #include "beam_elements.h" #if defined( DEMOTRACK_ENABLE_BEAMFIELDS ) && ( DEMOTRACK_ENABLE_BEAMFIELDS == 1 ) #include "beamfields.h" #endif /* DEMOTRACK_ENABLE_BEAMFIELDS */ #include "lattice.h" __global__ void Track_particles_until_turn( demotrack::Particle* particle_set, demotrack::int64_type const num_particles, double const* __restrict__ lattice_buffer, demotrack::uint64_type const max_lattice_buffer_index, demotrack::int64_type const until_turn ) { namespace dt = demotrack; dt::int64_type const STRIDE = blockDim.x * gridDim.x; dt::int64_type idx = threadIdx.x + blockIdx.x * blockDim.x; #if defined( DEMOTRACK_ENABLE_BEAMFIELDS ) && \ ( DEMOTRACK_ENABLE_BEAMFIELDS == 1 ) if( idx == 0 ) printf( "info :: beam-fields enabled in kernel\r\n" ); #else /* !defined( DEMOTRACK_ENABLE_BEAMFIELDS ) */ if( idx == 0 ) printf( "info :: beam-fields disabled in kernel\r\n" ); #endif /* DEMOTRACK_ENABLE_BEAMFIELDS */ for( ; idx < num_particles ; idx += STRIDE ) { dt::Particle* __restrict__ p = &particle_set[ idx ]; dt::uint64_type const start_at_element = p->at_element; while( ( p->state == 1 ) && ( p->at_turn < until_turn ) ) { dt::uint64_type slot_idx = 0; while( ( p->state == 1 ) && ( slot_idx < max_lattice_buffer_index ) ) { /* all elements are stored with their type_id as the first * data member -> retrieve this number and dispatch * the track method accordingly */ dt::beam_element_type const type_id = ( dt::beam_element_type )( int )lattice_buffer[ slot_idx ]; switch( type_id ) { case dt::BEAM_ELEMENT_DRIFT: // cf. beam_elements.h { const dt::Drift *const __restrict__ elem = ( dt::Drift const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); dt::Drift::GLOBAL_APERTURE_CHECK( *p ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_DRIFT_EXACT: // cf. beam_elements.h { const dt::DriftExact *const __restrict__ elem = ( dt::DriftExact const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); // Use GLOBAL_APERTURE_CHECK from Drift -> it's the same dt::Drift::GLOBAL_APERTURE_CHECK( *p ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_MULTIPOLE: // cf. beam_elements.h { const dt::Multipole *const __restrict__ elem = ( dt::Multipole const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_XY_SHIFT: // cf. beam_elements.h { const dt::XYShift *const __restrict__ elem = ( dt::XYShift const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_SROTATION: // cf. beam_elements.h { const dt::SRotation *const __restrict__ elem = ( dt::SRotation const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_CAVITY: // cf. beam_elements.h { const dt::Cavity *const __restrict__ elem = ( dt::Cavity const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_LIMIT_RECT: // cf. 
beam_elements.h { const dt::LimitRect *const __restrict__ elem = ( dt::LimitRect const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_LIMIT_ELLIPSE: // cf. beam_elements.h { const dt::LimitEllipse *const __restrict__ elem = ( dt::LimitEllipse const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_LIMIT_RECT_ELLIPSE: // cf. beam_elements.h { const dt::LimitRectEllipse *const __restrict__ elem = ( dt::LimitRectEllipse const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_DIPEDGE: // cf. beam_elements.h { const dt::DipoleEdge *const __restrict__ elem = ( dt::DipoleEdge const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } #if defined( DEMOTRACK_ENABLE_BEAMFIELDS ) && \ ( DEMOTRACK_ENABLE_BEAMFIELDS == 1 ) case dt::BEAM_ELEMENT_SC_COASTING: // cf. beamfields.h { const dt::SpaceChargeCoasting *const __restrict__ elem = ( dt::SpaceChargeCoasting const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } #endif /* beamfields enabled */ default: { /* unknown beam element -> loose particle and quit */ p->state = 0; slot_idx = max_lattice_buffer_index; } }; } if( p->state == 1 ) { p->at_element = start_at_element; ++p->at_turn; } } } } int main( int argc, char* argv[] ) { namespace dt = demotrack; /* ********************************************************************* */ /* Prepare particle set to track */ dt::uint64_type NUM_PARTICLES = 50 * 1024; dt::int64_type TRACK_UNTIL_TURN = 1000; std::string path_to_lattice_data = std::string{}; std::string path_to_particle_data = std::string{}; std::string path_to_output_data = std::string{}; if( argc >= 2 ) { NUM_PARTICLES = std::stoi( argv[ 1 ] ); if( argc >= 3 ) { TRACK_UNTIL_TURN = std::stoi( argv[ 2 ] ); if( argc >= 4 ) { path_to_particle_data = std::string{ argv[ 3 ] }; if( path_to_particle_data.compare( "default" ) == 0 ) { path_to_particle_data.clear(); } if( argc >= 5 ) { path_to_lattice_data = std::string{ argv[ 4 ] }; if( path_to_lattice_data.compare( "default" ) == 0 ) { path_to_lattice_data.clear(); } if( argc >= 6 ) { path_to_output_data = std::string{ argv[ 5 ] }; if( path_to_output_data.compare( "none" ) == 0 ) { path_to_output_data.clear(); } } } } } } else { std::cout << "Usage : " << argv[ 0 ] << " [NUM_PARTICLES] [TRACK_UNTIL_TURN]" << " [PATH_TO_PARTICLE_DATA] [PATH_TO_LATTICE_DATA]" << " [PATH_TO_OUTPUT_DATA]\r\n" << std::endl; } /* ********************************************************************* */ /* Prepare particle data: */ std::vector< dt::Particle > particles_host; dt::Particles_load( particles_host, NUM_PARTICLES, path_to_particle_data ); /* ********************************************************************* */ /* Prepare lattice / machine description: */ std::vector< double > lattice_host; dt::uint64_type const LATTICE_SIZE = dt::load_lattice( lattice_host, path_to_lattice_data ); /* ********************************************************************** */ /* Allocate buffers on the device */ dt::Particle* particles_dev = nullptr; double* lattice_dev = nullptr; auto status = ::hipMalloc( ( void** )&particles_dev, sizeof( dt::Particle ) * 
particles_host.size() ); assert( status == hipSuccess ); status = ::hipMalloc( ( void** )&lattice_dev, sizeof( double ) * LATTICE_SIZE ); assert( status == hipSuccess ); /* Copy particle and lattice data to device */ status = ::hipMemcpy( lattice_dev, lattice_host.data(), LATTICE_SIZE * sizeof( double ), ::hipMemcpyHostToDevice ); assert( status == hipSuccess ); status = ::hipMemcpy( particles_dev, particles_host.data(), particles_host.size() * sizeof( dt::Particle ), ::hipMemcpyHostToDevice ); assert( status == hipSuccess ); /* ******************************************************************** */ /* Estimate block size */ int BLOCK_SIZE = 0; int MIN_GRID_SIZE = 0; #if defined( DEMOTRACK_CUDA_CALCULATE_BLOCKSIZE ) && \ ( DEMOTRACK_CUDA_CALCULATE_BLOCKSIZE == 1 ) status = ::hipOccupancyMaxPotentialBlockSize( &MIN_GRID_SIZE, /* -> minimum grid size needed for max occupancy */ &BLOCK_SIZE, /* -> estimated optimal block size */ Track_particles_until_turn, /* the kernel */ 0u, /* -> dynamic shared memory per block required [bytes] */ 0u /* -> max block size limit for the kernel; 0 == no limit */ ); assert( status == hipSuccess ); #elif defined( DEMOTRACK_DEFAULT_BLOCK_SIZE ) && \ ( DEMOTRACK_DEFAULT_BLOCK_SIZE > 0 ) BLOCK_SIZE = DEMOTRACK_DEFAULT_BLOCK_SIZE; #else BLOCK_SIZE = 1; #endif /* DEMOTRACK_HIP_CALCULATE_BLOCKSIZE */ assert( BLOCK_SIZE > 0 ); int const GRID_SIZE = ( NUM_PARTICLES + BLOCK_SIZE - 1 ) / BLOCK_SIZE; /* ******************************************************************** */ /* Run kernel: */ ::hipDeviceProp_t props; int device = 0; status = ::hipGetDevice( &device ); assert( status == hipSuccess ); status = ::hipGetDeviceProperties( &props, device ); assert( status == hipSuccess ); char pci_bus_id_str[] = { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0' }; status = ::hipDeviceGetPCIBusId( pci_bus_id_str, 32, device ); assert( status == hipSuccess ); std::cout << "number of particles : " << NUM_PARTICLES << "\r\n" << "number of turns : " << TRACK_UNTIL_TURN << "\r\n"; if( !path_to_particle_data.empty() ) { std::cout << "particle data : " << path_to_particle_data << "\r\n"; } else { std::cout << "particle data : generated\r\n"; } if( !path_to_lattice_data.empty() ) { std::cout << "lattice : " << path_to_lattice_data << "\r\n"; } else { std::cout << "lattice : generated fodo lattice\r\n"; } #if defined( DEMOTRACK_ENABLE_BEAMFIELDS ) && \ ( DEMOTRACK_ENABLE_BEAMFIELDS == 1 ) std::cout << "space-charge enabled : true\r\n"; #else std::cout << "space-charge enabled : false\r\n"; #endif /* SC emabled */ std::cout << "DEVICE : " << pci_bus_id_str << " ( " << props.name << " )\r\n" << "NUM_OF_BLOCKS : " << GRID_SIZE << "\r\n" << "MIN_GRID_SIZE : " << MIN_GRID_SIZE << "\r\n" << "THREADS_PER_BLOCK : " << BLOCK_SIZE << "\r\n"; if( !path_to_output_data.empty() ) { std::cout << "path to output data : " << path_to_output_data << "\r\n"; } /* Prepare cuda events to estimate the elapsed wall time */ ::hipEvent_t start; status = ::hipEventCreate( &start ); assert( status == hipSuccess ); ::hipEvent_t stop; status = ::hipEventCreate( &stop ); assert( status == hipSuccess ); status = ::hipEventRecord( start ); assert( status == hipSuccess ); /* Run kernel */ hipLaunchKernelGGL(( Track_particles_until_turn), dim3(GRID_SIZE), dim3(BLOCK_SIZE) , 0, 0, particles_dev, NUM_PARTICLES, lattice_dev, LATTICE_SIZE, TRACK_UNTIL_TURN ); status = 
::hipDeviceSynchronize(); /* Estimate wall time */ status = ::hipEventRecord( stop ); assert( status == hipSuccess ); status = ::hipEventSynchronize( stop ); assert( status == hipSuccess ); float wtime = 0.0; status = ::hipEventElapsedTime( &wtime, start, stop ); assert( status == hipSuccess ); std::cout << "-------------------------------------------------------\r\n" << "Elapsed time : " << wtime << " msec total \r\n" << " : " << wtime / ( ::max( NUM_PARTICLES * TRACK_UNTIL_TURN, dt::uint64_type{ 1 } ) ) << " msec / particle / turn\r\n"; /* Fetch data */ status = ::hipMemcpy( particles_host.data(), particles_dev, particles_host.size() * sizeof( dt::Particle ), ::hipMemcpyDeviceToHost ); assert( status == hipSuccess ); /* ********************************************************************* */ /* Verify tracking results */ dt::uint64_type num_active_particles = 0u; dt::uint64_type num_lost_particles = 0u; for( auto& p : particles_host ) { if( ( p.state == 1 ) && ( p.at_turn == TRACK_UNTIL_TURN ) ) { ++num_active_particles; } else if( ( p.state == 0 ) && ( p.at_turn < TRACK_UNTIL_TURN ) ) { ++num_lost_particles; } else { std::cerr << "illegal particle id = " << p.id << ", at_turn = " << p.at_turn << ", at_element = " << p.at_element << ", state = " << p.state << std::endl; } } std::cout << "-------------------------------------------------------\r\n" << "num lost particles : " << num_lost_particles << "\r\n" << "num active particles : " << num_active_particles << "\r\n" << std::endl; if( !path_to_output_data.empty() ) { FILE* fp = std::fopen( path_to_output_data.c_str(), "wb" ); double const temp = static_cast< double >( particles_host.size() ); auto ret = std::fwrite( &temp, sizeof( double ), 1u, fp ); bool success = ( ret == 1 ); for( auto const& p : particles_host ) { ret = std::fwrite( &p, sizeof( dt::Particle ), 1u, fp ); success &= ( ret == 1 ); } if( success ) { std::cout << "Written particle state to " << path_to_output_data << "\r\n" << std::endl; } std::fflush( fp ); std::fclose( fp ); } /* ********************************************************************* */ /* Cleaning up, Freeing resources */ ::hipFree( lattice_dev ); lattice_dev = nullptr; ::hipFree( particles_dev ); particles_dev = nullptr; ::hipEventDestroy( start ); ::hipEventDestroy( stop ); return 0; }
730db65281cf2b4bea8fce69fe1424e975f00f51.cu
#include <algorithm> #include <cassert> #include <cstdio> #include <iostream> #include <fstream> #include <string> #include <vector> #include <cuda.h> #include <cuda_runtime.h> #include "definitions.h" #include "config.h" #include "particle.h" #include "beam_elements.h" #if defined( DEMOTRACK_ENABLE_BEAMFIELDS ) && ( DEMOTRACK_ENABLE_BEAMFIELDS == 1 ) #include "beamfields.h" #endif /* DEMOTRACK_ENABLE_BEAMFIELDS */ #include "lattice.h" __global__ void Track_particles_until_turn( demotrack::Particle* particle_set, demotrack::int64_type const num_particles, double const* __restrict__ lattice_buffer, demotrack::uint64_type const max_lattice_buffer_index, demotrack::int64_type const until_turn ) { namespace dt = demotrack; dt::int64_type const STRIDE = blockDim.x * gridDim.x; dt::int64_type idx = threadIdx.x + blockIdx.x * blockDim.x; #if defined( DEMOTRACK_ENABLE_BEAMFIELDS ) && \ ( DEMOTRACK_ENABLE_BEAMFIELDS == 1 ) if( idx == 0 ) printf( "info :: beam-fields enabled in kernel\r\n" ); #else /* !defined( DEMOTRACK_ENABLE_BEAMFIELDS ) */ if( idx == 0 ) printf( "info :: beam-fields disabled in kernel\r\n" ); #endif /* DEMOTRACK_ENABLE_BEAMFIELDS */ for( ; idx < num_particles ; idx += STRIDE ) { dt::Particle* __restrict__ p = &particle_set[ idx ]; dt::uint64_type const start_at_element = p->at_element; while( ( p->state == 1 ) && ( p->at_turn < until_turn ) ) { dt::uint64_type slot_idx = 0; while( ( p->state == 1 ) && ( slot_idx < max_lattice_buffer_index ) ) { /* all elements are stored with their type_id as the first * data member -> retrieve this number and dispatch * the track method accordingly */ dt::beam_element_type const type_id = ( dt::beam_element_type )( int )lattice_buffer[ slot_idx ]; switch( type_id ) { case dt::BEAM_ELEMENT_DRIFT: // cf. beam_elements.h { const dt::Drift *const __restrict__ elem = ( dt::Drift const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); dt::Drift::GLOBAL_APERTURE_CHECK( *p ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_DRIFT_EXACT: // cf. beam_elements.h { const dt::DriftExact *const __restrict__ elem = ( dt::DriftExact const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); // Use GLOBAL_APERTURE_CHECK from Drift -> it's the same dt::Drift::GLOBAL_APERTURE_CHECK( *p ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_MULTIPOLE: // cf. beam_elements.h { const dt::Multipole *const __restrict__ elem = ( dt::Multipole const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_XY_SHIFT: // cf. beam_elements.h { const dt::XYShift *const __restrict__ elem = ( dt::XYShift const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_SROTATION: // cf. beam_elements.h { const dt::SRotation *const __restrict__ elem = ( dt::SRotation const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_CAVITY: // cf. beam_elements.h { const dt::Cavity *const __restrict__ elem = ( dt::Cavity const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_LIMIT_RECT: // cf. 
beam_elements.h { const dt::LimitRect *const __restrict__ elem = ( dt::LimitRect const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_LIMIT_ELLIPSE: // cf. beam_elements.h { const dt::LimitEllipse *const __restrict__ elem = ( dt::LimitEllipse const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_LIMIT_RECT_ELLIPSE: // cf. beam_elements.h { const dt::LimitRectEllipse *const __restrict__ elem = ( dt::LimitRectEllipse const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } case dt::BEAM_ELEMENT_DIPEDGE: // cf. beam_elements.h { const dt::DipoleEdge *const __restrict__ elem = ( dt::DipoleEdge const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } #if defined( DEMOTRACK_ENABLE_BEAMFIELDS ) && \ ( DEMOTRACK_ENABLE_BEAMFIELDS == 1 ) case dt::BEAM_ELEMENT_SC_COASTING: // cf. beamfields.h { const dt::SpaceChargeCoasting *const __restrict__ elem = ( dt::SpaceChargeCoasting const* )&lattice_buffer[ slot_idx ]; dt::uint64_type const next_slot_idx = elem->track( *p, slot_idx ); slot_idx = next_slot_idx; break; } #endif /* beamfields enabled */ default: { /* unknown beam element -> loose particle and quit */ p->state = 0; slot_idx = max_lattice_buffer_index; } }; } if( p->state == 1 ) { p->at_element = start_at_element; ++p->at_turn; } } } } int main( int argc, char* argv[] ) { namespace dt = demotrack; /* ********************************************************************* */ /* Prepare particle set to track */ dt::uint64_type NUM_PARTICLES = 50 * 1024; dt::int64_type TRACK_UNTIL_TURN = 1000; std::string path_to_lattice_data = std::string{}; std::string path_to_particle_data = std::string{}; std::string path_to_output_data = std::string{}; if( argc >= 2 ) { NUM_PARTICLES = std::stoi( argv[ 1 ] ); if( argc >= 3 ) { TRACK_UNTIL_TURN = std::stoi( argv[ 2 ] ); if( argc >= 4 ) { path_to_particle_data = std::string{ argv[ 3 ] }; if( path_to_particle_data.compare( "default" ) == 0 ) { path_to_particle_data.clear(); } if( argc >= 5 ) { path_to_lattice_data = std::string{ argv[ 4 ] }; if( path_to_lattice_data.compare( "default" ) == 0 ) { path_to_lattice_data.clear(); } if( argc >= 6 ) { path_to_output_data = std::string{ argv[ 5 ] }; if( path_to_output_data.compare( "none" ) == 0 ) { path_to_output_data.clear(); } } } } } } else { std::cout << "Usage : " << argv[ 0 ] << " [NUM_PARTICLES] [TRACK_UNTIL_TURN]" << " [PATH_TO_PARTICLE_DATA] [PATH_TO_LATTICE_DATA]" << " [PATH_TO_OUTPUT_DATA]\r\n" << std::endl; } /* ********************************************************************* */ /* Prepare particle data: */ std::vector< dt::Particle > particles_host; dt::Particles_load( particles_host, NUM_PARTICLES, path_to_particle_data ); /* ********************************************************************* */ /* Prepare lattice / machine description: */ std::vector< double > lattice_host; dt::uint64_type const LATTICE_SIZE = dt::load_lattice( lattice_host, path_to_lattice_data ); /* ********************************************************************** */ /* Allocate buffers on the device */ dt::Particle* particles_dev = nullptr; double* lattice_dev = nullptr; auto status = ::cudaMalloc( ( void** )&particles_dev, sizeof( dt::Particle ) * 
particles_host.size() ); assert( status == CUDA_SUCCESS ); status = ::cudaMalloc( ( void** )&lattice_dev, sizeof( double ) * LATTICE_SIZE ); assert( status == CUDA_SUCCESS ); /* Copy particle and lattice data to device */ status = ::cudaMemcpy( lattice_dev, lattice_host.data(), LATTICE_SIZE * sizeof( double ), ::cudaMemcpyHostToDevice ); assert( status == CUDA_SUCCESS ); status = ::cudaMemcpy( particles_dev, particles_host.data(), particles_host.size() * sizeof( dt::Particle ), ::cudaMemcpyHostToDevice ); assert( status == CUDA_SUCCESS ); /* ******************************************************************** */ /* Estimate block size */ int BLOCK_SIZE = 0; int MIN_GRID_SIZE = 0; #if defined( DEMOTRACK_CUDA_CALCULATE_BLOCKSIZE ) && \ ( DEMOTRACK_CUDA_CALCULATE_BLOCKSIZE == 1 ) status = ::cudaOccupancyMaxPotentialBlockSize( &MIN_GRID_SIZE, /* -> minimum grid size needed for max occupancy */ &BLOCK_SIZE, /* -> estimated optimal block size */ Track_particles_until_turn, /* the kernel */ 0u, /* -> dynamic shared memory per block required [bytes] */ 0u /* -> max block size limit for the kernel; 0 == no limit */ ); assert( status == CUDA_SUCCESS ); #elif defined( DEMOTRACK_DEFAULT_BLOCK_SIZE ) && \ ( DEMOTRACK_DEFAULT_BLOCK_SIZE > 0 ) BLOCK_SIZE = DEMOTRACK_DEFAULT_BLOCK_SIZE; #else BLOCK_SIZE = 1; #endif /* DEMOTRACK_HIP_CALCULATE_BLOCKSIZE */ assert( BLOCK_SIZE > 0 ); int const GRID_SIZE = ( NUM_PARTICLES + BLOCK_SIZE - 1 ) / BLOCK_SIZE; /* ******************************************************************** */ /* Run kernel: */ ::cudaDeviceProp props; int device = 0; status = ::cudaGetDevice( &device ); assert( status == CUDA_SUCCESS ); status = ::cudaGetDeviceProperties( &props, device ); assert( status == CUDA_SUCCESS ); char pci_bus_id_str[] = { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0' }; status = ::cudaDeviceGetPCIBusId( pci_bus_id_str, 32, device ); assert( status == CUDA_SUCCESS ); std::cout << "number of particles : " << NUM_PARTICLES << "\r\n" << "number of turns : " << TRACK_UNTIL_TURN << "\r\n"; if( !path_to_particle_data.empty() ) { std::cout << "particle data : " << path_to_particle_data << "\r\n"; } else { std::cout << "particle data : generated\r\n"; } if( !path_to_lattice_data.empty() ) { std::cout << "lattice : " << path_to_lattice_data << "\r\n"; } else { std::cout << "lattice : generated fodo lattice\r\n"; } #if defined( DEMOTRACK_ENABLE_BEAMFIELDS ) && \ ( DEMOTRACK_ENABLE_BEAMFIELDS == 1 ) std::cout << "space-charge enabled : true\r\n"; #else std::cout << "space-charge enabled : false\r\n"; #endif /* SC emabled */ std::cout << "DEVICE : " << pci_bus_id_str << " ( " << props.name << " )\r\n" << "NUM_OF_BLOCKS : " << GRID_SIZE << "\r\n" << "MIN_GRID_SIZE : " << MIN_GRID_SIZE << "\r\n" << "THREADS_PER_BLOCK : " << BLOCK_SIZE << "\r\n"; if( !path_to_output_data.empty() ) { std::cout << "path to output data : " << path_to_output_data << "\r\n"; } /* Prepare cuda events to estimate the elapsed wall time */ ::cudaEvent_t start; status = ::cudaEventCreate( &start ); assert( status == CUDA_SUCCESS ); ::cudaEvent_t stop; status = ::cudaEventCreate( &stop ); assert( status == CUDA_SUCCESS ); status = ::cudaEventRecord( start ); assert( status == CUDA_SUCCESS ); /* Run kernel */ Track_particles_until_turn<<< GRID_SIZE, BLOCK_SIZE >>>( particles_dev, NUM_PARTICLES, lattice_dev, LATTICE_SIZE, TRACK_UNTIL_TURN ); status = 
::cudaDeviceSynchronize(); /* Estimate wall time */ status = ::cudaEventRecord( stop ); assert( status == CUDA_SUCCESS ); status = ::cudaEventSynchronize( stop ); assert( status == CUDA_SUCCESS ); float wtime = 0.0; status = ::cudaEventElapsedTime( &wtime, start, stop ); assert( status == CUDA_SUCCESS ); std::cout << "-------------------------------------------------------\r\n" << "Elapsed time : " << wtime << " msec total \r\n" << " : " << wtime / ( std::max( NUM_PARTICLES * TRACK_UNTIL_TURN, dt::uint64_type{ 1 } ) ) << " msec / particle / turn\r\n"; /* Fetch data */ status = ::cudaMemcpy( particles_host.data(), particles_dev, particles_host.size() * sizeof( dt::Particle ), ::cudaMemcpyDeviceToHost ); assert( status == CUDA_SUCCESS ); /* ********************************************************************* */ /* Verify tracking results */ dt::uint64_type num_active_particles = 0u; dt::uint64_type num_lost_particles = 0u; for( auto& p : particles_host ) { if( ( p.state == 1 ) && ( p.at_turn == TRACK_UNTIL_TURN ) ) { ++num_active_particles; } else if( ( p.state == 0 ) && ( p.at_turn < TRACK_UNTIL_TURN ) ) { ++num_lost_particles; } else { std::cerr << "illegal particle id = " << p.id << ", at_turn = " << p.at_turn << ", at_element = " << p.at_element << ", state = " << p.state << std::endl; } } std::cout << "-------------------------------------------------------\r\n" << "num lost particles : " << num_lost_particles << "\r\n" << "num active particles : " << num_active_particles << "\r\n" << std::endl; if( !path_to_output_data.empty() ) { FILE* fp = std::fopen( path_to_output_data.c_str(), "wb" ); double const temp = static_cast< double >( particles_host.size() ); auto ret = std::fwrite( &temp, sizeof( double ), 1u, fp ); bool success = ( ret == 1 ); for( auto const& p : particles_host ) { ret = std::fwrite( &p, sizeof( dt::Particle ), 1u, fp ); success &= ( ret == 1 ); } if( success ) { std::cout << "Written particle state to " << path_to_output_data << "\r\n" << std::endl; } std::fflush( fp ); std::fclose( fp ); } /* ********************************************************************* */ /* Cleaning up, Freeing resources */ ::cudaFree( lattice_dev ); lattice_dev = nullptr; ::cudaFree( particles_dev ); particles_dev = nullptr; ::cudaEventDestroy( start ); ::cudaEventDestroy( stop ); return 0; }
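// The block-size selection in main() above uses cudaOccupancyMaxPotentialBlockSize when
// DEMOTRACK_CUDA_CALCULATE_BLOCKSIZE is defined. The following is a minimal, stand-alone
// sketch of that occupancy-driven launch pattern; scaleKernel and its float buffer are
// illustrative placeholders and are not part of demotrack.
__global__ void scaleKernel(float* data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= 2.0f; // trivial per-element work, just to have a kernel to query
}

void launchWithOccupancy(float* d_data, int n) {
    int minGridSize = 0, blockSize = 0;
    // Ask the runtime for the block size that maximizes occupancy for this kernel
    // (0 bytes of dynamic shared memory, no upper block-size limit).
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, scaleKernel, 0, 0);
    int gridSize = (n + blockSize - 1) / blockSize; // enough blocks to cover all n elements
    scaleKernel<<<gridSize, blockSize>>>(d_data, n);
}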
a48932ed7b61b3ce5fff01cbfa5e8d523375431d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "set_valid_pos.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int32_t *pos_buff = NULL; hipMalloc(&pos_buff, XSIZE*YSIZE); int32_t *count_buff = NULL; hipMalloc(&count_buff, XSIZE*YSIZE); const int32_t entry_count = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( set_valid_pos), dim3(gridBlock),dim3(threadBlock), 0, 0, pos_buff,count_buff,entry_count); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( set_valid_pos), dim3(gridBlock),dim3(threadBlock), 0, 0, pos_buff,count_buff,entry_count); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( set_valid_pos), dim3(gridBlock),dim3(threadBlock), 0, 0, pos_buff,count_buff,entry_count); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
a48932ed7b61b3ce5fff01cbfa5e8d523375431d.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "set_valid_pos.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int32_t *pos_buff = NULL; cudaMalloc(&pos_buff, XSIZE*YSIZE); int32_t *count_buff = NULL; cudaMalloc(&count_buff, XSIZE*YSIZE); const int32_t entry_count = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); set_valid_pos<<<gridBlock,threadBlock>>>(pos_buff,count_buff,entry_count); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { set_valid_pos<<<gridBlock,threadBlock>>>(pos_buff,count_buff,entry_count); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { set_valid_pos<<<gridBlock,threadBlock>>>(pos_buff,count_buff,entry_count); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
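// The benchmark above brackets 1000 asynchronous kernel launches with steady_clock
// timestamps but only synchronizes before the timed loop, so the reported figure mostly
// reflects launch submission cost rather than device execution time (unless the launch
// queue fills up and stalls the host). A minimal sketch of device-side timing with CUDA
// events follows; emptyKernel is a placeholder standing in for the benchmarked kernel.
__global__ void emptyKernel() {}

float timeThousandLaunchesMs() {
    cudaEvent_t evStart, evStop;
    cudaEventCreate(&evStart);
    cudaEventCreate(&evStop);
    cudaEventRecord(evStart);
    for (int it = 0; it < 1000; ++it) {
        emptyKernel<<<1, 32>>>();
    }
    cudaEventRecord(evStop);
    cudaEventSynchronize(evStop); // wait until all 1000 launches have finished on the device
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, evStart, evStop); // elapsed device time in milliseconds
    cudaEventDestroy(evStart);
    cudaEventDestroy(evStop);
    return ms;
}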
79f36e32dada004550f37cf6d0d4f1b472a85393.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdint> #include <cstdio> #include <ctime> #include <cstring> #include <hip/hip_fp16.h> #include "volrend/cuda/common.cuh" #include "volrend/cuda/rt_core.cuh" #include "volrend/render_options.hpp" #include "volrend/cuda/data_spec.cuh" namespace volrend { #define MAX3(a, b, c) max(max(a, b), c) #define MIN3(a, b, c) min(min(a, b), c) namespace { template<typename scalar_t> __host__ __device__ __inline__ static void screen2worlddir( int ix, int iy, const CameraSpec& cam, scalar_t* out, scalar_t* cen) { scalar_t xyz[3] ={ (ix - 0.5f * cam.width) / cam.fx, -(iy - 0.5f * cam.height) / cam.fy, -1.0f}; _mv3(cam.transform, xyz, out); _normalize(out); _copy3(cam.transform + 9, cen); } template<typename scalar_t> __host__ __device__ __inline__ void maybe_world2ndc( const TreeSpec& tree, scalar_t* __restrict__ dir, scalar_t* __restrict__ cen) { if (tree.ndc_width <= 0) return; scalar_t t = -(1.f + cen[2]) / dir[2]; for (int i = 0; i < 3; ++i) { cen[i] = cen[i] + t * dir[i]; } dir[0] = -((2 * tree.ndc_focal) / tree.ndc_width) * (dir[0] / dir[2] - cen[0] / cen[2]); dir[1] = -((2 * tree.ndc_focal) / tree.ndc_height) * (dir[1] / dir[2] - cen[1] / cen[2]); dir[2] = -2 / cen[2]; cen[0] = -((2 * tree.ndc_focal) / tree.ndc_width) * (cen[0] / cen[2]); cen[1] = -((2 * tree.ndc_focal) / tree.ndc_height) * (cen[1] / cen[2]); cen[2] = 1 + 2 / cen[2]; _normalize(dir); } template<typename scalar_t> __host__ __device__ __inline__ void rodrigues( const scalar_t* __restrict__ aa, scalar_t* __restrict__ dir) { scalar_t angle = _norm(aa); if (angle < 1e-6) return; scalar_t k[3]; for (int i = 0; i < 3; ++i) k[i] = aa[i] / angle; scalar_t cos_angle = cos(angle), sin_angle = sin(angle); scalar_t cross[3]; _cross3(k, dir, cross); scalar_t dot = _dot3(k, dir); for (int i = 0; i < 3; ++i) { dir[i] = dir[i] * cos_angle + cross[i] * sin_angle + k[i] * dot * (1.0 - cos_angle); } } } // namespace namespace device { // Primary rendering kernel __global__ static void render_kernel( hipSurfaceObject_t surf_obj, hipSurfaceObject_t surf_obj_depth, CameraSpec cam, TreeSpec tree, RenderOptions opt, float* probe_coeffs, bool offscreen) { CUDA_GET_THREAD_ID(idx, cam.width * cam.height); const int x = idx % cam.width, y = idx / cam.width; float dir[3], cen[3], out[4]; uint8_t rgbx_init[4]; if (!offscreen) { // Read existing values for compositing (with meshes) surf2Dread(reinterpret_cast<uint32_t*>(rgbx_init), surf_obj, x * 4, y, hipBoundaryModeZero); } bool enable_draw = tree.N > 0; out[0] = out[1] = out[2] = out[3] = 0.f; if (opt.enable_probe && y < opt.probe_disp_size + 5 && x >= cam.width - opt.probe_disp_size - 5) { // Draw probe circle float basis_fn[VOLREND_GLOBAL_BASIS_MAX]; int xx = x - (cam.width - opt.probe_disp_size) + 5; int yy = y - 5; cen[0] = -(xx / (0.5f * opt.probe_disp_size) - 1.f); cen[1] = (yy / (0.5f * opt.probe_disp_size) - 1.f); float c = cen[0] * cen[0] + cen[1] * cen[1]; if (c <= 1.f) { enable_draw = false; if (tree.data_format.basis_dim >= 0) { cen[2] = -sqrtf(1 - c); _mv3(cam.transform, cen, dir); maybe_precalc_basis(tree, dir, basis_fn); for (int t = 0; t < 3; ++t) { int off = t * tree.data_format.basis_dim; float tmp = 0.f; for (int i = opt.basis_minmax[0]; i <= opt.basis_minmax[1]; ++i) { tmp += basis_fn[i] * probe_coeffs[off + i]; } out[t] = 1.f / (1.f + expf(-tmp)); } out[3] = 1.f; } else { for (int i = 0; i < 3; ++i) out[i] = probe_coeffs[i]; out[3] = 1.f; } } else { out[0] = out[1] = out[2] = 0.f; } } if 
(enable_draw) { screen2worlddir(x, y, cam, dir, cen); float vdir[3] = {dir[0], dir[1], dir[2]}; maybe_world2ndc(tree, dir, cen); for (int i = 0; i < 3; ++i) { cen[i] = tree.offset[i] + tree.scale[i] * cen[i]; } float t_max = 1e9f; if (!offscreen) { surf2Dread(&t_max, surf_obj_depth, x * sizeof(float), y, hipBoundaryModeZero); } rodrigues(opt.rot_dirs, vdir); trace_ray(tree, dir, vdir, cen, opt, t_max, out); } // Compositing with existing color const float nalpha = 1.f - out[3]; if (offscreen) { const float remain = opt.background_brightness * nalpha; out[0] += remain; out[1] += remain; out[2] += remain; } else { out[0] += rgbx_init[0] / 255.f * nalpha; out[1] += rgbx_init[1] / 255.f * nalpha; out[2] += rgbx_init[2] / 255.f * nalpha; } // Output pixel color uint8_t rgbx[4] = { uint8_t(out[0] * 255), uint8_t(out[1] * 255), uint8_t(out[2] * 255), 255 }; surf2Dwrite( *reinterpret_cast<uint32_t*>(rgbx), surf_obj, x * 4, y, hipBoundaryModeZero); // squelches out-of-bound writes } __global__ static void retrieve_cursor_lumisphere_kernel( TreeSpec tree, RenderOptions opt, float* out) { float cen[3]; for (int i = 0; i < 3; ++i) { cen[i] = tree.offset[i] + tree.scale[i] * opt.probe[i]; } float _cube_sz; const half* tree_val; query_single_from_root(tree, cen, &tree_val, &_cube_sz); for (int i = 0; i < tree.data_dim - 1; ++i) { out[i] = __half2float(tree_val[i]); } } } // namespace device __host__ void launch_renderer(const N3Tree& tree, const Camera& cam, const RenderOptions& options, hipArray_t& image_arr, hipArray_t& depth_arr, hipStream_t stream, bool offscreen) { hipSurfaceObject_t surf_obj = 0, surf_obj_depth = 0; float* probe_coeffs = nullptr; if (options.enable_probe) { cuda(Malloc(&probe_coeffs, (tree.data_dim - 1) * sizeof(float))); hipLaunchKernelGGL(( device::retrieve_cursor_lumisphere_kernel), dim3(1), dim3(1), 0, stream, tree, options, probe_coeffs); } { struct hipResourceDesc res_desc; memset(&res_desc, 0, sizeof(res_desc)); res_desc.resType = hipResourceTypeArray; res_desc.res.array.array = image_arr; hipCreateSurfaceObject(&surf_obj, &res_desc); } if (!offscreen) { { struct hipResourceDesc res_desc; memset(&res_desc, 0, sizeof(res_desc)); res_desc.resType = hipResourceTypeArray; res_desc.res.array.array = depth_arr; hipCreateSurfaceObject(&surf_obj_depth, &res_desc); } } // less threads is weirdly faster for me than 1024 // Not sure if this scales to a good GPU const int N_CUDA_THREADS = 320; const int blocks = N_BLOCKS_NEEDED(cam.width * cam.height, N_CUDA_THREADS); hipLaunchKernelGGL(( device::render_kernel), dim3(blocks), dim3(N_CUDA_THREADS), 0, stream, surf_obj, surf_obj_depth, cam, tree, options, probe_coeffs, offscreen); if (options.enable_probe) { hipFree(probe_coeffs); } } } // namespace volrend
79f36e32dada004550f37cf6d0d4f1b472a85393.cu
#include <cstdint> #include <cstdio> #include <ctime> #include <cstring> #include <cuda_fp16.h> #include "volrend/cuda/common.cuh" #include "volrend/cuda/rt_core.cuh" #include "volrend/render_options.hpp" #include "volrend/cuda/data_spec.cuh" namespace volrend { #define MAX3(a, b, c) max(max(a, b), c) #define MIN3(a, b, c) min(min(a, b), c) namespace { template<typename scalar_t> __host__ __device__ __inline__ static void screen2worlddir( int ix, int iy, const CameraSpec& cam, scalar_t* out, scalar_t* cen) { scalar_t xyz[3] ={ (ix - 0.5f * cam.width) / cam.fx, -(iy - 0.5f * cam.height) / cam.fy, -1.0f}; _mv3(cam.transform, xyz, out); _normalize(out); _copy3(cam.transform + 9, cen); } template<typename scalar_t> __host__ __device__ __inline__ void maybe_world2ndc( const TreeSpec& tree, scalar_t* __restrict__ dir, scalar_t* __restrict__ cen) { if (tree.ndc_width <= 0) return; scalar_t t = -(1.f + cen[2]) / dir[2]; for (int i = 0; i < 3; ++i) { cen[i] = cen[i] + t * dir[i]; } dir[0] = -((2 * tree.ndc_focal) / tree.ndc_width) * (dir[0] / dir[2] - cen[0] / cen[2]); dir[1] = -((2 * tree.ndc_focal) / tree.ndc_height) * (dir[1] / dir[2] - cen[1] / cen[2]); dir[2] = -2 / cen[2]; cen[0] = -((2 * tree.ndc_focal) / tree.ndc_width) * (cen[0] / cen[2]); cen[1] = -((2 * tree.ndc_focal) / tree.ndc_height) * (cen[1] / cen[2]); cen[2] = 1 + 2 / cen[2]; _normalize(dir); } template<typename scalar_t> __host__ __device__ __inline__ void rodrigues( const scalar_t* __restrict__ aa, scalar_t* __restrict__ dir) { scalar_t angle = _norm(aa); if (angle < 1e-6) return; scalar_t k[3]; for (int i = 0; i < 3; ++i) k[i] = aa[i] / angle; scalar_t cos_angle = cos(angle), sin_angle = sin(angle); scalar_t cross[3]; _cross3(k, dir, cross); scalar_t dot = _dot3(k, dir); for (int i = 0; i < 3; ++i) { dir[i] = dir[i] * cos_angle + cross[i] * sin_angle + k[i] * dot * (1.0 - cos_angle); } } } // namespace namespace device { // Primary rendering kernel __global__ static void render_kernel( cudaSurfaceObject_t surf_obj, cudaSurfaceObject_t surf_obj_depth, CameraSpec cam, TreeSpec tree, RenderOptions opt, float* probe_coeffs, bool offscreen) { CUDA_GET_THREAD_ID(idx, cam.width * cam.height); const int x = idx % cam.width, y = idx / cam.width; float dir[3], cen[3], out[4]; uint8_t rgbx_init[4]; if (!offscreen) { // Read existing values for compositing (with meshes) surf2Dread(reinterpret_cast<uint32_t*>(rgbx_init), surf_obj, x * 4, y, cudaBoundaryModeZero); } bool enable_draw = tree.N > 0; out[0] = out[1] = out[2] = out[3] = 0.f; if (opt.enable_probe && y < opt.probe_disp_size + 5 && x >= cam.width - opt.probe_disp_size - 5) { // Draw probe circle float basis_fn[VOLREND_GLOBAL_BASIS_MAX]; int xx = x - (cam.width - opt.probe_disp_size) + 5; int yy = y - 5; cen[0] = -(xx / (0.5f * opt.probe_disp_size) - 1.f); cen[1] = (yy / (0.5f * opt.probe_disp_size) - 1.f); float c = cen[0] * cen[0] + cen[1] * cen[1]; if (c <= 1.f) { enable_draw = false; if (tree.data_format.basis_dim >= 0) { cen[2] = -sqrtf(1 - c); _mv3(cam.transform, cen, dir); maybe_precalc_basis(tree, dir, basis_fn); for (int t = 0; t < 3; ++t) { int off = t * tree.data_format.basis_dim; float tmp = 0.f; for (int i = opt.basis_minmax[0]; i <= opt.basis_minmax[1]; ++i) { tmp += basis_fn[i] * probe_coeffs[off + i]; } out[t] = 1.f / (1.f + expf(-tmp)); } out[3] = 1.f; } else { for (int i = 0; i < 3; ++i) out[i] = probe_coeffs[i]; out[3] = 1.f; } } else { out[0] = out[1] = out[2] = 0.f; } } if (enable_draw) { screen2worlddir(x, y, cam, dir, cen); float vdir[3] = {dir[0], dir[1], 
dir[2]}; maybe_world2ndc(tree, dir, cen); for (int i = 0; i < 3; ++i) { cen[i] = tree.offset[i] + tree.scale[i] * cen[i]; } float t_max = 1e9f; if (!offscreen) { surf2Dread(&t_max, surf_obj_depth, x * sizeof(float), y, cudaBoundaryModeZero); } rodrigues(opt.rot_dirs, vdir); trace_ray(tree, dir, vdir, cen, opt, t_max, out); } // Compositing with existing color const float nalpha = 1.f - out[3]; if (offscreen) { const float remain = opt.background_brightness * nalpha; out[0] += remain; out[1] += remain; out[2] += remain; } else { out[0] += rgbx_init[0] / 255.f * nalpha; out[1] += rgbx_init[1] / 255.f * nalpha; out[2] += rgbx_init[2] / 255.f * nalpha; } // Output pixel color uint8_t rgbx[4] = { uint8_t(out[0] * 255), uint8_t(out[1] * 255), uint8_t(out[2] * 255), 255 }; surf2Dwrite( *reinterpret_cast<uint32_t*>(rgbx), surf_obj, x * 4, y, cudaBoundaryModeZero); // squelches out-of-bound writes } __global__ static void retrieve_cursor_lumisphere_kernel( TreeSpec tree, RenderOptions opt, float* out) { float cen[3]; for (int i = 0; i < 3; ++i) { cen[i] = tree.offset[i] + tree.scale[i] * opt.probe[i]; } float _cube_sz; const half* tree_val; query_single_from_root(tree, cen, &tree_val, &_cube_sz); for (int i = 0; i < tree.data_dim - 1; ++i) { out[i] = __half2float(tree_val[i]); } } } // namespace device __host__ void launch_renderer(const N3Tree& tree, const Camera& cam, const RenderOptions& options, cudaArray_t& image_arr, cudaArray_t& depth_arr, cudaStream_t stream, bool offscreen) { cudaSurfaceObject_t surf_obj = 0, surf_obj_depth = 0; float* probe_coeffs = nullptr; if (options.enable_probe) { cuda(Malloc(&probe_coeffs, (tree.data_dim - 1) * sizeof(float))); device::retrieve_cursor_lumisphere_kernel<<<1, 1, 0, stream>>>( tree, options, probe_coeffs); } { struct cudaResourceDesc res_desc; memset(&res_desc, 0, sizeof(res_desc)); res_desc.resType = cudaResourceTypeArray; res_desc.res.array.array = image_arr; cudaCreateSurfaceObject(&surf_obj, &res_desc); } if (!offscreen) { { struct cudaResourceDesc res_desc; memset(&res_desc, 0, sizeof(res_desc)); res_desc.resType = cudaResourceTypeArray; res_desc.res.array.array = depth_arr; cudaCreateSurfaceObject(&surf_obj_depth, &res_desc); } } // less threads is weirdly faster for me than 1024 // Not sure if this scales to a good GPU const int N_CUDA_THREADS = 320; const int blocks = N_BLOCKS_NEEDED(cam.width * cam.height, N_CUDA_THREADS); device::render_kernel<<<blocks, N_CUDA_THREADS, 0, stream>>>( surf_obj, surf_obj_depth, cam, tree, options, probe_coeffs, offscreen); if (options.enable_probe) { cudaFree(probe_coeffs); } } } // namespace volrend
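// launch_renderer() above writes pixels through a cudaSurfaceObject_t that wraps a
// cudaArray_t. The sketch below isolates that surface-object pattern in a self-contained
// form; the solid-color fill kernel, the uchar4 channel format and the 16x16 block shape
// are illustrative choices, not taken from volrend.
__global__ void fillSolid(cudaSurfaceObject_t surf, int width, int height) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height) {
        uchar4 px = make_uchar4(32, 64, 128, 255);
        // surface writes take the x coordinate in bytes, hence the sizeof(uchar4) scaling
        surf2Dwrite(px, surf, x * (int)sizeof(uchar4), y, cudaBoundaryModeZero);
    }
}

void fillArrayExample(int width, int height) {
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>();
    cudaArray_t arr = nullptr;
    cudaMallocArray(&arr, &desc, width, height, cudaArraySurfaceLoadStore);
    cudaResourceDesc res;
    memset(&res, 0, sizeof(res)); // same zero-init-then-fill pattern as in launch_renderer()
    res.resType = cudaResourceTypeArray;
    res.res.array.array = arr;
    cudaSurfaceObject_t surf = 0;
    cudaCreateSurfaceObject(&surf, &res);
    dim3 block(16, 16);
    dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
    fillSolid<<<grid, block>>>(surf, width, height);
    cudaDeviceSynchronize();
    cudaDestroySurfaceObject(surf);
    cudaFreeArray(arr);
}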
7c4deb104da7cdefce31c213c7cdfe912135695b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Andr Luiz Abdalla Silveira 8030353 * Mauricio Luiz Cardoso 6796479 * * Esse programa escrito em CUDA visa criar um algoritmo que gera uma reduo * de matrizes. Cada matriz representada por um vetor e todos esto * reunidos num vetor de vetores. A ideia fazer uma funo que faz uma * comparao entre vetores fazendo o mnimo de operaes */ #include <stdio.h> #include <stdlib.h> #define E 9 // qtde de elementos de cada matriz #define linhaElementos 3 // quantidade de elementos da linha int numMatrizes; __global__ void os_menores(int *d_matrizes, int posLimite, int jump) { int indexIni = threadIdx.x + blockIdx.x * blockDim.x; for(int i = indexIni; i < posLimite; i += jump) if(d_matrizes[indexIni] > d_matrizes[i]) d_matrizes[indexIni] = d_matrizes[i]; } /* Imprime todas as matrizes de dimenso ExE contidas em matrizes*/ void leitura (int *matrizes, int numMats) { int i, k; for (i = 0; i < numMats * linhaElementos; i++) { for (k = 0; k < linhaElementos; k++) printf("%d\t", *(matrizes++)); printf("\n"); if((i+1) % linhaElementos == 0) printf("********************\n"); } } void menorMatriz(int *d_matrizes, int numMats) { if(numMats > 1) { int numBlocks = 0; int numMatResto; int jump = 0; int numThreads = 0; int posLimite; // carga de tamanho de um bloco if(numMats <= E * 10) { numMatResto = 1; numThreads = E; numBlocks = 1; } else { const int numMatThreads = 3; // 3 foi escolhido para que numthreads seja maior multiplo de E(tamanho de cada matriz) e menor que um warp(32) numThreads = E * numMatThreads; int espacoTrabThre = 10 * numThreads; //cada thread devera comparar ate E * 10 matrizes numBlocks = E * numMats / espacoTrabThre; numMatResto = numBlocks * numMatThreads; } posLimite = numMats * E; jump = numBlocks * numThreads; hipLaunchKernelGGL(( os_menores), dim3(numBlocks), dim3(numThreads), 0, 0, d_matrizes, posLimite, jump); hipDeviceSynchronize(); menorMatriz(d_matrizes, numMatResto); } } void encontraMenorMatriz(int* matrizes) { int tam = numMatrizes * E * sizeof(int); int *d_matrizes; // Alloc space for device copies of a, b, c hipMalloc((void **) &d_matrizes, tam); // Copy inputs to device hipMemcpy(d_matrizes, matrizes, tam, hipMemcpyHostToDevice); // encontra menor matriz menorMatriz(d_matrizes, numMatrizes); // Copy result back to host hipMemcpy(matrizes, d_matrizes, tam, hipMemcpyDeviceToHost); hipFree(d_matrizes); } /* Le o arquivo arq que contem matrizes no formato declarado no enunciado e retorna um vetor com todas matrizes lidas*/ int* alocaMatrizesArquivo(FILE *arq){ char asteriscos[10]; int *matrizes, *matrizesAux; fscanf(arq, "%d", &numMatrizes); matrizes = (int *) malloc(E * numMatrizes * sizeof(int)); matrizesAux = matrizes; for(int i = 0; i < numMatrizes; i++) { fscanf(arq, "%s", asteriscos); //pula a linha de asteriscos for(int j = 0; j < E; j++) fscanf(arq, "%d", matrizesAux++); } return matrizes; } int main (int argc, char* argv[]) { if(argc != 2) { printf("Argumento do programa: nome do arquivo\n"); } else { FILE *entrada; entrada = fopen(argv[1], "r"); if (entrada == NULL) { printf("Deu ruim pra abrir o arquivo\n"); return EXIT_FAILURE; } int *matrizes = alocaMatrizesArquivo(entrada); fclose(entrada); encontraMenorMatriz(matrizes); leitura(matrizes, 1); // leitura(get_min(mat, 0, qtde)); free(matrizes); return EXIT_SUCCESS; } }
7c4deb104da7cdefce31c213c7cdfe912135695b.cu
/** * André Luiz Abdalla Silveira 8030353 * Mauricio Luiz Cardoso 6796479 * * Esse programa escrito em CUDA visa criar um algoritmo que gera uma redução * de matrizes. Cada matriz é representada por um vetor e todos estão * reunidos num vetor de vetores. A ideia é fazer uma função que faz uma * comparação entre vetores fazendo o mínimo de operações */ #include <stdio.h> #include <stdlib.h> #define E 9 // qtde de elementos de cada matriz #define linhaElementos 3 // quantidade de elementos da linha int numMatrizes; __global__ void os_menores(int *d_matrizes, int posLimite, int jump) { int indexIni = threadIdx.x + blockIdx.x * blockDim.x; for(int i = indexIni; i < posLimite; i += jump) if(d_matrizes[indexIni] > d_matrizes[i]) d_matrizes[indexIni] = d_matrizes[i]; } /* Imprime todas as matrizes de dimensão ExE contidas em matrizes*/ void leitura (int *matrizes, int numMats) { int i, k; for (i = 0; i < numMats * linhaElementos; i++) { for (k = 0; k < linhaElementos; k++) printf("%d\t", *(matrizes++)); printf("\n"); if((i+1) % linhaElementos == 0) printf("********************\n"); } } void menorMatriz(int *d_matrizes, int numMats) { if(numMats > 1) { int numBlocks = 0; int numMatResto; int jump = 0; int numThreads = 0; int posLimite; // carga de tamanho de um bloco if(numMats <= E * 10) { numMatResto = 1; numThreads = E; numBlocks = 1; } else { const int numMatThreads = 3; // 3 foi escolhido para que numthreads seja maior multiplo de E(tamanho de cada matriz) e menor que um warp(32) numThreads = E * numMatThreads; int espacoTrabThre = 10 * numThreads; //cada thread devera comparar ate E * 10 matrizes numBlocks = E * numMats / espacoTrabThre; numMatResto = numBlocks * numMatThreads; } posLimite = numMats * E; jump = numBlocks * numThreads; os_menores<<<numBlocks, numThreads>>>(d_matrizes, posLimite, jump); cudaDeviceSynchronize(); menorMatriz(d_matrizes, numMatResto); } } void encontraMenorMatriz(int* matrizes) { int tam = numMatrizes * E * sizeof(int); int *d_matrizes; // Alloc space for device copies of a, b, c cudaMalloc((void **) &d_matrizes, tam); // Copy inputs to device cudaMemcpy(d_matrizes, matrizes, tam, cudaMemcpyHostToDevice); // encontra menor matriz menorMatriz(d_matrizes, numMatrizes); // Copy result back to host cudaMemcpy(matrizes, d_matrizes, tam, cudaMemcpyDeviceToHost); cudaFree(d_matrizes); } /* Le o arquivo arq que contem matrizes no formato declarado no enunciado e retorna um vetor com todas matrizes lidas*/ int* alocaMatrizesArquivo(FILE *arq){ char asteriscos[10]; int *matrizes, *matrizesAux; fscanf(arq, "%d", &numMatrizes); matrizes = (int *) malloc(E * numMatrizes * sizeof(int)); matrizesAux = matrizes; for(int i = 0; i < numMatrizes; i++) { fscanf(arq, "%s", asteriscos); //pula a linha de asteriscos for(int j = 0; j < E; j++) fscanf(arq, "%d", matrizesAux++); } return matrizes; } int main (int argc, char* argv[]) { if(argc != 2) { printf("Argumento do programa: nome do arquivo\n"); } else { FILE *entrada; entrada = fopen(argv[1], "r"); if (entrada == NULL) { printf("Deu ruim pra abrir o arquivo\n"); return EXIT_FAILURE; } int *matrizes = alocaMatrizesArquivo(entrada); fclose(entrada); encontraMenorMatriz(matrizes); leitura(matrizes, 1); // leitura(get_min(mat, 0, qtde)); free(matrizes); return EXIT_SUCCESS; } }
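The reader alocaMatrizesArquivo above expects the matrix count first, then, for every 3x3 matrix, one whitespace-delimited token (the asterisk separator) followed by its nine integers. The short generator below writes a file in that inferred layout; the file name entrada_exemplo.txt and the random values are illustrative only.

#include <stdio.h>
#include <stdlib.h>

/* Writes a sample input file in the layout that alocaMatrizesArquivo() parses:
   the number of matrices, then for each 3x3 matrix one token of asterisks
   followed by its 9 integers. */
int main(void) {
    const int numMatrizes = 4;
    FILE *arq = fopen("entrada_exemplo.txt", "w");
    if (arq == NULL) return EXIT_FAILURE;
    fprintf(arq, "%d\n", numMatrizes);
    for (int m = 0; m < numMatrizes; m++) {
        fprintf(arq, "********************\n");
        for (int linha = 0; linha < 3; linha++)
            fprintf(arq, "%d %d %d\n", rand() % 100, rand() % 100, rand() % 100);
    }
    fclose(arq);
    /* Afterwards the program above would be run as: ./programa entrada_exemplo.txt */
    return 0;
}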
8b3b25532ff2b113973ed8e9902908cfc758e88f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __device__ void remove_cols(short *deleted_cols, int *col_group, const int conflict_col_id, const int total_dl_matrix_col_num) { for (int i = threadIdx.x; i < total_dl_matrix_col_num; i = i + blockDim.x) { if (col_group[i] == col_group[conflict_col_id]) { deleted_cols[i] = -1; } } } __global__ void remove_cols(int *deleted_cols, int *col_group, const int conflict_col_id, const int total_dl_matrix_col_num) { for (int i = threadIdx.x; i < total_dl_matrix_col_num; i = i + blockDim.x) { if (col_group[i] == col_group[conflict_col_id]) { deleted_cols[i] = -1; } } }
8b3b25532ff2b113973ed8e9902908cfc758e88f.cu
#include "includes.h" __device__ void remove_cols(short *deleted_cols, int *col_group, const int conflict_col_id, const int total_dl_matrix_col_num) { for (int i = threadIdx.x; i < total_dl_matrix_col_num; i = i + blockDim.x) { if (col_group[i] == col_group[conflict_col_id]) { deleted_cols[i] = -1; } } } __global__ void remove_cols(int *deleted_cols, int *col_group, const int conflict_col_id, const int total_dl_matrix_col_num) { for (int i = threadIdx.x; i < total_dl_matrix_col_num; i = i + blockDim.x) { if (col_group[i] == col_group[conflict_col_id]) { deleted_cols[i] = -1; } } }
e27d5925272d420a7f5b133eff79e8fd7f8bb7ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* * PARA CORRERLO: * $ export LD_LIBRARY_PATH=/usr/local/cuda/lib * $ export PATH=$PATH:/usr/local/cuda/bin * $ nvcc -o matrixTrans matrixTrans.cu -O2 -lc -lm * $ ./matrixTrans n */ /* * UNSIGNED INT --> Tipo de dato para enteros, nmeros sin punto decimal. * Los enteros sin signo pueden ser tan grandes como 65535 * y tan pequeos como 0. * Son almacenados como 16 bits de informacin. * * SIZE_T --> is an unsigned integer type guaranteed to support the longest * object for the platform you use. It is also the result of the * sizeof operator.sizeof returns the size of the type in bytes. * So in your context of question in both cases you pass a * size_t to malloc. */ #define NUMBER_THREADS 32 float elapsed_time_ms; int gpudev = 1; char *dev_mat_in, *dev_mat_out; //--------------------------------------------------------------------------- __global__ void kernelTransposeMatrix(const char *mat_in, char *mat_out, unsigned int rows, unsigned int cols){ unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; unsigned int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx < cols && idy < rows) { unsigned int pos = idy * cols + idx; unsigned int trans_pos = idx * rows + idy; mat_out[trans_pos] = mat_in[pos]; } }
e27d5925272d420a7f5b133eff79e8fd7f8bb7ab.cu
#include "includes.h" /* * PARA CORRERLO: * $ export LD_LIBRARY_PATH=/usr/local/cuda/lib * $ export PATH=$PATH:/usr/local/cuda/bin * $ nvcc -o matrixTrans matrixTrans.cu -O2 -lc -lm * $ ./matrixTrans n */ /* * UNSIGNED INT --> Tipo de dato para enteros, números sin punto decimal. * Los enteros sin signo pueden ser tan grandes como 65535 * y tan pequeños como 0. * Son almacenados como 16 bits de información. * * SIZE_T --> is an unsigned integer type guaranteed to support the longest * object for the platform you use. It is also the result of the * sizeof operator.sizeof returns the size of the type in bytes. * So in your context of question in both cases you pass a * size_t to malloc. */ #define NUMBER_THREADS 32 float elapsed_time_ms; int gpudev = 1; char *dev_mat_in, *dev_mat_out; //--------------------------------------------------------------------------- __global__ void kernelTransposeMatrix(const char *mat_in, char *mat_out, unsigned int rows, unsigned int cols){ unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; unsigned int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx < cols && idy < rows) { unsigned int pos = idy * cols + idx; unsigned int trans_pos = idx * rows + idy; mat_out[trans_pos] = mat_in[pos]; } }
1752e0fa119852259ddda6d88295c70823eb0ee5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* It is instructive to compare zero-copy and unified memory. For the former, the memory is allocated in page-locked fashion on the host. A device thread has to reach out to get the data. No guarantee of coherence is provided as, for instance, the host could change the content of the pinned memory while the device reads its content. For UM, the memory is allocated on the device and transparently made available where needed. Specifically, upon a call to hipError_t hipMallocManaged(void** devPtr, size_t size, unsigned int flag); the user has, in devPtr, a pointer to an address of a chunk of device memory. This address can be equally well manipulated on the device and the host (although, as illustrated below, not simultaneously). Note that hipMallocManaged and hipMalloc are semantically identical; in fact, the former can be used anywhere the latter is used. UM enables a single-pointer-to-data memory model. For instance, the same pointer can be used on the host in a memcpy operation to copy a set of integers to an array mA, and then on the device to alter, just like in the code snippet above, the value of each entry in mA. The data in mA will be coherent as long as the host does not touch entries in mA when the GPU executes a kernel. The host can safely operate with/on mA only after a hipDeviceSynchronize call. Failure to obey this rule will lead to a segfault, as illustrated in the following example lifted from the CUDA Programming guide. */ #include <iostream> #include <cmath> const int ARRAY_SIZE = 1000; __global__ void increment(double* aArray, double val, unsigned int sz) { unsigned int indx = blockIdx.x * blockDim.x + threadIdx.x; if (indx < sz) aArray[indx] += val; } int main() { double* mA; hipMallocManaged(&mA, ARRAY_SIZE * sizeof(double)); for (int i = 0; i < ARRAY_SIZE; i++) mA[i] = 1.*i; double inc_val = 2.0; hipLaunchKernelGGL(( increment), dim3(2), dim3(512), 0, 0, mA, inc_val, ARRAY_SIZE); hipDeviceSynchronize(); double error = 0.; for (int i = 0; i < ARRAY_SIZE; i++) error += ::fabs(mA[i] - (i + inc_val)); for (int i=0;i<ARRAY_SIZE;++i){ std::cout<<mA[i]<<' '; } std::cout<<'\n'; std::cout << "Test: " << (error < 1.E-9 ? "Passed" : "Failed") << std::endl; hipFree(mA); return 0; }
1752e0fa119852259ddda6d88295c70823eb0ee5.cu
/* It is instructive to compare zero-copy and unified memory. For the former, the memory is allocated in page-locked fashion on the host. A device thread has to reach out to get the data. No guarantee of coherence is provided as, for instance, the host could change the content of the pinned memory while the device reads its content. For UM, the memory is allocated on the device and transparently made available where needed. Specifically, upon a call to cudaError_t cudaMallocManaged(void** devPtr, size_t size, unsigned int flag); the user has, in devPtr, a pointer to an address of a chunk of device memory. This address can be equally well manipulated on the device and the host (although, as illustrated below, not simultaneously). Note that cudaMallocManaged and cudaMalloc are semantically identical; in fact, the former can be used anywhere the latter is used. UM enables a “single-pointer-to-data” memory model. For instance, the same pointer can be used on the host in a memcpy operation to copy a set of integers to an array mA, and then on the device to alter, just like in the code snippet above, the value of each entry in mA. The data in mA will be coherent as long as the host does not touch entries in mA when the GPU executes a kernel. The host can safely operate with/on mA only after a cudaDeviceSynchronize call. Failure to obey this rule will lead to a segfault, as illustrated in the following example lifted from the CUDA Programming guide. */ #include <iostream> #include <cmath> const int ARRAY_SIZE = 1000; __global__ void increment(double* aArray, double val, unsigned int sz) { unsigned int indx = blockIdx.x * blockDim.x + threadIdx.x; if (indx < sz) aArray[indx] += val; } int main() { double* mA; cudaMallocManaged(&mA, ARRAY_SIZE * sizeof(double)); for (int i = 0; i < ARRAY_SIZE; i++) mA[i] = 1.*i; double inc_val = 2.0; increment<<<2, 512>>>(mA, inc_val, ARRAY_SIZE); cudaDeviceSynchronize(); double error = 0.; for (int i = 0; i < ARRAY_SIZE; i++) error += std::fabs(mA[i] - (i + inc_val)); for (int i=0;i<ARRAY_SIZE;++i){ std::cout<<mA[i]<<' '; } std::cout<<'\n'; std::cout << "Test: " << (error < 1.E-9 ? "Passed" : "Failed") << std::endl; cudaFree(mA); return 0; }
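The comment block above stresses that the host may only touch managed memory after cudaDeviceSynchronize. A variation of the same program, not part of the original, is sketched below using cudaMemPrefetchAsync to hint page migration before and after the kernel; prefetching only helps on devices that expose the concurrentManagedAccess attribute, and the kernel body is unchanged.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void increment(double* aArray, double val, unsigned int sz) {
    unsigned int indx = blockIdx.x * blockDim.x + threadIdx.x;
    if (indx < sz) aArray[indx] += val;
}

int main() {
    const int N = 1000;
    double* mA;
    cudaMallocManaged(&mA, N * sizeof(double));
    for (int i = 0; i < N; i++) mA[i] = 1. * i;

    int dev = 0;
    cudaGetDevice(&dev);
    // Hint the driver to migrate the pages to the GPU before the kernel runs.
    cudaMemPrefetchAsync(mA, N * sizeof(double), dev);

    increment<<<2, 512>>>(mA, 2.0, N);

    // Prefetch back to the host and synchronize before the CPU touches mA again,
    // which is the coherence rule described in the comment block above.
    cudaMemPrefetchAsync(mA, N * sizeof(double), cudaCpuDeviceId);
    cudaDeviceSynchronize();

    printf("mA[0] = %f, mA[%d] = %f\n", mA[0], N - 1, mA[N - 1]);
    cudaFree(mA);
    return 0;
}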
a446ac154c971164a570fa93203c794d549d85cf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // modified from mmdetection #include "modulated_deform_conv_3d_cuda_kernel.cuh" using namespace at; #ifndef AT_CHECK #define AT_CHECK TORCH_CHECK #endif void modulated_deformable_3d_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int depth_im, const int height_im, const int width_im, const int depth_col, const int height_col, const int width_col, const int kernel_d, const int kernel_h, const int kernel_w, const int pad_d, const int pad_h, const int pad_w, const int stride_d, const int stride_h, const int stride_w, const int dilation_d, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * depth_col * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *data_col_ = data_col.data<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_im_, data_offset_, data_mask_, depth_im, height_im, width_im, kernel_d, kernel_h, kernel_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, depth_col, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_3d_col2im_cuda( const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int depth_im, const int height_im, const int width_im, const int depth_col, const int height_col, const int width_col, const int kernel_d, const int kernel_h, const int kernel_w, const int pad_d, const int pad_h, const int pad_w, const int stride_d, const int stride_h, const int stride_w, const int dilation_d, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *grad_im_ = grad_im.data<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel) , dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_col_, data_offset_, data_mask_, channels, depth_im, height_im, width_im, kernel_d, kernel_h, kernel_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, depth_col, height_col, width_col, grad_im_); })); hipError_t err = 
hipGetLastError(); if (err != hipSuccess) { printf("error in modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_3d_col2im_coord_cuda( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int depth_im, const int height_im, const int width_im, const int depth_col, const int height_col, const int width_col, const int kernel_d, const int kernel_h, const int kernel_w, const int pad_d, const int pad_h, const int pad_w, const int stride_d, const int stride_h, const int stride_w, const int dilation_d, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_offset, at::Tensor grad_mask) { const int channel_per_deformable_group = channels * kernel_d * kernel_h * kernel_w / deformable_group; const int num_kernels = batch_size * depth_col * height_col * width_col * 3 * kernel_d * kernel_h * kernel_w * deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); scalar_t *grad_mask_ = grad_mask.data<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel) , dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, depth_im, height_im, width_im, kernel_d, kernel_h, kernel_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 3 * kernel_d * kernel_h * kernel_w * deformable_group, deformable_group, depth_col, height_col, width_col, grad_offset_, grad_mask_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deform_conv_3d_cuda_forward( at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns, int kernel_d, int kernel_h, int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_d, const int pad_h, const int pad_w, const int dilation_d, const int dilation_h, const int dilation_w, const int group, const int deformable_group, const bool with_bias) { AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); at::DeviceGuard guard(input.device()); const int batch = input.size(0); const int channels = input.size(1); const int depth = input.size(2); const int height = input.size(3); const int width = input.size(4); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_d_ = weight.size(2); const int kernel_h_ = weight.size(3); const int kernel_w_ = weight.size(4); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w || kernel_d_ != kernel_d) AT_ERROR("Input shape and kernel shape wont match: (%d x %d x %d vs %d x %d x %d).", kernel_h, kernel_w, kernel_d, kernel_h_, kernel_w_, kernel_d_); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, 
channels_kernel * group); const int depth_out = (depth + 2 * pad_d - (dilation_d * (kernel_d - 1) + 1)) / stride_d + 1; const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; if (ones.ndimension() != 3 || ones.size(0) * ones.size(1) * ones.size(2) < depth_out * height_out * width_out) { // Resize plane and fill with ones... ones = at::ones({depth_out, height_out, width_out}, input.options()); } // resize output output = output.view({batch, channels_out, depth_out, height_out, width_out}).zero_(); // resize temporary columns columns = at::zeros({channels * kernel_d * kernel_h * kernel_w, 1 * depth_out * height_out * width_out}, input.options()); output = output.view({output.size(0), group, output.size(1) / group, output.size(2), output.size(3), output.size(4)}); for (int b = 0; b < batch; b++) { modulated_deformable_3d_im2col_cuda( input[b], offset[b], mask[b], 1, channels, depth, height, width, depth_out, height_out, width_out, kernel_d, kernel_h, kernel_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d, dilation_h, dilation_w, deformable_group, columns); // divide into group weight = weight.view({group, weight.size(0) / group, weight.size(1), weight.size(2), weight.size(3), weight.size(4)}); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); for (int g = 0; g < group; g++) { output[b][g] = output[b][g] .flatten(1) .addmm_(weight[g].flatten(1), columns[g]) .view_as(output[b][g]); } weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4), weight.size(5)}); columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); } output = output.view({output.size(0), output.size(1) * output.size(2), output.size(3), output.size(4), output.size(5)}); if (with_bias) { output += bias.view({1, bias.size(0), 1, 1, 1}); } } void modulated_deform_conv_3d_cuda_backward( at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, at::Tensor offset, at::Tensor mask, at::Tensor columns, at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias, at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_output, int kernel_d, int kernel_h, int kernel_w, int stride_d, int stride_h, int stride_w, int pad_d, int pad_h, int pad_w, int dilation_d, int dilation_h, int dilation_w, int group, int deformable_group, const bool with_bias) { AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); at::DeviceGuard guard(input.device()); const int batch = input.size(0); const int channels = input.size(1); const int depth = input.size(2); const int height = input.size(3); const int width = input.size(4); const int channels_kernel = weight.size(1); const int kernel_d_ = weight.size(2); const int kernel_h_ = weight.size(3); const int kernel_w_ = weight.size(4); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w || kernel_d_ != kernel_d) AT_ERROR("Input shape and kernel shape wont match: (%d x %d x %d vs %d x %d x %d).", kernel_h, kernel_w, kernel_d, kernel_h_, kernel_w_, kernel_d_); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int depth_out = (depth + 2 * pad_d - (dilation_d * (kernel_d - 1) + 1)) / stride_d + 1; const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 
1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; if (ones.ndimension() != 3 || ones.size(0) * ones.size(1) * ones.size(2) < depth_out * height_out * width_out) { // Resize plane and fill with ones... ones = at::ones({depth_out, height_out, width_out}, input.options()); } grad_input = grad_input.view({batch, channels, depth, height, width}); columns = at::zeros({channels * kernel_d * kernel_h * kernel_w, depth_out * height_out * width_out}, input.options()); grad_output = grad_output.view({grad_output.size(0), group, grad_output.size(1) / group, grad_output.size(2), grad_output.size(3), grad_output.size(4)}); for (int b = 0; b < batch; b++) { // divide int group columns = columns.view({group, columns.size(0) / group, columns.size(1)}); weight = weight.view({group, weight.size(0) / group, weight.size(1), weight.size(2), weight.size(3), weight.size(4)}); for (int g = 0; g < group; g++) { columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), grad_output[b][g].flatten(1), 0.0f, 1.0f); } columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4), weight.size(5)}); // gradient w.r.t. input coordinate data modulated_deformable_3d_col2im_coord_cuda( columns, input[b], offset[b], mask[b], 1, channels, depth, height, width, depth_out, height_out, width_out, kernel_d, kernel_h, kernel_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d, dilation_h, dilation_w, deformable_group, grad_offset[b], grad_mask[b]); // gradient w.r.t. input data modulated_deformable_3d_col2im_cuda( columns, offset[b], mask[b], 1, channels, depth, height, width, depth_out, height_out, width_out, kernel_d, kernel_h, kernel_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d, dilation_h, dilation_w, deformable_group, grad_input[b]); // gradient w.r.t. weight, dWeight should accumulate across the batch and // group modulated_deformable_3d_im2col_cuda( input[b], offset[b], mask[b], 1, channels, depth, height, width, depth_out, height_out, width_out, kernel_d, kernel_h, kernel_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d, dilation_h, dilation_w, deformable_group, columns); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); grad_weight = grad_weight.view({group, grad_weight.size(0) / group, grad_weight.size(1), grad_weight.size(2), grad_weight.size(3), grad_weight.size(4)}); if (with_bias) grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); for (int g = 0; g < group; g++) { grad_weight[g] = grad_weight[g] .flatten(1) .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1)) .view_as(grad_weight[g]); if (with_bias) { grad_bias[g] = grad_bias[g] .view({-1, 1}) .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1})) .view(-1); } } columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), grad_weight.size(2), grad_weight.size(3), grad_weight.size(4), grad_weight.size(5)}); if (with_bias) grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); } grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1), grad_output.size(2), grad_output.size(3), grad_output.size(4), grad_output.size(5)}); }
a446ac154c971164a570fa93203c794d549d85cf.cu
// modified from mmdetection #include "modulated_deform_conv_3d_cuda_kernel.cuh" using namespace at; #ifndef AT_CHECK #define AT_CHECK TORCH_CHECK #endif void modulated_deformable_3d_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int depth_im, const int height_im, const int width_im, const int depth_col, const int height_col, const int width_col, const int kernel_d, const int kernel_h, const int kernel_w, const int pad_d, const int pad_h, const int pad_w, const int stride_d, const int stride_h, const int stride_w, const int dilation_d, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * depth_col * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.scalar_type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *data_col_ = data_col.data<scalar_t>(); modulated_deformable_im2col_gpu_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_im_, data_offset_, data_mask_, depth_im, height_im, width_im, kernel_d, kernel_h, kernel_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, depth_col, height_col, width_col, data_col_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_3d_col2im_cuda( const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int depth_im, const int height_im, const int width_im, const int depth_col, const int height_col, const int width_col, const int kernel_d, const int kernel_h, const int kernel_w, const int pad_d, const int pad_h, const int pad_w, const int stride_d, const int stride_h, const int stride_w, const int dilation_d, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *grad_im_ = grad_im.data<scalar_t>(); modulated_deformable_col2im_gpu_kernel <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_col_, data_offset_, data_mask_, channels, depth_im, height_im, width_im, kernel_d, kernel_h, kernel_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, depth_col, height_col, width_col, grad_im_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); } } void 
modulated_deformable_3d_col2im_coord_cuda( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int depth_im, const int height_im, const int width_im, const int depth_col, const int height_col, const int width_col, const int kernel_d, const int kernel_h, const int kernel_w, const int pad_d, const int pad_h, const int pad_w, const int stride_d, const int stride_h, const int stride_w, const int dilation_d, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_offset, at::Tensor grad_mask) { const int channel_per_deformable_group = channels * kernel_d * kernel_h * kernel_w / deformable_group; const int num_kernels = batch_size * depth_col * height_col * width_col * 3 * kernel_d * kernel_h * kernel_w * deformable_group; AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.scalar_type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t *data_col_ = data_col.data<scalar_t>(); const scalar_t *data_im_ = data_im.data<scalar_t>(); const scalar_t *data_offset_ = data_offset.data<scalar_t>(); const scalar_t *data_mask_ = data_mask.data<scalar_t>(); scalar_t *grad_offset_ = grad_offset.data<scalar_t>(); scalar_t *grad_mask_ = grad_mask.data<scalar_t>(); modulated_deformable_col2im_coord_gpu_kernel <<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>( num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, depth_im, height_im, width_im, kernel_d, kernel_h, kernel_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 3 * kernel_d * kernel_h * kernel_w * deformable_group, deformable_group, depth_col, height_col, width_col, grad_offset_, grad_mask_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deform_conv_3d_cuda_forward( at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, at::Tensor offset, at::Tensor mask, at::Tensor output, at::Tensor columns, int kernel_d, int kernel_h, int kernel_w, const int stride_d, const int stride_h, const int stride_w, const int pad_d, const int pad_h, const int pad_w, const int dilation_d, const int dilation_h, const int dilation_w, const int group, const int deformable_group, const bool with_bias) { AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); at::DeviceGuard guard(input.device()); const int batch = input.size(0); const int channels = input.size(1); const int depth = input.size(2); const int height = input.size(3); const int width = input.size(4); const int channels_out = weight.size(0); const int channels_kernel = weight.size(1); const int kernel_d_ = weight.size(2); const int kernel_h_ = weight.size(3); const int kernel_w_ = weight.size(4); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w || kernel_d_ != kernel_d) AT_ERROR("Input shape and kernel shape wont match: (%d x %d x %d vs %d x %d x %d).", kernel_h, kernel_w, kernel_d, kernel_h_, kernel_w_, kernel_d_); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int depth_out = (depth + 2 * pad_d - (dilation_d * (kernel_d - 1) + 1)) / stride_d + 1; const int height_out = (height + 2 * pad_h - (dilation_h * 
(kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; if (ones.ndimension() != 3 || ones.size(0) * ones.size(1) * ones.size(2) < depth_out * height_out * width_out) { // Resize plane and fill with ones... ones = at::ones({depth_out, height_out, width_out}, input.options()); } // resize output output = output.view({batch, channels_out, depth_out, height_out, width_out}).zero_(); // resize temporary columns columns = at::zeros({channels * kernel_d * kernel_h * kernel_w, 1 * depth_out * height_out * width_out}, input.options()); output = output.view({output.size(0), group, output.size(1) / group, output.size(2), output.size(3), output.size(4)}); for (int b = 0; b < batch; b++) { modulated_deformable_3d_im2col_cuda( input[b], offset[b], mask[b], 1, channels, depth, height, width, depth_out, height_out, width_out, kernel_d, kernel_h, kernel_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d, dilation_h, dilation_w, deformable_group, columns); // divide into group weight = weight.view({group, weight.size(0) / group, weight.size(1), weight.size(2), weight.size(3), weight.size(4)}); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); for (int g = 0; g < group; g++) { output[b][g] = output[b][g] .flatten(1) .addmm_(weight[g].flatten(1), columns[g]) .view_as(output[b][g]); } weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4), weight.size(5)}); columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); } output = output.view({output.size(0), output.size(1) * output.size(2), output.size(3), output.size(4), output.size(5)}); if (with_bias) { output += bias.view({1, bias.size(0), 1, 1, 1}); } } void modulated_deform_conv_3d_cuda_backward( at::Tensor input, at::Tensor weight, at::Tensor bias, at::Tensor ones, at::Tensor offset, at::Tensor mask, at::Tensor columns, at::Tensor grad_input, at::Tensor grad_weight, at::Tensor grad_bias, at::Tensor grad_offset, at::Tensor grad_mask, at::Tensor grad_output, int kernel_d, int kernel_h, int kernel_w, int stride_d, int stride_h, int stride_w, int pad_d, int pad_h, int pad_w, int dilation_d, int dilation_h, int dilation_w, int group, int deformable_group, const bool with_bias) { AT_CHECK(input.is_contiguous(), "input tensor has to be contiguous"); AT_CHECK(weight.is_contiguous(), "weight tensor has to be contiguous"); at::DeviceGuard guard(input.device()); const int batch = input.size(0); const int channels = input.size(1); const int depth = input.size(2); const int height = input.size(3); const int width = input.size(4); const int channels_kernel = weight.size(1); const int kernel_d_ = weight.size(2); const int kernel_h_ = weight.size(3); const int kernel_w_ = weight.size(4); if (kernel_h_ != kernel_h || kernel_w_ != kernel_w || kernel_d_ != kernel_d) AT_ERROR("Input shape and kernel shape wont match: (%d x %d x %d vs %d x %d x %d).", kernel_h, kernel_w, kernel_d, kernel_h_, kernel_w_, kernel_d_); if (channels != channels_kernel * group) AT_ERROR("Input shape and kernel channels wont match: (%d vs %d).", channels, channels_kernel * group); const int depth_out = (depth + 2 * pad_d - (dilation_d * (kernel_d - 1) + 1)) / stride_d + 1; const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; if (ones.ndimension() != 3 || ones.size(0) * ones.size(1) * 
ones.size(2) < depth_out * height_out * width_out) { // Resize plane and fill with ones... ones = at::ones({depth_out, height_out, width_out}, input.options()); } grad_input = grad_input.view({batch, channels, depth, height, width}); columns = at::zeros({channels * kernel_d * kernel_h * kernel_w, depth_out * height_out * width_out}, input.options()); grad_output = grad_output.view({grad_output.size(0), group, grad_output.size(1) / group, grad_output.size(2), grad_output.size(3), grad_output.size(4)}); for (int b = 0; b < batch; b++) { // divide int group columns = columns.view({group, columns.size(0) / group, columns.size(1)}); weight = weight.view({group, weight.size(0) / group, weight.size(1), weight.size(2), weight.size(3), weight.size(4)}); for (int g = 0; g < group; g++) { columns[g].addmm_(weight[g].flatten(1).transpose(0, 1), grad_output[b][g].flatten(1), 0.0f, 1.0f); } columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); weight = weight.view({weight.size(0) * weight.size(1), weight.size(2), weight.size(3), weight.size(4), weight.size(5)}); // gradient w.r.t. input coordinate data modulated_deformable_3d_col2im_coord_cuda( columns, input[b], offset[b], mask[b], 1, channels, depth, height, width, depth_out, height_out, width_out, kernel_d, kernel_h, kernel_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d, dilation_h, dilation_w, deformable_group, grad_offset[b], grad_mask[b]); // gradient w.r.t. input data modulated_deformable_3d_col2im_cuda( columns, offset[b], mask[b], 1, channels, depth, height, width, depth_out, height_out, width_out, kernel_d, kernel_h, kernel_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d, dilation_h, dilation_w, deformable_group, grad_input[b]); // gradient w.r.t. weight, dWeight should accumulate across the batch and // group modulated_deformable_3d_im2col_cuda( input[b], offset[b], mask[b], 1, channels, depth, height, width, depth_out, height_out, width_out, kernel_d, kernel_h, kernel_w, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilation_d, dilation_h, dilation_w, deformable_group, columns); columns = columns.view({group, columns.size(0) / group, columns.size(1)}); grad_weight = grad_weight.view({group, grad_weight.size(0) / group, grad_weight.size(1), grad_weight.size(2), grad_weight.size(3), grad_weight.size(4)}); if (with_bias) grad_bias = grad_bias.view({group, grad_bias.size(0) / group}); for (int g = 0; g < group; g++) { grad_weight[g] = grad_weight[g] .flatten(1) .addmm_(grad_output[b][g].flatten(1), columns[g].transpose(0, 1)) .view_as(grad_weight[g]); if (with_bias) { grad_bias[g] = grad_bias[g] .view({-1, 1}) .addmm_(grad_output[b][g].flatten(1), ones.view({-1, 1})) .view(-1); } } columns = columns.view({columns.size(0) * columns.size(1), columns.size(2)}); grad_weight = grad_weight.view({grad_weight.size(0) * grad_weight.size(1), grad_weight.size(2), grad_weight.size(3), grad_weight.size(4), grad_weight.size(5)}); if (with_bias) grad_bias = grad_bias.view({grad_bias.size(0) * grad_bias.size(1)}); } grad_output = grad_output.view({grad_output.size(0) * grad_output.size(1), grad_output.size(2), grad_output.size(3), grad_output.size(4), grad_output.size(5)}); }
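Both the forward and backward passes above recompute the output extents with the same expression. The tiny reference helper below, which is not part of the extension, just restates that formula so the depth_out / height_out / width_out values can be checked by hand; the 16x64x64 input and 3x3x3 kernel in main are example numbers.

#include <cstdio>

// Reference-only helper: output length along one axis, matching the expressions
// used for depth_out / height_out / width_out in the code above.
static inline int conv_out_size(int in, int pad, int dilation, int kernel, int stride) {
    return (in + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1;
}

int main() {
    // Example: a 16-frame, 64x64 input with a 3x3x3 kernel, stride 1, padding 1.
    printf("depth_out  = %d\n", conv_out_size(16, 1, 1, 3, 1));  // 16
    printf("height_out = %d\n", conv_out_size(64, 1, 1, 3, 1));  // 64
    printf("width_out  = %d\n", conv_out_size(64, 1, 1, 3, 1));  // 64
    return 0;
}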
761764153ebf5de54e0852acf6d7624a38ed80c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include<math.h> #include<cuda.h> #include<hip/hip_runtime_api.h> __global__ void vecSum(double* devIn, int pow_step, int n) { //The thread ID (including its block ID) int i = blockIdx.x * blockDim.x + threadIdx.x; //Safety check to prevent unwanted threads. if(pow_step*i < n) //The two 'adjacent' elements of the array (or //the two children in the segment tree) are added and //the result is stored in the first element. devIn[pow_step*i] = devIn[pow_step*i+(pow_step/2)] + devIn[pow_step*i]; } int main() { //Size of the array int n = 15; //hostIn: The array accessible by the host. //devIn: The input array accessible by the device. double *hostIn, *devIn; //hostOut: The output value accessible by the host. double hostOut; //The total size of the array (in bytes) size_t b = n*sizeof(double); //Allocating memory to host and device copies of array hostIn = (double*)malloc(b); hipMalloc(&devIn, b); //Initialising the array. Here, we are randomly initialising the values. int i; printf("\nArray: "); for(i=0; i<n; i++) { hostIn[i] = rand()%10 + (float)rand()/RAND_MAX; printf("%f ", hostIn[i]); } //Copying the values in the host array to the device memory. hipMemcpy(devIn, hostIn, b, hipMemcpyHostToDevice); //Defining the block size and the grid size. int blk_size = 8, grd_size = (int)ceil((float)n/blk_size); //We are constructing a segment tree of the given array, where the internal //nodes store the sum of the subarray corresponding to the leaves in its //subtree. Each level in the tree can then be used to exhibit data-level parallelism. //The step variable indicates the total levels of the tree. int step = (int)ceil((float)(log(n)/log(2))); for(i=0; i<step; i++) //We will be calling the device function corresponding to each level of the //tree to achieve parallelism hipLaunchKernelGGL(( vecSum), dim3(grd_size), dim3(blk_size), 0, 0, devIn, pow(2, i+1), n); //Copying the value of the output (which is present as the first element in the devIn array) //to the host memory. hipMemcpy(&hostOut, &devIn[0], sizeof(double), hipMemcpyDeviceToHost); printf("\n\nFinal sum: %f\n", hostOut); //Freeing the host and the device memory. hipFree(devIn); free(hostIn); return 0; }
761764153ebf5de54e0852acf6d7624a38ed80c6.cu
#include<stdio.h> #include<stdlib.h> #include<math.h> #include<cuda.h> #include<cuda_runtime_api.h> __global__ void vecSum(double* devIn, int pow_step, int n) { //The thread ID (including its block ID) int i = blockIdx.x * blockDim.x + threadIdx.x; //Safety check to prevent unwanted threads. if(pow_step*i < n) //The two 'adjacent' elements of the array (or //the two children in the segment tree) are added and //the result is stored in the first element. devIn[pow_step*i] = devIn[pow_step*i+(pow_step/2)] + devIn[pow_step*i]; } int main() { //Size of the array int n = 15; //hostIn: The array accessible by the host. //devIn: The input array accessible by the device. double *hostIn, *devIn; //hostOut: The output value accessible by the host. double hostOut; //The total size of the array (in bytes) size_t b = n*sizeof(double); //Allocating memory to host and device copies of array hostIn = (double*)malloc(b); cudaMalloc(&devIn, b); //Initialising the array. Here, we are randomly initialising the values. int i; printf("\nArray: "); for(i=0; i<n; i++) { hostIn[i] = rand()%10 + (float)rand()/RAND_MAX; printf("%f ", hostIn[i]); } //Copying the values in the host array to the device memory. cudaMemcpy(devIn, hostIn, b, cudaMemcpyHostToDevice); //Defining the block size and the grid size. int blk_size = 8, grd_size = (int)ceil((float)n/blk_size); //We are constructing a segment tree of the given array, where the internal //nodes store the sum of the subarray corresponding to the leaves in its //subtree. Each level in the tree can then be used to exhibit data-level parallelism. //The step variable indicates the total levels of the tree. int step = (int)ceil((float)(log(n)/log(2))); for(i=0; i<step; i++) //We will be calling the device function corresponding to each level of the //tree to achieve parallelism vecSum<<<grd_size, blk_size>>>(devIn, pow(2, i+1), n); //Copying the value of the output (which is present as the first element in the devIn array) //to the host memory. cudaMemcpy(&hostOut, &devIn[0], sizeof(double), cudaMemcpyDeviceToHost); printf("\n\nFinal sum: %f\n", hostOut); //Freeing the host and the device memory. cudaFree(devIn); free(hostIn); return 0; }
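The kernel above processes one tree level per launch: at level l, "thread" i owns slot pow_step*i (pow_step = 2^(l+1)) and adds in the element pow_step/2 to its right. The CPU restatement below, not part of the original file, runs the same index pattern level by level so the partial sums accumulating in a[0] can be traced; n is set to 16 here so every right child exists.

#include <stdio.h>

/* CPU restatement of one level of vecSum above: "thread" i owns slot pow_step*i
   and adds in its right child pow_step/2 positions away. */
static void levelSumCPU(double* a, int pow_step, int n) {
    for (int i = 0; pow_step * i < n; i++)          /* one iteration per GPU thread */
        a[pow_step * i] = a[pow_step * i + (pow_step / 2)] + a[pow_step * i];
}

int main(void) {
    const int n = 16;                       /* a power of two, so every right child exists */
    double a[n];
    for (int i = 0; i < n; i++) a[i] = i;   /* expected total: 0 + 1 + ... + 15 = 120 */

    int step = 0;                           /* number of tree levels: ceil(log2(n)) */
    while ((1 << step) < n) step++;

    for (int lvl = 0; lvl < step; lvl++) {
        levelSumCPU(a, 1 << (lvl + 1), n);
        printf("after level %d (pow_step=%d): a[0] = %f\n", lvl, 1 << (lvl + 1), a[0]);
    }
    return 0;
}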
c87301ad1677676d0524a252d3e49b56133b7100.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp" namespace HugeCTR { namespace { // for one-hot, the value_index mapping is linear (no need to use hashtable) template <typename TypeKey> __global__ void hash_key_value_index_mapping_kernel(size_t nnz, int slot_num, const uint32_t *mapping_offsets, const TypeKey *hash_key, size_t *hash_value_index) { size_t gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < nnz) { int slot_id = gid % slot_num; hash_value_index[gid] = hash_key[gid] - mapping_offsets[slot_id]; } } } // namespace /** * forward propagation on each GPU for LocalizedSlotSparseEmbeddingOneHot. * Because there is no hashtable in this class, so there must be a mapping table * between input valud_index and local value_index. * @param batch_size batch size for the current mini-batch computation. * @param slot_num the number of slots for current GPU * @param row_offset row_offset (CSR format of input sparse tensors) * @param hash_key value (CSR format of input sparse tensors) * @param nnz non-zero feature number per batch * @param mapping_offsets the mapping between input value_index and local value_index * @param hash_value_index hash table value_index(row index of embedding) * @param stream cuda stream */ template <typename TypeHashKey> void SparseEmbeddingFunctors::forward_mapping_per_gpu(size_t batch_size, size_t slot_num, const Tensor2<TypeHashKey> &hash_key, size_t nnz, const Tensor2<uint32_t> &mapping_offsets, Tensor2<size_t> &hash_value_index, hipStream_t stream) { // remove hashtable get_insert(), and do linear mapping between key and value_index if (nnz > 0) { hipLaunchKernelGGL(( hash_key_value_index_mapping_kernel), dim3((nnz + 255) / 256), dim3(256), 0, stream, nnz, slot_num, mapping_offsets.get_ptr(), hash_key.get_ptr(), hash_value_index.get_ptr()); } return; } template void SparseEmbeddingFunctors::forward_mapping_per_gpu<unsigned int>( size_t batch_size, size_t slot_num, const Tensor2<unsigned int> &hash_key, size_t nnz, const Tensor2<uint32_t> &mapping_offsets, Tensor2<size_t> &hash_value_index, hipStream_t stream); template void SparseEmbeddingFunctors::forward_mapping_per_gpu<long long>( size_t batch_size, size_t slot_num, const Tensor2<long long> &hash_key, size_t nnz, const Tensor2<uint32_t> &mapping_offsets, Tensor2<size_t> &hash_value_index, hipStream_t stream); } // namespace HugeCTR
c87301ad1677676d0524a252d3e49b56133b7100.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp" namespace HugeCTR { namespace { // for one-hot, the value_index mapping is linear (no need to use hashtable) template <typename TypeKey> __global__ void hash_key_value_index_mapping_kernel(size_t nnz, int slot_num, const uint32_t *mapping_offsets, const TypeKey *hash_key, size_t *hash_value_index) { size_t gid = blockIdx.x * blockDim.x + threadIdx.x; if (gid < nnz) { int slot_id = gid % slot_num; hash_value_index[gid] = hash_key[gid] - mapping_offsets[slot_id]; } } } // namespace /** * forward propagation on each GPU for LocalizedSlotSparseEmbeddingOneHot. * Because there is no hashtable in this class, so there must be a mapping table * between input valud_index and local value_index. * @param batch_size batch size for the current mini-batch computation. * @param slot_num the number of slots for current GPU * @param row_offset row_offset (CSR format of input sparse tensors) * @param hash_key value (CSR format of input sparse tensors) * @param nnz non-zero feature number per batch * @param mapping_offsets the mapping between input value_index and local value_index * @param hash_value_index hash table value_index(row index of embedding) * @param stream cuda stream */ template <typename TypeHashKey> void SparseEmbeddingFunctors::forward_mapping_per_gpu(size_t batch_size, size_t slot_num, const Tensor2<TypeHashKey> &hash_key, size_t nnz, const Tensor2<uint32_t> &mapping_offsets, Tensor2<size_t> &hash_value_index, cudaStream_t stream) { // remove hashtable get_insert(), and do linear mapping between key and value_index if (nnz > 0) { hash_key_value_index_mapping_kernel<<<(nnz + 255) / 256, 256, 0, stream>>>( nnz, slot_num, mapping_offsets.get_ptr(), hash_key.get_ptr(), hash_value_index.get_ptr()); } return; } template void SparseEmbeddingFunctors::forward_mapping_per_gpu<unsigned int>( size_t batch_size, size_t slot_num, const Tensor2<unsigned int> &hash_key, size_t nnz, const Tensor2<uint32_t> &mapping_offsets, Tensor2<size_t> &hash_value_index, cudaStream_t stream); template void SparseEmbeddingFunctors::forward_mapping_per_gpu<long long>( size_t batch_size, size_t slot_num, const Tensor2<long long> &hash_key, size_t nnz, const Tensor2<uint32_t> &mapping_offsets, Tensor2<size_t> &hash_value_index, cudaStream_t stream); } // namespace HugeCTR
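The mapping kernel above relies on each slot owning a contiguous key range, so the local embedding row is just the key minus that slot's offset. The stand-alone sketch below, which is illustrative and not HugeCTR code, reproduces that arithmetic on a two-slot example with made-up offsets and keys.

#include <cstdio>
#include <cuda_runtime.h>

// Illustrative stand-alone version of the linear one-hot mapping: keys of slot s
// live in a contiguous range starting at offsets[s], so the local embedding row
// is simply key - offsets[slot].
__global__ void linear_mapping(size_t nnz, int slot_num, const unsigned int* offsets,
                               const unsigned int* keys, size_t* value_index) {
    size_t gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < nnz) {
        int slot_id = gid % slot_num;     // keys are stored slot-interleaved, as above
        value_index[gid] = keys[gid] - offsets[slot_id];
    }
}

int main() {
    const int slot_num = 2;
    const size_t nnz = 4;
    // Slot 0 owns keys starting at 0, slot 1 at 1000 -- example values only.
    unsigned int h_offsets[slot_num] = {0, 1000};
    unsigned int h_keys[nnz] = {7, 1003, 42, 1999};   // slot0, slot1, slot0, slot1
    size_t h_out[nnz];

    unsigned int *d_offsets, *d_keys;
    size_t* d_out;
    cudaMalloc(&d_offsets, sizeof(h_offsets));
    cudaMalloc(&d_keys, sizeof(h_keys));
    cudaMalloc(&d_out, sizeof(h_out));
    cudaMemcpy(d_offsets, h_offsets, sizeof(h_offsets), cudaMemcpyHostToDevice);
    cudaMemcpy(d_keys, h_keys, sizeof(h_keys), cudaMemcpyHostToDevice);

    linear_mapping<<<(nnz + 255) / 256, 256>>>(nnz, slot_num, d_offsets, d_keys, d_out);
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);

    for (size_t i = 0; i < nnz; i++)
        printf("key %u -> local row %zu\n", h_keys[i], h_out[i]);   // 7, 3, 42, 999

    cudaFree(d_offsets); cudaFree(d_keys); cudaFree(d_out);
    return 0;
}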
8d587de6be17185e86b89beac7f6672ff9fbb7b3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "stencil_1d.hip" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in = NULL; hipMalloc(&in, XSIZE*YSIZE); float *out = NULL; hipMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( stencil_1d), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( stencil_1d), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( stencil_1d), dim3(gridBlock),dim3(threadBlock), 0, 0, in,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
8d587de6be17185e86b89beac7f6672ff9fbb7b3.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "stencil_1d.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *in = NULL; cudaMalloc(&in, XSIZE*YSIZE); float *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); stencil_1d<<<gridBlock,threadBlock>>>(in,out); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { stencil_1d<<<gridBlock,threadBlock>>>(in,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { stencil_1d<<<gridBlock,threadBlock>>>(in,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
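The benchmark above times 1000 launches with steady_clock; because kernel launches are asynchronous, a host-side timer around the loop measures enqueue time plus whatever queueing back-pressure occurs rather than device execution alone. One common alternative, shown as a hedged sketch below rather than a drop-in change, brackets the launches with CUDA events so the reported figure reflects work on the device; dummy_kernel merely stands in for stencil_1d, whose source is not shown here.

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical kernel standing in for stencil_1d (the real one lives in stencil_1d.cu).
__global__ void dummy_kernel(float* in, float* out) { out[threadIdx.x] = in[threadIdx.x]; }

int main() {
    float *in, *out;
    cudaMalloc(&in, 1024 * sizeof(float));
    cudaMalloc(&out, 1024 * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Events are recorded into the same (default) stream as the launches, so the
    // elapsed time covers the kernels themselves rather than only their enqueueing.
    cudaEventRecord(start);
    for (int i = 0; i < 1000; i++) dummy_kernel<<<1, 1024>>>(in, out);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("total: %.3f ms over 1000 launches\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(in);
    cudaFree(out);
    return 0;
}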
856c2b5208fd874be2dd247774915ee5c401675f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_accelerate_kernel; int xdim0_accelerate_kernel_h = -1; __constant__ int ydim0_accelerate_kernel; int ydim0_accelerate_kernel_h = -1; __constant__ int xdim1_accelerate_kernel; int xdim1_accelerate_kernel_h = -1; __constant__ int ydim1_accelerate_kernel; int ydim1_accelerate_kernel_h = -1; __constant__ int xdim2_accelerate_kernel; int xdim2_accelerate_kernel_h = -1; __constant__ int ydim2_accelerate_kernel; int ydim2_accelerate_kernel_h = -1; __constant__ int xdim3_accelerate_kernel; int xdim3_accelerate_kernel_h = -1; __constant__ int ydim3_accelerate_kernel; int ydim3_accelerate_kernel_h = -1; __constant__ int xdim4_accelerate_kernel; int xdim4_accelerate_kernel_h = -1; __constant__ int ydim4_accelerate_kernel; int ydim4_accelerate_kernel_h = -1; __constant__ int xdim5_accelerate_kernel; int xdim5_accelerate_kernel_h = -1; __constant__ int ydim5_accelerate_kernel; int ydim5_accelerate_kernel_h = -1; __constant__ int xdim6_accelerate_kernel; int xdim6_accelerate_kernel_h = -1; __constant__ int ydim6_accelerate_kernel; int ydim6_accelerate_kernel_h = -1; __constant__ int xdim7_accelerate_kernel; int xdim7_accelerate_kernel_h = -1; __constant__ int ydim7_accelerate_kernel; int ydim7_accelerate_kernel_h = -1; __constant__ int xdim8_accelerate_kernel; int xdim8_accelerate_kernel_h = -1; __constant__ int ydim8_accelerate_kernel; int ydim8_accelerate_kernel_h = -1; __constant__ int xdim9_accelerate_kernel; int xdim9_accelerate_kernel_h = -1; __constant__ int ydim9_accelerate_kernel; int ydim9_accelerate_kernel_h = -1; __constant__ int xdim10_accelerate_kernel; int xdim10_accelerate_kernel_h = -1; __constant__ int ydim10_accelerate_kernel; int ydim10_accelerate_kernel_h = -1; __constant__ int xdim11_accelerate_kernel; int xdim11_accelerate_kernel_h = -1; __constant__ int ydim11_accelerate_kernel; int ydim11_accelerate_kernel_h = -1; __constant__ int xdim12_accelerate_kernel; int xdim12_accelerate_kernel_h = -1; __constant__ int ydim12_accelerate_kernel; int ydim12_accelerate_kernel_h = -1; __constant__ int xdim13_accelerate_kernel; int xdim13_accelerate_kernel_h = -1; __constant__ int ydim13_accelerate_kernel; int ydim13_accelerate_kernel_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_accelerate_kernel*(y)+xdim0_accelerate_kernel*ydim0_accelerate_kernel*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_accelerate_kernel*(y)+xdim1_accelerate_kernel*ydim1_accelerate_kernel*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_accelerate_kernel*(y)+xdim2_accelerate_kernel*ydim2_accelerate_kernel*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_accelerate_kernel*(y)+xdim3_accelerate_kernel*ydim3_accelerate_kernel*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_accelerate_kernel*(y)+xdim4_accelerate_kernel*ydim4_accelerate_kernel*(z)) #define OPS_ACC5(x,y,z) (x+xdim5_accelerate_kernel*(y)+xdim5_accelerate_kernel*ydim5_accelerate_kernel*(z)) #define OPS_ACC6(x,y,z) (x+xdim6_accelerate_kernel*(y)+xdim6_accelerate_kernel*ydim6_accelerate_kernel*(z)) #define OPS_ACC7(x,y,z) (x+xdim7_accelerate_kernel*(y)+xdim7_accelerate_kernel*ydim7_accelerate_kernel*(z)) #define OPS_ACC8(x,y,z) (x+xdim8_accelerate_kernel*(y)+xdim8_accelerate_kernel*ydim8_accelerate_kernel*(z)) #define OPS_ACC9(x,y,z) (x+xdim9_accelerate_kernel*(y)+xdim9_accelerate_kernel*ydim9_accelerate_kernel*(z)) #define OPS_ACC10(x,y,z) (x+xdim10_accelerate_kernel*(y)+xdim10_accelerate_kernel*ydim10_accelerate_kernel*(z)) #define OPS_ACC11(x,y,z) 
(x+xdim11_accelerate_kernel*(y)+xdim11_accelerate_kernel*ydim11_accelerate_kernel*(z)) #define OPS_ACC12(x,y,z) (x+xdim12_accelerate_kernel*(y)+xdim12_accelerate_kernel*ydim12_accelerate_kernel*(z)) #define OPS_ACC13(x,y,z) (x+xdim13_accelerate_kernel*(y)+xdim13_accelerate_kernel*ydim13_accelerate_kernel*(z)) //user function __device__ void accelerate_kernel( const double *density0, const double *volume, double *stepbymass, const double *xvel0, double *xvel1, const double *xarea, const double *pressure, const double *yvel0, double *yvel1, const double *yarea, const double *viscosity, const double *zvel0, double *zvel1, const double *zarea) { double nodal_mass = 0.0; nodal_mass =(density0[OPS_ACC0(-1,-1, 0)] * volume[OPS_ACC1(-1,-1, 0)] + density0[OPS_ACC0( 0,-1, 0)] * volume[OPS_ACC1( 0,-1, 0)] + density0[OPS_ACC0( 0, 0, 0)] * volume[OPS_ACC1( 0, 0, 0)] + density0[OPS_ACC0(-1, 0, 0)] * volume[OPS_ACC1(-1, 0, 0)] + density0[OPS_ACC0(-1,-1,-1)] * volume[OPS_ACC1(-1,-1,-1)] + density0[OPS_ACC0( 0,-1,-1)] * volume[OPS_ACC1( 0,-1,-1)] + density0[OPS_ACC0( 0, 0,-1)] * volume[OPS_ACC1( 0, 0,-1)] + density0[OPS_ACC0(-1, 0,-1)] * volume[OPS_ACC1(-1, 0,-1)]) * 0.125; stepbymass[OPS_ACC2(0,0,0)] = 0.25*dt / nodal_mass; xvel1[OPS_ACC4(0,0,0)] = xvel0[OPS_ACC3(0,0,0)] - stepbymass[OPS_ACC2(0,0,0)] * ( xarea[OPS_ACC5(0,0,0)] * ( pressure[OPS_ACC6(0,0,0)] - pressure[OPS_ACC6(-1,0,0)] ) + xarea[OPS_ACC5(0,-1,0)] * ( pressure[OPS_ACC6(0,-1,0)] - pressure[OPS_ACC6(-1,-1,0)] ) + xarea[OPS_ACC5(0,0,-1)] * ( pressure[OPS_ACC6(0,0,-1)] - pressure[OPS_ACC6(-1,0,-1)] ) + xarea[OPS_ACC5(0,-1,-1)] * ( pressure[OPS_ACC6(0,-1,-1)] - pressure[OPS_ACC6(-1,-1,-1)] ) ); yvel1[OPS_ACC8(0,0,0)] = yvel0[OPS_ACC7(0,0,0)] - stepbymass[OPS_ACC2(0,0,0)] * ( yarea[OPS_ACC9(0,0,0)] * ( pressure[OPS_ACC6(0,0,0)] - pressure[OPS_ACC6(0,-1,0)] ) + yarea[OPS_ACC9(-1,0,0)] * ( pressure[OPS_ACC6(-1,0,0)] - pressure[OPS_ACC6(-1,-1,0)] ) + yarea[OPS_ACC9(0,0,-1)] * ( pressure[OPS_ACC6(0,0,-1)] - pressure[OPS_ACC6(0,-1,-1)] ) + yarea[OPS_ACC9(-1,0,-1)]* ( pressure[OPS_ACC6(-1,0,-1)] - pressure[OPS_ACC6(-1,-1,-1)] ) ); zvel1[OPS_ACC12(0,0,0)] = zvel0[OPS_ACC11(0,0,0)] - stepbymass[OPS_ACC2(0,0,0)] * ( zarea[OPS_ACC13(0,0,0)] * ( pressure[OPS_ACC6(0,0,0)] - pressure[OPS_ACC6(0,0,-1)] ) + zarea[OPS_ACC13(0,-1,0)] * ( pressure[OPS_ACC6(0,-1,0)] - pressure[OPS_ACC6(0,-1,-1)] ) + zarea[OPS_ACC13(-1,0,0)] * ( pressure[OPS_ACC6(-1,0,0)] - pressure[OPS_ACC6(-1,0,-1)] ) + zarea[OPS_ACC13(-1,-1,0)]* ( pressure[OPS_ACC6(-1,-1,0)] - pressure[OPS_ACC6(-1,-1,-1)] ) ); xvel1[OPS_ACC4(0,0,0)] = xvel1[OPS_ACC4(0,0,0)] - stepbymass[OPS_ACC2(0,0,0)] * ( xarea[OPS_ACC5(0,0,0)] * ( viscosity[OPS_ACC10(0,0,0)] - viscosity[OPS_ACC10(-1,0,0)] ) + xarea[OPS_ACC5(0,-1,0)] * ( viscosity[OPS_ACC10(0,-1,0)] - viscosity[OPS_ACC10(-1,-1,0)] ) + xarea[OPS_ACC5(0,0,-1)] * ( viscosity[OPS_ACC10(0,0,-1)] - viscosity[OPS_ACC10(-1,0,-1)] ) + xarea[OPS_ACC5(0,-1,-1)]* ( viscosity[OPS_ACC10(0,-1,-1)] - viscosity[OPS_ACC10(-1,-1,-1)] ) ); yvel1[OPS_ACC8(0,0,0)] = yvel1[OPS_ACC8(0,0,0)] - stepbymass[OPS_ACC2(0,0,0)] * ( yarea[OPS_ACC9(0,0,0)] * ( viscosity[OPS_ACC10(0,0,0)] - viscosity[OPS_ACC10(0,-1,0)] ) + yarea[OPS_ACC9(-1,0,0)] * ( viscosity[OPS_ACC10(-1,0,0)] - viscosity[OPS_ACC10(-1,-1,0)] ) + yarea[OPS_ACC9(0,0,-1)] * ( viscosity[OPS_ACC10(0,0,-1)] - viscosity[OPS_ACC10(0,-1,-1)] ) + yarea[OPS_ACC9(-1,0,-1)]* ( viscosity[OPS_ACC10(-1,0,-1)]- viscosity[OPS_ACC10(-1,-1,-1)] ) ); zvel1[OPS_ACC12(0,0,0)] = zvel1[OPS_ACC12(0,0,0)] - stepbymass[OPS_ACC2(0,0,0)] * ( 
zarea[OPS_ACC13(0,0,0)] * ( viscosity[OPS_ACC10(0,0,0)] - viscosity[OPS_ACC10(0,0,-1)] ) + zarea[OPS_ACC13(0,-1,0)] * ( viscosity[OPS_ACC10(0,-1,0)] - viscosity[OPS_ACC10(0,-1,-1)] ) + zarea[OPS_ACC13(-1,0,0)] * ( viscosity[OPS_ACC10(-1,0,0)] - viscosity[OPS_ACC10(-1,0,-1)] ) + zarea[OPS_ACC13(-1,-1,0)]* ( viscosity[OPS_ACC10(-1,-1,0)]- viscosity[OPS_ACC10(-1,-1,-1)] ) ); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 #undef OPS_ACC11 #undef OPS_ACC12 #undef OPS_ACC13 __global__ void ops_accelerate_kernel( const double* __restrict arg0, const double* __restrict arg1, double* __restrict arg2, const double* __restrict arg3, double* __restrict arg4, const double* __restrict arg5, const double* __restrict arg6, const double* __restrict arg7, double* __restrict arg8, const double* __restrict arg9, const double* __restrict arg10, const double* __restrict arg11, double* __restrict arg12, const double* __restrict arg13, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_accelerate_kernel + idx_z * 1 * xdim0_accelerate_kernel * ydim0_accelerate_kernel; arg1 += idx_x * 1 + idx_y * 1 * xdim1_accelerate_kernel + idx_z * 1 * xdim1_accelerate_kernel * ydim1_accelerate_kernel; arg2 += idx_x * 1 + idx_y * 1 * xdim2_accelerate_kernel + idx_z * 1 * xdim2_accelerate_kernel * ydim2_accelerate_kernel; arg3 += idx_x * 1 + idx_y * 1 * xdim3_accelerate_kernel + idx_z * 1 * xdim3_accelerate_kernel * ydim3_accelerate_kernel; arg4 += idx_x * 1 + idx_y * 1 * xdim4_accelerate_kernel + idx_z * 1 * xdim4_accelerate_kernel * ydim4_accelerate_kernel; arg5 += idx_x * 1 + idx_y * 1 * xdim5_accelerate_kernel + idx_z * 1 * xdim5_accelerate_kernel * ydim5_accelerate_kernel; arg6 += idx_x * 1 + idx_y * 1 * xdim6_accelerate_kernel + idx_z * 1 * xdim6_accelerate_kernel * ydim6_accelerate_kernel; arg7 += idx_x * 1 + idx_y * 1 * xdim7_accelerate_kernel + idx_z * 1 * xdim7_accelerate_kernel * ydim7_accelerate_kernel; arg8 += idx_x * 1 + idx_y * 1 * xdim8_accelerate_kernel + idx_z * 1 * xdim8_accelerate_kernel * ydim8_accelerate_kernel; arg9 += idx_x * 1 + idx_y * 1 * xdim9_accelerate_kernel + idx_z * 1 * xdim9_accelerate_kernel * ydim9_accelerate_kernel; arg10 += idx_x * 1 + idx_y * 1 * xdim10_accelerate_kernel + idx_z * 1 * xdim10_accelerate_kernel * ydim10_accelerate_kernel; arg11 += idx_x * 1 + idx_y * 1 * xdim11_accelerate_kernel + idx_z * 1 * xdim11_accelerate_kernel * ydim11_accelerate_kernel; arg12 += idx_x * 1 + idx_y * 1 * xdim12_accelerate_kernel + idx_z * 1 * xdim12_accelerate_kernel * ydim12_accelerate_kernel; arg13 += idx_x * 1 + idx_y * 1 * xdim13_accelerate_kernel + idx_z * 1 * xdim13_accelerate_kernel * ydim13_accelerate_kernel; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { accelerate_kernel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13); } } // host stub function void ops_par_loop_accelerate_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13) { ops_arg args[14] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13}; 
ops_timing_realloc(1,"accelerate_kernel"); OPS_kernels[1].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]*args[2].dat->dim; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]*args[3].dat->dim; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]*args[4].dat->dim; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]*args[5].dat->dim; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]*args[6].dat->dim; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]*args[7].dat->dim; int ydim7 = args[7].dat->size[1]; int xdim8 = args[8].dat->size[0]*args[8].dat->dim; int ydim8 = args[8].dat->size[1]; int xdim9 = args[9].dat->size[0]*args[9].dat->dim; int ydim9 = args[9].dat->size[1]; int xdim10 = args[10].dat->size[0]*args[10].dat->dim; int ydim10 = args[10].dat->size[1]; int xdim11 = args[11].dat->size[0]*args[11].dat->dim; int ydim11 = args[11].dat->size[1]; int xdim12 = args[12].dat->size[0]*args[12].dat->dim; int ydim12 = args[12].dat->size[1]; int xdim13 = args[13].dat->size[0]*args[13].dat->dim; int ydim13 = args[13].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_accelerate_kernel_h || ydim0 != ydim0_accelerate_kernel_h || xdim1 != xdim1_accelerate_kernel_h || ydim1 != ydim1_accelerate_kernel_h || xdim2 != xdim2_accelerate_kernel_h || ydim2 != ydim2_accelerate_kernel_h || xdim3 != xdim3_accelerate_kernel_h || ydim3 != ydim3_accelerate_kernel_h || xdim4 != xdim4_accelerate_kernel_h || ydim4 != ydim4_accelerate_kernel_h || xdim5 != xdim5_accelerate_kernel_h || ydim5 != ydim5_accelerate_kernel_h || xdim6 != xdim6_accelerate_kernel_h || ydim6 != ydim6_accelerate_kernel_h || xdim7 != xdim7_accelerate_kernel_h || ydim7 != ydim7_accelerate_kernel_h || xdim8 != xdim8_accelerate_kernel_h || ydim8 != ydim8_accelerate_kernel_h || xdim9 != xdim9_accelerate_kernel_h || ydim9 != ydim9_accelerate_kernel_h || xdim10 != xdim10_accelerate_kernel_h || ydim10 != ydim10_accelerate_kernel_h || xdim11 != xdim11_accelerate_kernel_h || ydim11 != ydim11_accelerate_kernel_h || xdim12 != xdim12_accelerate_kernel_h || ydim12 != ydim12_accelerate_kernel_h || xdim13 != xdim13_accelerate_kernel_h || ydim13 != ydim13_accelerate_kernel_h) { hipMemcpyToSymbol( xdim0_accelerate_kernel, &xdim0, sizeof(int) ); xdim0_accelerate_kernel_h = xdim0; hipMemcpyToSymbol( ydim0_accelerate_kernel, &ydim0, sizeof(int) ); ydim0_accelerate_kernel_h = ydim0; 
hipMemcpyToSymbol( xdim1_accelerate_kernel, &xdim1, sizeof(int) ); xdim1_accelerate_kernel_h = xdim1; hipMemcpyToSymbol( ydim1_accelerate_kernel, &ydim1, sizeof(int) ); ydim1_accelerate_kernel_h = ydim1; hipMemcpyToSymbol( xdim2_accelerate_kernel, &xdim2, sizeof(int) ); xdim2_accelerate_kernel_h = xdim2; hipMemcpyToSymbol( ydim2_accelerate_kernel, &ydim2, sizeof(int) ); ydim2_accelerate_kernel_h = ydim2; hipMemcpyToSymbol( xdim3_accelerate_kernel, &xdim3, sizeof(int) ); xdim3_accelerate_kernel_h = xdim3; hipMemcpyToSymbol( ydim3_accelerate_kernel, &ydim3, sizeof(int) ); ydim3_accelerate_kernel_h = ydim3; hipMemcpyToSymbol( xdim4_accelerate_kernel, &xdim4, sizeof(int) ); xdim4_accelerate_kernel_h = xdim4; hipMemcpyToSymbol( ydim4_accelerate_kernel, &ydim4, sizeof(int) ); ydim4_accelerate_kernel_h = ydim4; hipMemcpyToSymbol( xdim5_accelerate_kernel, &xdim5, sizeof(int) ); xdim5_accelerate_kernel_h = xdim5; hipMemcpyToSymbol( ydim5_accelerate_kernel, &ydim5, sizeof(int) ); ydim5_accelerate_kernel_h = ydim5; hipMemcpyToSymbol( xdim6_accelerate_kernel, &xdim6, sizeof(int) ); xdim6_accelerate_kernel_h = xdim6; hipMemcpyToSymbol( ydim6_accelerate_kernel, &ydim6, sizeof(int) ); ydim6_accelerate_kernel_h = ydim6; hipMemcpyToSymbol( xdim7_accelerate_kernel, &xdim7, sizeof(int) ); xdim7_accelerate_kernel_h = xdim7; hipMemcpyToSymbol( ydim7_accelerate_kernel, &ydim7, sizeof(int) ); ydim7_accelerate_kernel_h = ydim7; hipMemcpyToSymbol( xdim8_accelerate_kernel, &xdim8, sizeof(int) ); xdim8_accelerate_kernel_h = xdim8; hipMemcpyToSymbol( ydim8_accelerate_kernel, &ydim8, sizeof(int) ); ydim8_accelerate_kernel_h = ydim8; hipMemcpyToSymbol( xdim9_accelerate_kernel, &xdim9, sizeof(int) ); xdim9_accelerate_kernel_h = xdim9; hipMemcpyToSymbol( ydim9_accelerate_kernel, &ydim9, sizeof(int) ); ydim9_accelerate_kernel_h = ydim9; hipMemcpyToSymbol( xdim10_accelerate_kernel, &xdim10, sizeof(int) ); xdim10_accelerate_kernel_h = xdim10; hipMemcpyToSymbol( ydim10_accelerate_kernel, &ydim10, sizeof(int) ); ydim10_accelerate_kernel_h = ydim10; hipMemcpyToSymbol( xdim11_accelerate_kernel, &xdim11, sizeof(int) ); xdim11_accelerate_kernel_h = xdim11; hipMemcpyToSymbol( ydim11_accelerate_kernel, &ydim11, sizeof(int) ); ydim11_accelerate_kernel_h = ydim11; hipMemcpyToSymbol( xdim12_accelerate_kernel, &xdim12, sizeof(int) ); xdim12_accelerate_kernel_h = xdim12; hipMemcpyToSymbol( ydim12_accelerate_kernel, &ydim12, sizeof(int) ); ydim12_accelerate_kernel_h = ydim12; hipMemcpyToSymbol( xdim13_accelerate_kernel, &xdim13, sizeof(int) ); xdim13_accelerate_kernel_h = xdim13; hipMemcpyToSymbol( ydim13_accelerate_kernel, &ydim13, sizeof(int) ); ydim13_accelerate_kernel_h = ydim13; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; int dat7 = args[7].dat->elem_size; int dat8 = args[8].dat->elem_size; int dat9 = args[9].dat->elem_size; int dat10 = args[10].dat->elem_size; int dat11 = args[11].dat->elem_size; int dat12 = args[12].dat->elem_size; int dat13 = args[13].dat->elem_size; char *p_a[14]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) 
d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif //OPS_MPI int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif //OPS_MPI int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif //OPS_MPI int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif //OPS_MPI int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] 
= args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif //OPS_MPI int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d]; #endif //OPS_MPI int base7 = dat7 * 1 * (start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]); base7 = base7+ dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]); base7 = base7+ dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]); p_a[7] = (char *)args[7].data_d + base7; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d] + OPS_sub_dat_list[args[8].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d]; #endif //OPS_MPI int base8 = dat8 * 1 * (start[0] * args[8].stencil->stride[0] - args[8].dat->base[0] - d_m[0]); base8 = base8+ dat8 * args[8].dat->size[0] * (start[1] * args[8].stencil->stride[1] - args[8].dat->base[1] - d_m[1]); base8 = base8+ dat8 * args[8].dat->size[0] * args[8].dat->size[1] * (start[2] * args[8].stencil->stride[2] - args[8].dat->base[2] - d_m[2]); p_a[8] = (char *)args[8].data_d + base8; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d] + OPS_sub_dat_list[args[9].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d]; #endif //OPS_MPI int base9 = dat9 * 1 * (start[0] * args[9].stencil->stride[0] - args[9].dat->base[0] - d_m[0]); base9 = base9+ dat9 * args[9].dat->size[0] * (start[1] * args[9].stencil->stride[1] - args[9].dat->base[1] - d_m[1]); base9 = base9+ dat9 * args[9].dat->size[0] * args[9].dat->size[1] * (start[2] * args[9].stencil->stride[2] - args[9].dat->base[2] - d_m[2]); p_a[9] = (char *)args[9].data_d + base9; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d] + OPS_sub_dat_list[args[10].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d]; #endif //OPS_MPI int base10 = dat10 * 1 * (start[0] * args[10].stencil->stride[0] - args[10].dat->base[0] - d_m[0]); base10 = base10+ dat10 * args[10].dat->size[0] * (start[1] * args[10].stencil->stride[1] - args[10].dat->base[1] - d_m[1]); base10 = base10+ dat10 * args[10].dat->size[0] * args[10].dat->size[1] * (start[2] * args[10].stencil->stride[2] - args[10].dat->base[2] - d_m[2]); p_a[10] = (char *)args[10].data_d + base10; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[11].dat->d_m[d] + OPS_sub_dat_list[args[11].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[11].dat->d_m[d]; #endif //OPS_MPI int base11 = dat11 * 1 * (start[0] * args[11].stencil->stride[0] - args[11].dat->base[0] - d_m[0]); base11 = base11+ dat11 * args[11].dat->size[0] * (start[1] * args[11].stencil->stride[1] - args[11].dat->base[1] - d_m[1]); base11 = base11+ dat11 * args[11].dat->size[0] * args[11].dat->size[1] * 
(start[2] * args[11].stencil->stride[2] - args[11].dat->base[2] - d_m[2]); p_a[11] = (char *)args[11].data_d + base11; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d] + OPS_sub_dat_list[args[12].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d]; #endif //OPS_MPI int base12 = dat12 * 1 * (start[0] * args[12].stencil->stride[0] - args[12].dat->base[0] - d_m[0]); base12 = base12+ dat12 * args[12].dat->size[0] * (start[1] * args[12].stencil->stride[1] - args[12].dat->base[1] - d_m[1]); base12 = base12+ dat12 * args[12].dat->size[0] * args[12].dat->size[1] * (start[2] * args[12].stencil->stride[2] - args[12].dat->base[2] - d_m[2]); p_a[12] = (char *)args[12].data_d + base12; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d] + OPS_sub_dat_list[args[13].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d]; #endif //OPS_MPI int base13 = dat13 * 1 * (start[0] * args[13].stencil->stride[0] - args[13].dat->base[0] - d_m[0]); base13 = base13+ dat13 * args[13].dat->size[0] * (start[1] * args[13].stencil->stride[1] - args[13].dat->base[1] - d_m[1]); base13 = base13+ dat13 * args[13].dat->size[0] * args[13].dat->size[1] * (start[2] * args[13].stencil->stride[2] - args[13].dat->base[2] - d_m[2]); p_a[13] = (char *)args[13].data_d + base13; ops_H_D_exchanges_device(args, 14); ops_halo_exchanges(args,14,range); ops_timers_core(&c1,&t1); OPS_kernels[1].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_accelerate_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], (double *)p_a[8], (double *)p_a[9], (double *)p_a[10], (double *)p_a[11], (double *)p_a[12], (double *)p_a[13],x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[1].time += t2-t1; ops_set_dirtybit_device(args, 14); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[8],range); ops_set_halo_dirtybit3(&args[12],range); //Update kernel record OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg1); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg2); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg3); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg4); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg5); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg6); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg7); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg8); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg9); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg10); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg11); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg12); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg13); }
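The OPS_ACCn macros above map a relative (x,y,z) stencil offset to a flat array offset using per-dataset pitches held in __constant__ memory, and the kernel wrapper first shifts each base pointer to the thread's own cell, so OPS_ACC0(-1,-1,0), for example, reaches the south-west neighbour in the same z-plane. Below is a minimal host-side sketch of the same arithmetic; the function name and pitch values are illustrative, not part of the OPS API.

#include <cstdio>

// Reproduces the OPS_ACC-style flat index: x + xdim*y + xdim*ydim*z.
static inline int flat_offset(int x, int y, int z, int xdim, int ydim) {
  return x + xdim * y + xdim * ydim * z;
}

int main() {
  const int xdim = 8, ydim = 8;                         // assumed padded pitches of one dat
  int base = flat_offset(3, 4, 2, xdim, ydim);          // the cell a given thread owns
  int west = base + flat_offset(-1, 0, 0, xdim, ydim);  // OPS_ACC(-1,0,0) after the pointer shift
  std::printf("base=%d west=%d\n", base, west);         // prints base=163 west=162
  return 0;
}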
856c2b5208fd874be2dd247774915ee5c401675f.cu
// // auto-generated by ops.py // __constant__ int xdim0_accelerate_kernel; int xdim0_accelerate_kernel_h = -1; __constant__ int ydim0_accelerate_kernel; int ydim0_accelerate_kernel_h = -1; __constant__ int xdim1_accelerate_kernel; int xdim1_accelerate_kernel_h = -1; __constant__ int ydim1_accelerate_kernel; int ydim1_accelerate_kernel_h = -1; __constant__ int xdim2_accelerate_kernel; int xdim2_accelerate_kernel_h = -1; __constant__ int ydim2_accelerate_kernel; int ydim2_accelerate_kernel_h = -1; __constant__ int xdim3_accelerate_kernel; int xdim3_accelerate_kernel_h = -1; __constant__ int ydim3_accelerate_kernel; int ydim3_accelerate_kernel_h = -1; __constant__ int xdim4_accelerate_kernel; int xdim4_accelerate_kernel_h = -1; __constant__ int ydim4_accelerate_kernel; int ydim4_accelerate_kernel_h = -1; __constant__ int xdim5_accelerate_kernel; int xdim5_accelerate_kernel_h = -1; __constant__ int ydim5_accelerate_kernel; int ydim5_accelerate_kernel_h = -1; __constant__ int xdim6_accelerate_kernel; int xdim6_accelerate_kernel_h = -1; __constant__ int ydim6_accelerate_kernel; int ydim6_accelerate_kernel_h = -1; __constant__ int xdim7_accelerate_kernel; int xdim7_accelerate_kernel_h = -1; __constant__ int ydim7_accelerate_kernel; int ydim7_accelerate_kernel_h = -1; __constant__ int xdim8_accelerate_kernel; int xdim8_accelerate_kernel_h = -1; __constant__ int ydim8_accelerate_kernel; int ydim8_accelerate_kernel_h = -1; __constant__ int xdim9_accelerate_kernel; int xdim9_accelerate_kernel_h = -1; __constant__ int ydim9_accelerate_kernel; int ydim9_accelerate_kernel_h = -1; __constant__ int xdim10_accelerate_kernel; int xdim10_accelerate_kernel_h = -1; __constant__ int ydim10_accelerate_kernel; int ydim10_accelerate_kernel_h = -1; __constant__ int xdim11_accelerate_kernel; int xdim11_accelerate_kernel_h = -1; __constant__ int ydim11_accelerate_kernel; int ydim11_accelerate_kernel_h = -1; __constant__ int xdim12_accelerate_kernel; int xdim12_accelerate_kernel_h = -1; __constant__ int ydim12_accelerate_kernel; int ydim12_accelerate_kernel_h = -1; __constant__ int xdim13_accelerate_kernel; int xdim13_accelerate_kernel_h = -1; __constant__ int ydim13_accelerate_kernel; int ydim13_accelerate_kernel_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_accelerate_kernel*(y)+xdim0_accelerate_kernel*ydim0_accelerate_kernel*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_accelerate_kernel*(y)+xdim1_accelerate_kernel*ydim1_accelerate_kernel*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_accelerate_kernel*(y)+xdim2_accelerate_kernel*ydim2_accelerate_kernel*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_accelerate_kernel*(y)+xdim3_accelerate_kernel*ydim3_accelerate_kernel*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_accelerate_kernel*(y)+xdim4_accelerate_kernel*ydim4_accelerate_kernel*(z)) #define OPS_ACC5(x,y,z) (x+xdim5_accelerate_kernel*(y)+xdim5_accelerate_kernel*ydim5_accelerate_kernel*(z)) #define OPS_ACC6(x,y,z) (x+xdim6_accelerate_kernel*(y)+xdim6_accelerate_kernel*ydim6_accelerate_kernel*(z)) #define OPS_ACC7(x,y,z) (x+xdim7_accelerate_kernel*(y)+xdim7_accelerate_kernel*ydim7_accelerate_kernel*(z)) #define OPS_ACC8(x,y,z) (x+xdim8_accelerate_kernel*(y)+xdim8_accelerate_kernel*ydim8_accelerate_kernel*(z)) #define OPS_ACC9(x,y,z) (x+xdim9_accelerate_kernel*(y)+xdim9_accelerate_kernel*ydim9_accelerate_kernel*(z)) #define OPS_ACC10(x,y,z) (x+xdim10_accelerate_kernel*(y)+xdim10_accelerate_kernel*ydim10_accelerate_kernel*(z)) #define OPS_ACC11(x,y,z) (x+xdim11_accelerate_kernel*(y)+xdim11_accelerate_kernel*ydim11_accelerate_kernel*(z)) #define 
OPS_ACC12(x,y,z) (x+xdim12_accelerate_kernel*(y)+xdim12_accelerate_kernel*ydim12_accelerate_kernel*(z)) #define OPS_ACC13(x,y,z) (x+xdim13_accelerate_kernel*(y)+xdim13_accelerate_kernel*ydim13_accelerate_kernel*(z)) //user function __device__ void accelerate_kernel( const double *density0, const double *volume, double *stepbymass, const double *xvel0, double *xvel1, const double *xarea, const double *pressure, const double *yvel0, double *yvel1, const double *yarea, const double *viscosity, const double *zvel0, double *zvel1, const double *zarea) { double nodal_mass = 0.0; nodal_mass =(density0[OPS_ACC0(-1,-1, 0)] * volume[OPS_ACC1(-1,-1, 0)] + density0[OPS_ACC0( 0,-1, 0)] * volume[OPS_ACC1( 0,-1, 0)] + density0[OPS_ACC0( 0, 0, 0)] * volume[OPS_ACC1( 0, 0, 0)] + density0[OPS_ACC0(-1, 0, 0)] * volume[OPS_ACC1(-1, 0, 0)] + density0[OPS_ACC0(-1,-1,-1)] * volume[OPS_ACC1(-1,-1,-1)] + density0[OPS_ACC0( 0,-1,-1)] * volume[OPS_ACC1( 0,-1,-1)] + density0[OPS_ACC0( 0, 0,-1)] * volume[OPS_ACC1( 0, 0,-1)] + density0[OPS_ACC0(-1, 0,-1)] * volume[OPS_ACC1(-1, 0,-1)]) * 0.125; stepbymass[OPS_ACC2(0,0,0)] = 0.25*dt / nodal_mass; xvel1[OPS_ACC4(0,0,0)] = xvel0[OPS_ACC3(0,0,0)] - stepbymass[OPS_ACC2(0,0,0)] * ( xarea[OPS_ACC5(0,0,0)] * ( pressure[OPS_ACC6(0,0,0)] - pressure[OPS_ACC6(-1,0,0)] ) + xarea[OPS_ACC5(0,-1,0)] * ( pressure[OPS_ACC6(0,-1,0)] - pressure[OPS_ACC6(-1,-1,0)] ) + xarea[OPS_ACC5(0,0,-1)] * ( pressure[OPS_ACC6(0,0,-1)] - pressure[OPS_ACC6(-1,0,-1)] ) + xarea[OPS_ACC5(0,-1,-1)] * ( pressure[OPS_ACC6(0,-1,-1)] - pressure[OPS_ACC6(-1,-1,-1)] ) ); yvel1[OPS_ACC8(0,0,0)] = yvel0[OPS_ACC7(0,0,0)] - stepbymass[OPS_ACC2(0,0,0)] * ( yarea[OPS_ACC9(0,0,0)] * ( pressure[OPS_ACC6(0,0,0)] - pressure[OPS_ACC6(0,-1,0)] ) + yarea[OPS_ACC9(-1,0,0)] * ( pressure[OPS_ACC6(-1,0,0)] - pressure[OPS_ACC6(-1,-1,0)] ) + yarea[OPS_ACC9(0,0,-1)] * ( pressure[OPS_ACC6(0,0,-1)] - pressure[OPS_ACC6(0,-1,-1)] ) + yarea[OPS_ACC9(-1,0,-1)]* ( pressure[OPS_ACC6(-1,0,-1)] - pressure[OPS_ACC6(-1,-1,-1)] ) ); zvel1[OPS_ACC12(0,0,0)] = zvel0[OPS_ACC11(0,0,0)] - stepbymass[OPS_ACC2(0,0,0)] * ( zarea[OPS_ACC13(0,0,0)] * ( pressure[OPS_ACC6(0,0,0)] - pressure[OPS_ACC6(0,0,-1)] ) + zarea[OPS_ACC13(0,-1,0)] * ( pressure[OPS_ACC6(0,-1,0)] - pressure[OPS_ACC6(0,-1,-1)] ) + zarea[OPS_ACC13(-1,0,0)] * ( pressure[OPS_ACC6(-1,0,0)] - pressure[OPS_ACC6(-1,0,-1)] ) + zarea[OPS_ACC13(-1,-1,0)]* ( pressure[OPS_ACC6(-1,-1,0)] - pressure[OPS_ACC6(-1,-1,-1)] ) ); xvel1[OPS_ACC4(0,0,0)] = xvel1[OPS_ACC4(0,0,0)] - stepbymass[OPS_ACC2(0,0,0)] * ( xarea[OPS_ACC5(0,0,0)] * ( viscosity[OPS_ACC10(0,0,0)] - viscosity[OPS_ACC10(-1,0,0)] ) + xarea[OPS_ACC5(0,-1,0)] * ( viscosity[OPS_ACC10(0,-1,0)] - viscosity[OPS_ACC10(-1,-1,0)] ) + xarea[OPS_ACC5(0,0,-1)] * ( viscosity[OPS_ACC10(0,0,-1)] - viscosity[OPS_ACC10(-1,0,-1)] ) + xarea[OPS_ACC5(0,-1,-1)]* ( viscosity[OPS_ACC10(0,-1,-1)] - viscosity[OPS_ACC10(-1,-1,-1)] ) ); yvel1[OPS_ACC8(0,0,0)] = yvel1[OPS_ACC8(0,0,0)] - stepbymass[OPS_ACC2(0,0,0)] * ( yarea[OPS_ACC9(0,0,0)] * ( viscosity[OPS_ACC10(0,0,0)] - viscosity[OPS_ACC10(0,-1,0)] ) + yarea[OPS_ACC9(-1,0,0)] * ( viscosity[OPS_ACC10(-1,0,0)] - viscosity[OPS_ACC10(-1,-1,0)] ) + yarea[OPS_ACC9(0,0,-1)] * ( viscosity[OPS_ACC10(0,0,-1)] - viscosity[OPS_ACC10(0,-1,-1)] ) + yarea[OPS_ACC9(-1,0,-1)]* ( viscosity[OPS_ACC10(-1,0,-1)]- viscosity[OPS_ACC10(-1,-1,-1)] ) ); zvel1[OPS_ACC12(0,0,0)] = zvel1[OPS_ACC12(0,0,0)] - stepbymass[OPS_ACC2(0,0,0)] * ( zarea[OPS_ACC13(0,0,0)] * ( viscosity[OPS_ACC10(0,0,0)] - viscosity[OPS_ACC10(0,0,-1)] ) + 
zarea[OPS_ACC13(0,-1,0)] * ( viscosity[OPS_ACC10(0,-1,0)] - viscosity[OPS_ACC10(0,-1,-1)] ) + zarea[OPS_ACC13(-1,0,0)] * ( viscosity[OPS_ACC10(-1,0,0)] - viscosity[OPS_ACC10(-1,0,-1)] ) + zarea[OPS_ACC13(-1,-1,0)]* ( viscosity[OPS_ACC10(-1,-1,0)]- viscosity[OPS_ACC10(-1,-1,-1)] ) ); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 #undef OPS_ACC11 #undef OPS_ACC12 #undef OPS_ACC13 __global__ void ops_accelerate_kernel( const double* __restrict arg0, const double* __restrict arg1, double* __restrict arg2, const double* __restrict arg3, double* __restrict arg4, const double* __restrict arg5, const double* __restrict arg6, const double* __restrict arg7, double* __restrict arg8, const double* __restrict arg9, const double* __restrict arg10, const double* __restrict arg11, double* __restrict arg12, const double* __restrict arg13, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_accelerate_kernel + idx_z * 1 * xdim0_accelerate_kernel * ydim0_accelerate_kernel; arg1 += idx_x * 1 + idx_y * 1 * xdim1_accelerate_kernel + idx_z * 1 * xdim1_accelerate_kernel * ydim1_accelerate_kernel; arg2 += idx_x * 1 + idx_y * 1 * xdim2_accelerate_kernel + idx_z * 1 * xdim2_accelerate_kernel * ydim2_accelerate_kernel; arg3 += idx_x * 1 + idx_y * 1 * xdim3_accelerate_kernel + idx_z * 1 * xdim3_accelerate_kernel * ydim3_accelerate_kernel; arg4 += idx_x * 1 + idx_y * 1 * xdim4_accelerate_kernel + idx_z * 1 * xdim4_accelerate_kernel * ydim4_accelerate_kernel; arg5 += idx_x * 1 + idx_y * 1 * xdim5_accelerate_kernel + idx_z * 1 * xdim5_accelerate_kernel * ydim5_accelerate_kernel; arg6 += idx_x * 1 + idx_y * 1 * xdim6_accelerate_kernel + idx_z * 1 * xdim6_accelerate_kernel * ydim6_accelerate_kernel; arg7 += idx_x * 1 + idx_y * 1 * xdim7_accelerate_kernel + idx_z * 1 * xdim7_accelerate_kernel * ydim7_accelerate_kernel; arg8 += idx_x * 1 + idx_y * 1 * xdim8_accelerate_kernel + idx_z * 1 * xdim8_accelerate_kernel * ydim8_accelerate_kernel; arg9 += idx_x * 1 + idx_y * 1 * xdim9_accelerate_kernel + idx_z * 1 * xdim9_accelerate_kernel * ydim9_accelerate_kernel; arg10 += idx_x * 1 + idx_y * 1 * xdim10_accelerate_kernel + idx_z * 1 * xdim10_accelerate_kernel * ydim10_accelerate_kernel; arg11 += idx_x * 1 + idx_y * 1 * xdim11_accelerate_kernel + idx_z * 1 * xdim11_accelerate_kernel * ydim11_accelerate_kernel; arg12 += idx_x * 1 + idx_y * 1 * xdim12_accelerate_kernel + idx_z * 1 * xdim12_accelerate_kernel * ydim12_accelerate_kernel; arg13 += idx_x * 1 + idx_y * 1 * xdim13_accelerate_kernel + idx_z * 1 * xdim13_accelerate_kernel * ydim13_accelerate_kernel; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { accelerate_kernel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13); } } // host stub function void ops_par_loop_accelerate_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10, ops_arg arg11, ops_arg arg12, ops_arg arg13) { ops_arg args[14] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12, arg13}; ops_timing_realloc(1,"accelerate_kernel"); OPS_kernels[1].count++; //compute locally allocated 
range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]*args[2].dat->dim; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]*args[3].dat->dim; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]*args[4].dat->dim; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]*args[5].dat->dim; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]*args[6].dat->dim; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]*args[7].dat->dim; int ydim7 = args[7].dat->size[1]; int xdim8 = args[8].dat->size[0]*args[8].dat->dim; int ydim8 = args[8].dat->size[1]; int xdim9 = args[9].dat->size[0]*args[9].dat->dim; int ydim9 = args[9].dat->size[1]; int xdim10 = args[10].dat->size[0]*args[10].dat->dim; int ydim10 = args[10].dat->size[1]; int xdim11 = args[11].dat->size[0]*args[11].dat->dim; int ydim11 = args[11].dat->size[1]; int xdim12 = args[12].dat->size[0]*args[12].dat->dim; int ydim12 = args[12].dat->size[1]; int xdim13 = args[13].dat->size[0]*args[13].dat->dim; int ydim13 = args[13].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_accelerate_kernel_h || ydim0 != ydim0_accelerate_kernel_h || xdim1 != xdim1_accelerate_kernel_h || ydim1 != ydim1_accelerate_kernel_h || xdim2 != xdim2_accelerate_kernel_h || ydim2 != ydim2_accelerate_kernel_h || xdim3 != xdim3_accelerate_kernel_h || ydim3 != ydim3_accelerate_kernel_h || xdim4 != xdim4_accelerate_kernel_h || ydim4 != ydim4_accelerate_kernel_h || xdim5 != xdim5_accelerate_kernel_h || ydim5 != ydim5_accelerate_kernel_h || xdim6 != xdim6_accelerate_kernel_h || ydim6 != ydim6_accelerate_kernel_h || xdim7 != xdim7_accelerate_kernel_h || ydim7 != ydim7_accelerate_kernel_h || xdim8 != xdim8_accelerate_kernel_h || ydim8 != ydim8_accelerate_kernel_h || xdim9 != xdim9_accelerate_kernel_h || ydim9 != ydim9_accelerate_kernel_h || xdim10 != xdim10_accelerate_kernel_h || ydim10 != ydim10_accelerate_kernel_h || xdim11 != xdim11_accelerate_kernel_h || ydim11 != ydim11_accelerate_kernel_h || xdim12 != xdim12_accelerate_kernel_h || ydim12 != ydim12_accelerate_kernel_h || xdim13 != xdim13_accelerate_kernel_h || ydim13 != ydim13_accelerate_kernel_h) { cudaMemcpyToSymbol( xdim0_accelerate_kernel, &xdim0, sizeof(int) ); xdim0_accelerate_kernel_h = xdim0; cudaMemcpyToSymbol( ydim0_accelerate_kernel, &ydim0, sizeof(int) ); ydim0_accelerate_kernel_h = ydim0; cudaMemcpyToSymbol( xdim1_accelerate_kernel, &xdim1, sizeof(int) ); xdim1_accelerate_kernel_h = 
xdim1; cudaMemcpyToSymbol( ydim1_accelerate_kernel, &ydim1, sizeof(int) ); ydim1_accelerate_kernel_h = ydim1; cudaMemcpyToSymbol( xdim2_accelerate_kernel, &xdim2, sizeof(int) ); xdim2_accelerate_kernel_h = xdim2; cudaMemcpyToSymbol( ydim2_accelerate_kernel, &ydim2, sizeof(int) ); ydim2_accelerate_kernel_h = ydim2; cudaMemcpyToSymbol( xdim3_accelerate_kernel, &xdim3, sizeof(int) ); xdim3_accelerate_kernel_h = xdim3; cudaMemcpyToSymbol( ydim3_accelerate_kernel, &ydim3, sizeof(int) ); ydim3_accelerate_kernel_h = ydim3; cudaMemcpyToSymbol( xdim4_accelerate_kernel, &xdim4, sizeof(int) ); xdim4_accelerate_kernel_h = xdim4; cudaMemcpyToSymbol( ydim4_accelerate_kernel, &ydim4, sizeof(int) ); ydim4_accelerate_kernel_h = ydim4; cudaMemcpyToSymbol( xdim5_accelerate_kernel, &xdim5, sizeof(int) ); xdim5_accelerate_kernel_h = xdim5; cudaMemcpyToSymbol( ydim5_accelerate_kernel, &ydim5, sizeof(int) ); ydim5_accelerate_kernel_h = ydim5; cudaMemcpyToSymbol( xdim6_accelerate_kernel, &xdim6, sizeof(int) ); xdim6_accelerate_kernel_h = xdim6; cudaMemcpyToSymbol( ydim6_accelerate_kernel, &ydim6, sizeof(int) ); ydim6_accelerate_kernel_h = ydim6; cudaMemcpyToSymbol( xdim7_accelerate_kernel, &xdim7, sizeof(int) ); xdim7_accelerate_kernel_h = xdim7; cudaMemcpyToSymbol( ydim7_accelerate_kernel, &ydim7, sizeof(int) ); ydim7_accelerate_kernel_h = ydim7; cudaMemcpyToSymbol( xdim8_accelerate_kernel, &xdim8, sizeof(int) ); xdim8_accelerate_kernel_h = xdim8; cudaMemcpyToSymbol( ydim8_accelerate_kernel, &ydim8, sizeof(int) ); ydim8_accelerate_kernel_h = ydim8; cudaMemcpyToSymbol( xdim9_accelerate_kernel, &xdim9, sizeof(int) ); xdim9_accelerate_kernel_h = xdim9; cudaMemcpyToSymbol( ydim9_accelerate_kernel, &ydim9, sizeof(int) ); ydim9_accelerate_kernel_h = ydim9; cudaMemcpyToSymbol( xdim10_accelerate_kernel, &xdim10, sizeof(int) ); xdim10_accelerate_kernel_h = xdim10; cudaMemcpyToSymbol( ydim10_accelerate_kernel, &ydim10, sizeof(int) ); ydim10_accelerate_kernel_h = ydim10; cudaMemcpyToSymbol( xdim11_accelerate_kernel, &xdim11, sizeof(int) ); xdim11_accelerate_kernel_h = xdim11; cudaMemcpyToSymbol( ydim11_accelerate_kernel, &ydim11, sizeof(int) ); ydim11_accelerate_kernel_h = ydim11; cudaMemcpyToSymbol( xdim12_accelerate_kernel, &xdim12, sizeof(int) ); xdim12_accelerate_kernel_h = xdim12; cudaMemcpyToSymbol( ydim12_accelerate_kernel, &ydim12, sizeof(int) ); ydim12_accelerate_kernel_h = ydim12; cudaMemcpyToSymbol( xdim13_accelerate_kernel, &xdim13, sizeof(int) ); xdim13_accelerate_kernel_h = xdim13; cudaMemcpyToSymbol( ydim13_accelerate_kernel, &ydim13, sizeof(int) ); ydim13_accelerate_kernel_h = ydim13; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; int dat7 = args[7].dat->elem_size; int dat8 = args[8].dat->elem_size; int dat9 = args[9].dat->elem_size; int dat10 = args[10].dat->elem_size; int dat11 = args[11].dat->elem_size; int dat12 = args[12].dat->elem_size; int dat13 = args[13].dat->elem_size; char *p_a[14]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * 
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif //OPS_MPI int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif //OPS_MPI int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif //OPS_MPI int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif //OPS_MPI int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5+ dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + 
OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif //OPS_MPI int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6+ dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d]; #endif //OPS_MPI int base7 = dat7 * 1 * (start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]); base7 = base7+ dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]); base7 = base7+ dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]); p_a[7] = (char *)args[7].data_d + base7; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d] + OPS_sub_dat_list[args[8].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d]; #endif //OPS_MPI int base8 = dat8 * 1 * (start[0] * args[8].stencil->stride[0] - args[8].dat->base[0] - d_m[0]); base8 = base8+ dat8 * args[8].dat->size[0] * (start[1] * args[8].stencil->stride[1] - args[8].dat->base[1] - d_m[1]); base8 = base8+ dat8 * args[8].dat->size[0] * args[8].dat->size[1] * (start[2] * args[8].stencil->stride[2] - args[8].dat->base[2] - d_m[2]); p_a[8] = (char *)args[8].data_d + base8; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d] + OPS_sub_dat_list[args[9].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d]; #endif //OPS_MPI int base9 = dat9 * 1 * (start[0] * args[9].stencil->stride[0] - args[9].dat->base[0] - d_m[0]); base9 = base9+ dat9 * args[9].dat->size[0] * (start[1] * args[9].stencil->stride[1] - args[9].dat->base[1] - d_m[1]); base9 = base9+ dat9 * args[9].dat->size[0] * args[9].dat->size[1] * (start[2] * args[9].stencil->stride[2] - args[9].dat->base[2] - d_m[2]); p_a[9] = (char *)args[9].data_d + base9; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d] + OPS_sub_dat_list[args[10].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d]; #endif //OPS_MPI int base10 = dat10 * 1 * (start[0] * args[10].stencil->stride[0] - args[10].dat->base[0] - d_m[0]); base10 = base10+ dat10 * args[10].dat->size[0] * (start[1] * args[10].stencil->stride[1] - args[10].dat->base[1] - d_m[1]); base10 = base10+ dat10 * args[10].dat->size[0] * args[10].dat->size[1] * (start[2] * args[10].stencil->stride[2] - args[10].dat->base[2] - d_m[2]); p_a[10] = (char *)args[10].data_d + base10; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[11].dat->d_m[d] + OPS_sub_dat_list[args[11].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[11].dat->d_m[d]; #endif //OPS_MPI int base11 = dat11 * 1 * (start[0] * args[11].stencil->stride[0] - args[11].dat->base[0] - d_m[0]); base11 = base11+ dat11 * args[11].dat->size[0] * (start[1] * args[11].stencil->stride[1] - args[11].dat->base[1] - d_m[1]); base11 = base11+ dat11 * args[11].dat->size[0] * args[11].dat->size[1] * (start[2] * 
args[11].stencil->stride[2] - args[11].dat->base[2] - d_m[2]); p_a[11] = (char *)args[11].data_d + base11; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d] + OPS_sub_dat_list[args[12].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[12].dat->d_m[d]; #endif //OPS_MPI int base12 = dat12 * 1 * (start[0] * args[12].stencil->stride[0] - args[12].dat->base[0] - d_m[0]); base12 = base12+ dat12 * args[12].dat->size[0] * (start[1] * args[12].stencil->stride[1] - args[12].dat->base[1] - d_m[1]); base12 = base12+ dat12 * args[12].dat->size[0] * args[12].dat->size[1] * (start[2] * args[12].stencil->stride[2] - args[12].dat->base[2] - d_m[2]); p_a[12] = (char *)args[12].data_d + base12; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d] + OPS_sub_dat_list[args[13].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[13].dat->d_m[d]; #endif //OPS_MPI int base13 = dat13 * 1 * (start[0] * args[13].stencil->stride[0] - args[13].dat->base[0] - d_m[0]); base13 = base13+ dat13 * args[13].dat->size[0] * (start[1] * args[13].stencil->stride[1] - args[13].dat->base[1] - d_m[1]); base13 = base13+ dat13 * args[13].dat->size[0] * args[13].dat->size[1] * (start[2] * args[13].stencil->stride[2] - args[13].dat->base[2] - d_m[2]); p_a[13] = (char *)args[13].data_d + base13; ops_H_D_exchanges_device(args, 14); ops_halo_exchanges(args,14,range); ops_timers_core(&c1,&t1); OPS_kernels[1].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_accelerate_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], (double *)p_a[8], (double *)p_a[9], (double *)p_a[10], (double *)p_a[11], (double *)p_a[12], (double *)p_a[13],x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[1].time += t2-t1; ops_set_dirtybit_device(args, 14); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[8],range); ops_set_halo_dirtybit3(&args[12],range); //Update kernel record OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg1); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg2); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg3); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg4); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg5); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg6); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg7); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg8); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg9); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg10); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg11); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg12); OPS_kernels[1].transfer += ops_compute_transfer(dim, range, &arg13); }
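Both host stubs above copy the pitch values into __constant__ symbols only when the cached host shadows (the *_h variables initialised to -1) disagree with the current dat sizes, so repeated launches over the same datasets skip the cudaMemcpyToSymbol traffic. A stripped-down sketch of that update-on-change pattern follows; the symbol and function names are made up for illustration.

#include <cuda_runtime.h>

__constant__ int xdim0_example;        // device-side pitch read by a kernel
static int xdim0_example_h = -1;       // host-side shadow; -1 means "never uploaded"

// Upload the pitch only if it changed since the last launch, mirroring the
// dirty-check block in the generated host stubs above.
static void set_pitch_if_changed(int xdim0) {
  if (xdim0 != xdim0_example_h) {
    cudaMemcpyToSymbol(xdim0_example, &xdim0, sizeof(int));
    xdim0_example_h = xdim0;
  }
}

int main() {
  set_pitch_if_changed(128);   // first call uploads to constant memory
  set_pitch_if_changed(128);   // same pitch: no copy is issued
  set_pitch_if_changed(256);   // pitch changed: uploaded again
  return 0;
}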
7de1d79dab4b256c7cd602cb2dbfc4966287e123.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cmath> #include <cuml/cuml.hpp> #include <cuml/ensemble/randomforest.hpp> #include <utility> #include "benchmark.cuh" namespace ML { namespace Bench { namespace rf { struct Params { DatasetParams data; BlobsParams blobs; RF_params rf; }; template <typename D> struct RFClassifierModel {}; template <> struct RFClassifierModel<float> { ML::RandomForestClassifierF model; }; template <> struct RFClassifierModel<double> { ML::RandomForestClassifierD model; }; template <typename D> class RFClassifier : public BlobsFixture<D> { public: RFClassifier(const std::string& name, const Params& p) : BlobsFixture<D>(name, p.data, p.blobs), rfParams(p.rf) {} protected: void runBenchmark(::benchmark::State& state) override { using MLCommon::Bench::CudaEventTimer; if (this->params.rowMajor) { state.SkipWithError("RFClassifier only supports col-major inputs"); } this->loopOnState(state, [this]() { auto* mPtr = &model.model; mPtr->trees = nullptr; fit(*this->handle, mPtr, this->data.X, this->params.nrows, this->params.ncols, this->data.y, this->params.nclasses, rfParams); CUDA_CHECK(hipStreamSynchronize(this->stream)); }); } private: RFClassifierModel<D> model; RF_params rfParams; }; template <typename D> std::vector<Params> getInputs() { struct Triplets { int nrows, ncols, nclasses; }; std::vector<Params> out; Params p; p.data.rowMajor = false; p.blobs = {10.0, // cluster_std false, // shuffle -10.0, // center_box_min 10.0, // center_box_max 2152953ULL}; //seed set_rf_params(p.rf, // Output RF parameters 500, // n_trees true, // bootstrap 1.f, // max_samples 1234, // seed 8); // n_streams set_tree_params(p.rf.tree_params, // Output tree parameters 10, // max_depth, this is anyway changed below (1 << 20), // max_leaves 0.3, // max_features, just a placeholder value, // anyway changed below 32, // n_bins 1, // split_algo 3, // min_samples_leaf 3, // min_samples_split 0.0f, // min_impurity_decrease true, // bootstrap_features ML::CRITERION::GINI, // split_criterion false, // quantile_per_tree false, // use_experimental_backend 128); // max_batch_size std::vector<Triplets> rowcols = { {160000, 64, 2}, {640000, 64, 8}, {1184000, 968, 2}, // Mimicking Bosch dataset }; for (auto& rc : rowcols) { // Let's run Bosch only for float type if (!std::is_same<D, float>::value && rc.ncols == 968) continue; p.data.nrows = rc.nrows; p.data.ncols = rc.ncols; p.data.nclasses = rc.nclasses; p.rf.tree_params.max_features = 1.f / std::sqrt(float(rc.ncols)); for (auto max_depth : std::vector<int>({7, 9})) { p.rf.tree_params.max_depth = max_depth; out.push_back(p); } } return out; } ML_BENCH_REGISTER(Params, RFClassifier<float>, "blobs", getInputs<float>()); ML_BENCH_REGISTER(Params, RFClassifier<double>, "blobs", getInputs<double>()); } // end namespace rf } // end namespace Bench } // end namespace ML
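RFClassifierModel above selects ML::RandomForestClassifierF or ML::RandomForestClassifierD by specialising an otherwise empty struct on the data type. The same trait-style pattern is shown in isolation below, with placeholder model types that are not cuML's.

#include <cstdio>

struct ExampleModelF { const char *name = "float model"; };
struct ExampleModelD { const char *name = "double model"; };

// Primary template is intentionally empty; only the two specialisations are
// usable, mirroring how RFClassifierModel<D> wraps the F/D classifier types.
template <typename D> struct ModelFor {};
template <> struct ModelFor<float>  { ExampleModelF model; };
template <> struct ModelFor<double> { ExampleModelD model; };

int main() {
  ModelFor<float> mf;
  ModelFor<double> md;
  std::printf("%s / %s\n", mf.model.name, md.model.name);
  return 0;
}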
7de1d79dab4b256c7cd602cb2dbfc4966287e123.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cmath> #include <cuml/cuml.hpp> #include <cuml/ensemble/randomforest.hpp> #include <utility> #include "benchmark.cuh" namespace ML { namespace Bench { namespace rf { struct Params { DatasetParams data; BlobsParams blobs; RF_params rf; }; template <typename D> struct RFClassifierModel {}; template <> struct RFClassifierModel<float> { ML::RandomForestClassifierF model; }; template <> struct RFClassifierModel<double> { ML::RandomForestClassifierD model; }; template <typename D> class RFClassifier : public BlobsFixture<D> { public: RFClassifier(const std::string& name, const Params& p) : BlobsFixture<D>(name, p.data, p.blobs), rfParams(p.rf) {} protected: void runBenchmark(::benchmark::State& state) override { using MLCommon::Bench::CudaEventTimer; if (this->params.rowMajor) { state.SkipWithError("RFClassifier only supports col-major inputs"); } this->loopOnState(state, [this]() { auto* mPtr = &model.model; mPtr->trees = nullptr; fit(*this->handle, mPtr, this->data.X, this->params.nrows, this->params.ncols, this->data.y, this->params.nclasses, rfParams); CUDA_CHECK(cudaStreamSynchronize(this->stream)); }); } private: RFClassifierModel<D> model; RF_params rfParams; }; template <typename D> std::vector<Params> getInputs() { struct Triplets { int nrows, ncols, nclasses; }; std::vector<Params> out; Params p; p.data.rowMajor = false; p.blobs = {10.0, // cluster_std false, // shuffle -10.0, // center_box_min 10.0, // center_box_max 2152953ULL}; //seed set_rf_params(p.rf, // Output RF parameters 500, // n_trees true, // bootstrap 1.f, // max_samples 1234, // seed 8); // n_streams set_tree_params(p.rf.tree_params, // Output tree parameters 10, // max_depth, this is anyway changed below (1 << 20), // max_leaves 0.3, // max_features, just a placeholder value, // anyway changed below 32, // n_bins 1, // split_algo 3, // min_samples_leaf 3, // min_samples_split 0.0f, // min_impurity_decrease true, // bootstrap_features ML::CRITERION::GINI, // split_criterion false, // quantile_per_tree false, // use_experimental_backend 128); // max_batch_size std::vector<Triplets> rowcols = { {160000, 64, 2}, {640000, 64, 8}, {1184000, 968, 2}, // Mimicking Bosch dataset }; for (auto& rc : rowcols) { // Let's run Bosch only for float type if (!std::is_same<D, float>::value && rc.ncols == 968) continue; p.data.nrows = rc.nrows; p.data.ncols = rc.ncols; p.data.nclasses = rc.nclasses; p.rf.tree_params.max_features = 1.f / std::sqrt(float(rc.ncols)); for (auto max_depth : std::vector<int>({7, 9})) { p.rf.tree_params.max_depth = max_depth; out.push_back(p); } } return out; } ML_BENCH_REGISTER(Params, RFClassifier<float>, "blobs", getInputs<float>()); ML_BENCH_REGISTER(Params, RFClassifier<double>, "blobs", getInputs<double>()); } // end namespace rf } // end namespace Bench } // end namespace ML
d2e610a5b58f1db76ea97453a35922428e2eab86.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>

static const int NUM_THREADS = 128;
static const int MAX_BLOCKS = 32768;

__global__ void picoIntIntSorterComputeA(const int blocksSoFar,
                                         const int * const inputKeys,
                                         int * const uniqueFlags,
                                         const int numKeys)
{
  const int index = (blocksSoFar + blockIdx.x) * blockDim.x + threadIdx.x;
  if (index == 0) uniqueFlags[index] = 1;
  else if (inputKeys[index] != inputKeys[index - 1]) uniqueFlags[index] = 1;
  else uniqueFlags[index] = 0;
}

__global__ void picoIntIntSorterComputeC(const int * const gpuA,
                                         const int * const gpuB,
                                         int * const gpuC,
                                         const int numKeys)
{
  __shared__ int b0;
  if (threadIdx.x == 0) b0 = gpuB[0];
  __syncthreads();
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < numKeys;
       index += gridDim.x * blockDim.x)
  {
    if (gpuA[index]) gpuC[b0 - gpuB[index]] = index;
  }
}

__global__ void picoIntIntSorterComputeD(const int blocksSoFar,
                                         const int * const gpuC,
                                         int * const gpuD,
                                         const int numUniqueKeys,
                                         const int numKeys)
{
  const int index = (blocksSoFar + blockIdx.x) * blockDim.x + threadIdx.x;
  if (index == numUniqueKeys - 1) gpuD[index] = numKeys - gpuC[index];
  else gpuD[index] = gpuC[index + 1] - gpuC[index];
}

__global__ void picoIntIntSorterSetCompactedKeysKernel(const int blocksSoFar,
                                                       const int * const keys,
                                                       const int * const input,
                                                       int * const output,
                                                       const int numKeys)
{
  const int index = (blocksSoFar + blockIdx.x) * blockDim.x + threadIdx.x;
  if (index < numKeys) output[index] = keys[input[index]];
}

void picoIntIntSorterMarkUnique(const void * const gpuInputKeys,
                                void * const gpuUniqueFlags,
                                const int numKeys)
{
  const int NUM_BLOCKS = (numKeys + NUM_THREADS - 1) / NUM_THREADS;
  int blocksSoFar = 0;
  int numBlocks;
  while (blocksSoFar < NUM_BLOCKS)
  {
    numBlocks = (NUM_BLOCKS - blocksSoFar > MAX_BLOCKS ? MAX_BLOCKS : NUM_BLOCKS - blocksSoFar);
    hipLaunchKernelGGL((picoIntIntSorterComputeA), dim3(numBlocks), dim3(NUM_THREADS), 0, 0,
                       blocksSoFar,
                       reinterpret_cast<const int * >(gpuInputKeys),
                       reinterpret_cast<int * >(gpuUniqueFlags),
                       numKeys);
    blocksSoFar += numBlocks;
  }
}

void picoIntIntSorterFindOffsets(const void * const gpuKeys,
                                 const void * const gpuA,
                                 const void * const gpuB,
                                 void * const gpuC,
                                 void * const gpuD,
                                 const int numKeys,
                                 const int numUniqueKeys)
{
  const int NUM_BLOCKS_1 = 60;
  const int NUM_BLOCKS = (numUniqueKeys + NUM_THREADS - 1) / NUM_THREADS;
  int blocksSoFar = 0;
  int numBlocks;
  hipLaunchKernelGGL((picoIntIntSorterComputeC), dim3(NUM_BLOCKS_1), dim3(NUM_THREADS), 0, 0,
                     reinterpret_cast<const int * >(gpuA),
                     reinterpret_cast<const int * >(gpuB),
                     reinterpret_cast<int * >(gpuC),
                     numKeys);
  while (blocksSoFar < NUM_BLOCKS)
  {
    numBlocks = (NUM_BLOCKS - blocksSoFar > MAX_BLOCKS ? MAX_BLOCKS : NUM_BLOCKS - blocksSoFar);
    hipLaunchKernelGGL((picoIntIntSorterComputeD), dim3(numBlocks), dim3(NUM_THREADS), 0, 0,
                       blocksSoFar,
                       reinterpret_cast<const int * >(gpuC),
                       reinterpret_cast<int * >(gpuD),
                       numUniqueKeys,
                       numKeys);
    blocksSoFar += numBlocks;
  }
}

void picoIntIntSorterSetCompactedKeys(const void * const gpuKeys,
                                      const void * const gpuInput,
                                      void * const gpuOutput,
                                      const int numUniqueKeys)
{
  const int NUM_BLOCKS = (numUniqueKeys + NUM_THREADS - 1) / NUM_THREADS;
  int blocksSoFar = 0;
  int numBlocks;
  while (blocksSoFar < NUM_BLOCKS)
  {
    numBlocks = (NUM_BLOCKS - blocksSoFar > MAX_BLOCKS ? MAX_BLOCKS : NUM_BLOCKS - blocksSoFar);
    hipLaunchKernelGGL((picoIntIntSorterSetCompactedKeysKernel), dim3(numBlocks), dim3(NUM_THREADS), 0, 0,
                       blocksSoFar,
                       reinterpret_cast<const int * >(gpuKeys),
                       reinterpret_cast<const int * >(gpuInput),
                       reinterpret_cast< int * >(gpuOutput),
                       numUniqueKeys);
    blocksSoFar += numBlocks;
  }
}
d2e610a5b58f1db76ea97453a35922428e2eab86.cu
#include <cstdio>

static const int NUM_THREADS = 128;
static const int MAX_BLOCKS = 32768;

__global__ void picoIntIntSorterComputeA(const int blocksSoFar,
                                         const int * const inputKeys,
                                         int * const uniqueFlags,
                                         const int numKeys)
{
  const int index = (blocksSoFar + blockIdx.x) * blockDim.x + threadIdx.x;
  if (index == 0) uniqueFlags[index] = 1;
  else if (inputKeys[index] != inputKeys[index - 1]) uniqueFlags[index] = 1;
  else uniqueFlags[index] = 0;
}

__global__ void picoIntIntSorterComputeC(const int * const gpuA,
                                         const int * const gpuB,
                                         int * const gpuC,
                                         const int numKeys)
{
  __shared__ int b0;
  if (threadIdx.x == 0) b0 = gpuB[0];
  __syncthreads();
  for (int index = blockIdx.x * blockDim.x + threadIdx.x;
       index < numKeys;
       index += gridDim.x * blockDim.x)
  {
    if (gpuA[index]) gpuC[b0 - gpuB[index]] = index;
  }
}

__global__ void picoIntIntSorterComputeD(const int blocksSoFar,
                                         const int * const gpuC,
                                         int * const gpuD,
                                         const int numUniqueKeys,
                                         const int numKeys)
{
  const int index = (blocksSoFar + blockIdx.x) * blockDim.x + threadIdx.x;
  if (index == numUniqueKeys - 1) gpuD[index] = numKeys - gpuC[index];
  else gpuD[index] = gpuC[index + 1] - gpuC[index];
}

__global__ void picoIntIntSorterSetCompactedKeysKernel(const int blocksSoFar,
                                                       const int * const keys,
                                                       const int * const input,
                                                       int * const output,
                                                       const int numKeys)
{
  const int index = (blocksSoFar + blockIdx.x) * blockDim.x + threadIdx.x;
  if (index < numKeys) output[index] = keys[input[index]];
}

void picoIntIntSorterMarkUnique(const void * const gpuInputKeys,
                                void * const gpuUniqueFlags,
                                const int numKeys)
{
  const int NUM_BLOCKS = (numKeys + NUM_THREADS - 1) / NUM_THREADS;
  int blocksSoFar = 0;
  int numBlocks;
  while (blocksSoFar < NUM_BLOCKS)
  {
    numBlocks = (NUM_BLOCKS - blocksSoFar > MAX_BLOCKS ? MAX_BLOCKS : NUM_BLOCKS - blocksSoFar);
    picoIntIntSorterComputeA<<<numBlocks, NUM_THREADS>>>(blocksSoFar,
                                                         reinterpret_cast<const int * >(gpuInputKeys),
                                                         reinterpret_cast<int * >(gpuUniqueFlags),
                                                         numKeys);
    blocksSoFar += numBlocks;
  }
}

void picoIntIntSorterFindOffsets(const void * const gpuKeys,
                                 const void * const gpuA,
                                 const void * const gpuB,
                                 void * const gpuC,
                                 void * const gpuD,
                                 const int numKeys,
                                 const int numUniqueKeys)
{
  const int NUM_BLOCKS_1 = 60;
  const int NUM_BLOCKS = (numUniqueKeys + NUM_THREADS - 1) / NUM_THREADS;
  int blocksSoFar = 0;
  int numBlocks;
  picoIntIntSorterComputeC<<<NUM_BLOCKS_1, NUM_THREADS>>>(reinterpret_cast<const int * >(gpuA),
                                                          reinterpret_cast<const int * >(gpuB),
                                                          reinterpret_cast<int * >(gpuC),
                                                          numKeys);
  while (blocksSoFar < NUM_BLOCKS)
  {
    numBlocks = (NUM_BLOCKS - blocksSoFar > MAX_BLOCKS ? MAX_BLOCKS : NUM_BLOCKS - blocksSoFar);
    picoIntIntSorterComputeD<<<numBlocks, NUM_THREADS>>>(blocksSoFar,
                                                         reinterpret_cast<const int * >(gpuC),
                                                         reinterpret_cast<int * >(gpuD),
                                                         numUniqueKeys,
                                                         numKeys);
    blocksSoFar += numBlocks;
  }
}

void picoIntIntSorterSetCompactedKeys(const void * const gpuKeys,
                                      const void * const gpuInput,
                                      void * const gpuOutput,
                                      const int numUniqueKeys)
{
  const int NUM_BLOCKS = (numUniqueKeys + NUM_THREADS - 1) / NUM_THREADS;
  int blocksSoFar = 0;
  int numBlocks;
  while (blocksSoFar < NUM_BLOCKS)
  {
    numBlocks = (NUM_BLOCKS - blocksSoFar > MAX_BLOCKS ? MAX_BLOCKS : NUM_BLOCKS - blocksSoFar);
    picoIntIntSorterSetCompactedKeysKernel<<<numBlocks, NUM_THREADS>>>(blocksSoFar,
                                                                       reinterpret_cast<const int * >(gpuKeys),
                                                                       reinterpret_cast<const int * >(gpuInput),
                                                                       reinterpret_cast< int * >(gpuOutput),
                                                                       numUniqueKeys);
    blocksSoFar += numBlocks;
  }
}
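The pair above illustrates the main syntactic change hipify applies to kernel launches: CUDA's triple-chevron launch becomes a hipLaunchKernelGGL call whose third and fourth arguments are the dynamic shared-memory size and the stream (both 0 here). The following is a minimal, self-contained sketch of that same mapping; the kernel name, sizes, and variable names are illustrative and are not taken from the files above.

#include <cstdio>

// Illustrative kernel: writes each element's index into the array.
__global__ void fillIota(int *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = i;
}

int main() {
  const int n = 1 << 10;
  int *d_out = nullptr;
  cudaMalloc((void **)&d_out, n * sizeof(int));

  const int threads = 128;                        // same role as NUM_THREADS above
  const int blocks = (n + threads - 1) / threads; // same rounding as NUM_BLOCKS above

  // CUDA launch syntax, as in the .cu file above:
  fillIota<<<blocks, threads>>>(d_out, n);

  // hipify rewrites this launch as (compare the .hip file above):
  //   hipLaunchKernelGGL((fillIota), dim3(blocks), dim3(threads), 0, 0, d_out, n);
  // where the two literal 0 arguments are the shared-memory bytes and the stream.

  cudaDeviceSynchronize();
  cudaFree(d_out);
  return 0;
}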
7080c45b96d4f79829e9b2f23245e3d429d13911.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdio.h> #include "j2d9pt-512-10-256_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 18 #define BENCH_RAD 2 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 5 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ hipError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != hipSuccess) { \ fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == hipSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(hipGetLastError()); \ } while(0) float *dev_A; cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float))); { cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyHostToDevice)); #ifdef STENCILBENCH hipDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 472; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { hipLaunchKernelGGL(( kernel0_10), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), 
dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 3) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = 
(__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 4) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 5) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && 
(__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 6) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 7) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / 
__side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 8) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 9) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const 
AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 3) { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 4) { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 5) { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = 
(__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 6) { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 7) { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 484; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 8) { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 9) { const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 476; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % 
__side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH hipDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), hipMemcpyDeviceToHost)); } cudaCheckReturn(hipFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = ( 7.1f * A[t%2][i-2][j] + 5.1f * A[t%2][i-1][j] + 9.2f * A[t%2][i][j-2] + 12.1f * A[t%2][i][j-1] + 15.f * A[t%2][i][j] + 12.2f * A[t%2][i][j+1] + 9.1f * A[t%2][i][j+2] + 5.2f * A[t%2][i+1][j] + 7.2f * A[t%2][i+2][j]) / 118; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
7080c45b96d4f79829e9b2f23245e3d429d13911.cu
#include <assert.h> #include <stdio.h> #include "j2d9pt-512-10-256_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 18 #define BENCH_RAD 2 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 5 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ cudaError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != cudaSuccess) { \ fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == cudaSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(cudaGetLastError()); \ } while(0) float *dev_A; cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float))); { cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyHostToDevice)); #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 2 - 2); const AN5D_TYPE __c1Pad = (2); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 2 - 2); const AN5D_TYPE __c2Pad = (2); #define __c2 c2 const AN5D_TYPE __halo1 = 2; const AN5D_TYPE __halo2 = 2; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 472; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { kernel0_10<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE 
__OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 3) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D 
ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 4) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 5) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 6) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 
500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 7) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 8) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len 
* __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 9) { { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE 
__side1Len = 256; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 3) { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 4) { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 5) { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 6) { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len 
>= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 7) { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 484; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 8) { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 9) { const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 256; const AN5D_TYPE __side2Len = 476; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(float), cudaMemcpyDeviceToHost)); } cudaCheckReturn(cudaFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = ( 7.1f * A[t%2][i-2][j] + 5.1f * A[t%2][i-1][j] + 9.2f * A[t%2][i][j-2] + 12.1f * A[t%2][i][j-1] + 15.f * A[t%2][i][j] + 12.2f * A[t%2][i][j+1] + 9.1f * A[t%2][i][j+2] + 5.2f * A[t%2][i+1][j] + 7.2f * A[t%2][i+2][j]) / 118; } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
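// A naive, untiled CUDA sketch of the update computed by the kernel0_* launches above:
// it matches the radius-2 star stencil in the host fallback loop at the end of this file,
// but ignores the temporal blocking and halo streaming of the generated kernels.
// The kernel and the launch shown in the last comment are illustrative, not part of the
// original sources.
__global__ void star2d2r_naive_step(const float *in, float *out, int dimsize)
{
    // One thread per interior grid point; a halo of width 2 (the stencil's reach) is left untouched.
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < 2 || j < 2 || i >= dimsize - 2 || j >= dimsize - 2) return;
    out[i * dimsize + j] =
        ( 7.1f  * in[(i - 2) * dimsize + j]
        + 5.1f  * in[(i - 1) * dimsize + j]
        + 9.2f  * in[i * dimsize + (j - 2)]
        + 12.1f * in[i * dimsize + (j - 1)]
        + 15.f  * in[i * dimsize + j]
        + 12.2f * in[i * dimsize + (j + 1)]
        + 9.1f  * in[i * dimsize + (j + 2)]
        + 5.2f  * in[(i + 1) * dimsize + j]
        + 7.2f  * in[(i + 2) * dimsize + j]) / 118;
}
// e.g. star2d2r_naive_step<<<dim3((dimsize+15)/16, (dimsize+15)/16), dim3(16,16)>>>(d_in, d_out, dimsize);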
36290c1941921bc917e26de48929f65f6e1656a2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "darknet/dropout_layer.h"
#include "darknet/cuda.h"
#include "darknet/utils.h"
}

__global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale)
{
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(id < size) input[id] = (rand[id] < prob) ? 0 : input[id]*scale;
}

void forward_dropout_layer_gpu(dropout_layer layer, network_state state)
{
    if (!state.train) return;
    int size = layer.inputs*layer.batch;
    cuda_random(layer.rand_gpu, size);
    /*
    int i;
    for(i = 0; i < size; ++i){
        layer.rand[i] = rand_uniform();
    }
    cuda_push_array(layer.rand_gpu, layer.rand, size);
    */

    hipLaunchKernelGGL(( yoloswag420blazeit360noscope), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, state.input, size, layer.rand_gpu, layer.probability, layer.scale);
    check_error(hipPeekAtLastError());
}

void backward_dropout_layer_gpu(dropout_layer layer, network_state state)
{
    if(!state.delta) return;
    int size = layer.inputs*layer.batch;
    hipLaunchKernelGGL(( yoloswag420blazeit360noscope), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, 0, state.delta, size, layer.rand_gpu, layer.probability, layer.scale);
    check_error(hipPeekAtLastError());
}
36290c1941921bc917e26de48929f65f6e1656a2.cu
#include "cuda_runtime.h" #include "curand.h" #include "cublas_v2.h" extern "C" { #include "darknet/dropout_layer.h" #include "darknet/cuda.h" #include "darknet/utils.h" } __global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(id < size) input[id] = (rand[id] < prob) ? 0 : input[id]*scale; } void forward_dropout_layer_gpu(dropout_layer layer, network_state state) { if (!state.train) return; int size = layer.inputs*layer.batch; cuda_random(layer.rand_gpu, size); /* int i; for(i = 0; i < size; ++i){ layer.rand[i] = rand_uniform(); } cuda_push_array(layer.rand_gpu, layer.rand, size); */ yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(state.input, size, layer.rand_gpu, layer.probability, layer.scale); check_error(cudaPeekAtLastError()); } void backward_dropout_layer_gpu(dropout_layer layer, network_state state) { if(!state.delta) return; int size = layer.inputs*layer.batch; yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(state.delta, size, layer.rand_gpu, layer.probability, layer.scale); check_error(cudaPeekAtLastError()); }
c600774df2adb83a1dc719b2e72f4d0cf5fee1c8.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "kernel.hip"

int main(int argc, char const *argv[])
{
    int count;
    hipDeviceProp_t props;
    hipGetDeviceCount(&count);
    for (int i = 0; i < count; i++) {
        hipGetDeviceProperties(&props, i);
        std::cout << "Device name: " << props.name << std::endl;
        std::cout << "Clock rate: " << props.clockRate << std::endl;
        std::cout << "Max Threads Per Block: " << props.maxThreadsPerBlock << std::endl;
        std::cout << "Max Threads Dim: " << props.maxThreadsDim[0] << ", " << props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2] << std::endl;
        std::cout << "Max Grid Size: " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << std::endl;
    }
}
c600774df2adb83a1dc719b2e72f4d0cf5fee1c8.cu
#include <iostream> #include "kernel.cu" int main(int argc, char const *argv[]) { int count; cudaDeviceProp props; cudaGetDeviceCount(&count); for (int i = 0; i < count; i++) { cudaGetDeviceProperties(&props, i); std::cout << "Device name: " << props.name << std::endl; std::cout << "Clock rate: " << props.clockRate << std::endl; std::cout << "Max Threads Per Block: " << props.maxThreadsPerBlock << std::endl; std::cout << "Max Threads Dim: " << props.maxThreadsDim[0] << ", " << props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2] << std::endl; std::cout << "Max Grid Size: " << props.maxGridSize[0] << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << std::endl; } }
51fcf773150285a1e8df26b98012224f3defb065.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> #include <conio.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <fstream> #include <windows.h> using namespace std; //Funciones que van a utilizarse a lo largo del programa //CPU void generarTablero(int *tablero, int filas, int columnas, int dificultad); void imprimirTablero(int *tablero, int filas, int columnas); void imprimirColumnas(int columnas); void comprobarLleno(int *tablero, int filas, int columnas, int dificultad, bool &salida); void generarSemillas(int *tablero, int filas, int columnas, int dificultad); void guardarPartida(int *tablero, int filas, int columnas, int dificultad); void cargarPartida(); void modoManual(int *tablero, int filas, int columnas, int dificultad); //GPU __global__ void juegoManual(int *tablero, int fila, int columna, int filas, int columnas, char movimiento); __device__ void compruebaSemillas(int *tablero, int filas, int columnas, char movimiento); __device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento); __device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento); __device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento); __device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento); __device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento); int main(void){ //Almacenamos las propiedades de la tarjeta para no exceder el numero de hilos posibles en el tablero hipDeviceProp_t prop; hipGetDeviceProperties(&prop, 0); //Propiedades del tablero int *tablero; int filas = 0; int columnas = 0; int dificultad = 0; char modo_juego; //Preguntamos si quiere cargar un juego guardado anteriormente o si quiere empezar de nuevo cout << "Quieres continuar una partida anterior o empezar de nuevo? (C: Cargar / N: Nueva partida)\n"; char partida = 'X'; cin >> partida; while (partida != 'C' && partida != 'N') { cout << "Introduce un valor valido para iniciar el juego\n"; cin >> partida; } if (partida == 'N'){ //Recogemos los datos de filas y columnas del tablero que vamos a usar cout << "Seleccione el numero de filas con las que desea jugar: \n"; cin >> filas; cout << "Seleccione el numero de columnas con las que desea jugar: \n"; cin >> columnas; //Tablero mnimo de 4 por 4 while (filas < 4) { cout << "El numero de filas con las que desea jugar es demasiado pequeo, el minimo aceptado es 4: \n"; cin >> filas; } while (columnas < 4) { cout << "El numero de columnas con las que desea jugar es demasiado pequeo, el minimo aceptado es 4: \n"; cin >> columnas; } while (prop.maxThreadsPerBlock < (filas * columnas)) { cout << "Has excedido el limite de semillas posibles para el tablero, introduce las filas y las columnas de nuevo: \n"; cout << "Seleccione el numero de filas con las que desea jugar: \n"; cin >> filas; cout << "Seleccione el numero de columnas con las que desea jugar: \n"; cin >> columnas; } cout << "Elija dificultad: \n1. Bajo, se lanzaran 15 semillas de 2, 4 y 8 \n" "2. 
Dificil, se lanzaran 8 semillas de 2 y 4 \n"; cin >> dificultad; while (!(dificultad == 1 || dificultad == 2)){ cout << "Dificultad no vlida \n"; cout << "Selecccione 1 si desea jugar con nivel o 2 si desea jugar con nivel dificil \n"; cin >> dificultad; } cout << "Elija modo de juego: \n A. Automtico \n M. Manual \n"; cin >> modo_juego; while (!(modo_juego == 'M' || modo_juego == 'A')){ cout << "Modo de juego no vlido \n"; cout << "Selecccione A para jugar en modo automtico o M para manual \n"; cin >> modo_juego; } //Reservamos la memoria del tablero y lo inicializamos con generar tablero tablero = new int[filas * columnas]; generarTablero(tablero, filas, columnas, dificultad); modoManual(tablero, filas, columnas, dificultad); } else { cargarPartida(); } system("PAUSE"); } //Generar tablero con nmeros aleatorios void generarTablero(int *tablero, int filas, int columnas, int dificultad){ srand(time(0)); int tamao = filas * columnas; for (int i = 0; i < tamao; i++){ tablero[i] = 0; } generarSemillas(tablero, filas, columnas, dificultad); } void comprobarLleno(int *tablero, int filas, int columnas, int dificultad, bool &salida){ int tamao = filas * columnas; int contador, posicion = 0; if (dificultad == 1){ contador = 15; while (contador > 0 && posicion < tamao){ if (tablero[posicion] == 0) contador--; posicion++; } if (contador == 0) generarSemillas(tablero, filas, columnas, dificultad); else{ cout << "Juego terminado\n"; //exit(0); salida = true; } } if (dificultad == 2){ contador = 8; while (contador > 0 && posicion < tamao){ if (tablero[posicion] == 0) contador--; posicion++; } if (contador == 0) generarSemillas(tablero, filas, columnas, dificultad); else{ cout << "Juego terminado\n"; //exit(0); salida = true; } } } //Genera los nmeros para jugar en el tablero void generarSemillas(int *tablero, int filas, int columnas, int dificultad){ if (dificultad == 1){ int semillas = 0; int valores[3] = { 2, 4, 8 }; while (semillas < 15){ int posicion = rand() % (filas*columnas + 1); int valor = rand() % 3; if (tablero[posicion] == 0){ tablero[posicion] = valores[valor]; semillas++; } } } if (dificultad == 2){ int semillas = 0; int valores[3] = { 2, 4 }; while (semillas < 8){ int posicion = rand() % (filas*columnas + 1); int valor = rand() % 2; if (tablero[posicion] == 0){ tablero[posicion] = valores[valor]; semillas++; } } }/* int tamao = filas * columnas; int contador = 0; while (contador < 3){ int aux = rand() % 3; int i = rand() % tamao; if (tablero[i] == 0){ switch (aux){ case 0: tablero[i] = 2; break; case 1: tablero[i] = 4; break; case 2: tablero[i] = 8; break; } contador++; } }*/ } //Funcin que imprime el nmero de columnas que va a tener el tablero para que sea ms facil elegir semillas void imprimirColumnas(int columnas) { for (int i = 0; i < columnas; i++) { if (i == 0) { cout << " " << i + 1; } else { if (i < 9) { cout << " " << i + 1; } else { cout << " " << i + 1; } } } cout << "\n"; for (int i = 0; i < columnas; i++) { if (i == 0) { cout << " |"; } else { cout << " |"; } } cout << "\n"; } //Imprimimos el tablero void imprimirTablero(int *tablero, int filas, int columnas) { cout << "SE HAN GENERADO " << filas << " FILAS Y " << columnas << " COLUMNAS\n"; cout << "+-+-+-TABLERO DE JUEGO-+-+-+\n\n"; imprimirColumnas(columnas); for (int i = 0; i < filas; i++) { if (i < 9) { cout << i + 1 << " - "; } else { cout << i + 1 << " - "; } for (int k = 0; k < columnas; k++) { //Damos color en funcin del nmero imprimido int bloque = tablero[i * filas + k]; switch (bloque) { case 2: 
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 14); //Amarillo break; case 4: SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 12); //Rojo break; case 8: SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 13); //Morado break; case 16: SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 9); //Azul break; default: SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7); //Blanco } if (bloque < 10) cout << "| " << bloque << " |"; else cout << "| " << bloque << "|"; } SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7); cout << "\n"; } } //En funcin del movimiento, llama a la comprobacin correspondiente __device__ void compruebaSemillas(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){ switch (movimiento){ case 'W': compruebaAbajo(tablero, fila, columna, filas, columnas, movimiento); break; case 'S': compruebaArriba(tablero, fila, columna, filas, columnas, movimiento); break; case 'D': compruebaIzquierda(tablero, fila, columna, filas, columnas, movimiento); break; case 'A': compruebaDerecha(tablero, fila, columna, filas, columnas, movimiento); break; } } //Desplaza los nmeros respecto a los ceros que haya, en funcin del movimiento __device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){ if (movimiento == 'W'){ for (int i = filas - 1; i > 0; i--){ for (int j = i; j > 0; j--){ if (tablero[(j * columnas) + columna] != 0 && tablero[((j - 1) * columnas) + columna] == 0){ tablero[((j - 1) * columnas) + columna] = tablero[(j * columnas) + columna]; tablero[(j * columnas) + columna] = 0; } } } } else if (movimiento == 'S'){ for (int i = 0; i < filas - 1; i++){ for (int j = i; j < filas - 1; j++){ if (tablero[(j * columnas) + columna] != 0 && tablero[((j + 1) * columnas) + columna] == 0){ tablero[((j + 1) * columnas) + columna] = tablero[(j * columnas) + columna]; tablero[(j * columnas) + columna] = 0; } } } } else if (movimiento == 'D'){ for (int i = 0; i < columnas - 1; i++){ for (int j = i; j < columnas - 1; j++){ if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j + 1)] == 0 && tablero[fila * columnas + (j + 1)] != columnas){ tablero[fila * columnas + (j + 1)] = tablero[fila * columnas + j]; tablero[fila * columnas + j] = 0; } } } } else if (movimiento == 'A'){ for (int i = columnas - 1; i > 0; i--){ for (int j = i; j > 0; j--){ if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j - 1)] == 0){ tablero[fila * columnas + (j - 1)] = tablero[fila * columnas + j]; tablero[fila * columnas + j] = 0; } } } } } //Comprueba hacia arriba __device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){ moverCeros(tablero, fila, columna, filas, columnas, movimiento); if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila - 1) * columnas) + columna]){ tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2; tablero[((fila - 1) * columnas) + columna] = 0; moverCeros(tablero, fila, columna, filas, columnas, movimiento); } } //Comprueba hacia abajo __device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){ moverCeros(tablero, fila, columna, filas, columnas, movimiento); if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila + 1) * columnas) + columna]){ tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2; 
tablero[((fila + 1) * columnas) + columna] = 0; moverCeros(tablero, fila, columna, filas, columnas, movimiento); } } //Comprueba hacia la derecha __device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){ moverCeros(tablero, fila, columna, filas, columnas, movimiento); if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna + 1)]){ tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2; tablero[(fila * columnas) + (columna + 1)] = 0; moverCeros(tablero, fila, columna, filas, columnas, movimiento); } } //Comprueba hacia la izquierda __device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){ moverCeros(tablero, fila, columna, filas, columnas, movimiento); if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna - 1)]){ tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2; tablero[(fila * columnas) + (columna - 1)] = 0; moverCeros(tablero, fila, columna, filas, columnas, movimiento); } } __global__ void juegoManual(int *tablero, int filas, int columnas, char movimiento){ //Guardamos la columna y la fila del hilo int columnaHilo = threadIdx.x; int filaHilo = threadIdx.y; compruebaSemillas(tablero, filaHilo, columnaHilo, filas, columnas, movimiento); __syncthreads(); } //Guarda la partida con el tablero, las filas, las columnas y la dificultad void guardarPartida(int *tablero, int filas, int columnas, int dificultad) { ofstream doc; doc.open("partida.txt"); doc << filas << "\n"; doc << columnas << "\n"; doc << dificultad << "\n"; for (int i = 0; i < filas * columnas; i++) { doc << tablero[i] << " "; } doc.close(); system("cls"); cout << "Guardado correctamente.\n\n"; } //Carga la partida guardada void cargarPartida() { const string fichero = "partida.txt"; ifstream leer; leer.open(fichero.c_str()); int d, *tablero; int i = 0; int n = 48; int f = 0; int c = 0; char fila[80]; if (!leer.fail()) { leer.getline(fila, 80, '\n'); while (n > 47 && n < 58) { n = (int)fila[i]; i++; if (n > 47 && n < 58) { f = f * 10 + (n - 48); } } } n = 48; i = 0; if (!leer.fail()) { leer.getline(fila, 80, '\n'); while (n > 47 && n < 58) { n = (int)fila[i]; i++; if (n > 47 && n < 58) { c = c * 10 + (n - 48); } } } if (!leer.fail()) { leer.getline(fila, 80, '\n'); d = (int)fila[0] - 48; } tablero = new int[f*c]; for (int i = 0; i < f * c; i++) { leer.getline(fila, 80, ' '); tablero[i] = (int)fila[0] - 48; } leer.close(); modoManual(tablero, f, c, d); } void modoManual(int *tablero, int filas, int columnas, int dificultad){ //system("cls"); char movimiento = ' '; bool salida = false; while (movimiento != 'Z' && salida == false){ imprimirTablero(tablero, filas, columnas); cout << "Pulsa W, A, S o D para mover los numeros (Z para salir): \n"; cin >> movimiento; //while (movimiento != (ARRIBA || ABAJO || IZQUIERDA || DERECHA)) { while (movimiento != 'W' && movimiento != 'S' && movimiento != 'A' && movimiento != 'D' && movimiento != 'Z') { cout << "Tecla no valida, introduzca una valida:\n"; cin >> movimiento; } //CUDA int *tablero_gpu; //Reservamos memoria y copiamos tablero en GPU hipMalloc((void**)&tablero_gpu, (filas * columnas) * sizeof(int)); hipMemcpy(tablero_gpu, tablero, (filas * columnas) * sizeof(int), hipMemcpyHostToDevice); //Creamos los hilos en un solo bloque dim3 DimGrid(1, 1); dim3 DimBlock(filas, columnas); 
juegoManual << < DimGrid, DimBlock >> > (tablero_gpu, filas, columnas, movimiento); hipMemcpy(tablero, tablero_gpu, sizeof(int)* filas * columnas, hipMemcpyDeviceToHost); //system("cls"); comprobarLleno(tablero, filas, columnas, dificultad, salida); hipFree(tablero_gpu); } //system("cls"); cout << "Deseas guardar la partida? (S/N)\n"; char guardar = 'x'; cin >> guardar; while (guardar != 'S' && guardar != 'N') { system("cls"); cout << "Valor no valido, quieres guardar la partida? (S/N): \n"; cin >> guardar; } if (guardar == 'S') { guardarPartida(tablero, filas, columnas, dificultad); } else { cout << "Saliendo sin guardar...\n \n"; } }
51fcf773150285a1e8df26b98012224f3defb065.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <device_functions.h> #include <cuda_runtime_api.h> #include <conio.h> #include <time.h> #include <stdio.h> #include <stdlib.h> #include <fstream> #include <windows.h> using namespace std; //Funciones que van a utilizarse a lo largo del programa //CPU void generarTablero(int *tablero, int filas, int columnas, int dificultad); void imprimirTablero(int *tablero, int filas, int columnas); void imprimirColumnas(int columnas); void comprobarLleno(int *tablero, int filas, int columnas, int dificultad, bool &salida); void generarSemillas(int *tablero, int filas, int columnas, int dificultad); void guardarPartida(int *tablero, int filas, int columnas, int dificultad); void cargarPartida(); void modoManual(int *tablero, int filas, int columnas, int dificultad); //GPU __global__ void juegoManual(int *tablero, int fila, int columna, int filas, int columnas, char movimiento); __device__ void compruebaSemillas(int *tablero, int filas, int columnas, char movimiento); __device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento); __device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento); __device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento); __device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento); __device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento); int main(void){ //Almacenamos las propiedades de la tarjeta para no exceder el numero de hilos posibles en el tablero cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); //Propiedades del tablero int *tablero; int filas = 0; int columnas = 0; int dificultad = 0; char modo_juego; //Preguntamos si quiere cargar un juego guardado anteriormente o si quiere empezar de nuevo cout << "Quieres continuar una partida anterior o empezar de nuevo? (C: Cargar / N: Nueva partida)\n"; char partida = 'X'; cin >> partida; while (partida != 'C' && partida != 'N') { cout << "Introduce un valor valido para iniciar el juego\n"; cin >> partida; } if (partida == 'N'){ //Recogemos los datos de filas y columnas del tablero que vamos a usar cout << "Seleccione el numero de filas con las que desea jugar: \n"; cin >> filas; cout << "Seleccione el numero de columnas con las que desea jugar: \n"; cin >> columnas; //Tablero mínimo de 4 por 4 while (filas < 4) { cout << "El numero de filas con las que desea jugar es demasiado pequeño, el minimo aceptado es 4: \n"; cin >> filas; } while (columnas < 4) { cout << "El numero de columnas con las que desea jugar es demasiado pequeño, el minimo aceptado es 4: \n"; cin >> columnas; } while (prop.maxThreadsPerBlock < (filas * columnas)) { cout << "Has excedido el limite de semillas posibles para el tablero, introduce las filas y las columnas de nuevo: \n"; cout << "Seleccione el numero de filas con las que desea jugar: \n"; cin >> filas; cout << "Seleccione el numero de columnas con las que desea jugar: \n"; cin >> columnas; } cout << "Elija dificultad: \n1. Bajo, se lanzaran 15 semillas de 2, 4 y 8 \n" "2. 
Dificil, se lanzaran 8 semillas de 2 y 4 \n"; cin >> dificultad; while (!(dificultad == 1 || dificultad == 2)){ cout << "Dificultad no válida \n"; cout << "Selecccione 1 si desea jugar con nivel o 2 si desea jugar con nivel dificil \n"; cin >> dificultad; } cout << "Elija modo de juego: \n A. Automático \n M. Manual \n"; cin >> modo_juego; while (!(modo_juego == 'M' || modo_juego == 'A')){ cout << "Modo de juego no válido \n"; cout << "Selecccione A para jugar en modo automático o M para manual \n"; cin >> modo_juego; } //Reservamos la memoria del tablero y lo inicializamos con generar tablero tablero = new int[filas * columnas]; generarTablero(tablero, filas, columnas, dificultad); modoManual(tablero, filas, columnas, dificultad); } else { cargarPartida(); } system("PAUSE"); } //Generar tablero con números aleatorios void generarTablero(int *tablero, int filas, int columnas, int dificultad){ srand(time(0)); int tamaño = filas * columnas; for (int i = 0; i < tamaño; i++){ tablero[i] = 0; } generarSemillas(tablero, filas, columnas, dificultad); } void comprobarLleno(int *tablero, int filas, int columnas, int dificultad, bool &salida){ int tamaño = filas * columnas; int contador, posicion = 0; if (dificultad == 1){ contador = 15; while (contador > 0 && posicion < tamaño){ if (tablero[posicion] == 0) contador--; posicion++; } if (contador == 0) generarSemillas(tablero, filas, columnas, dificultad); else{ cout << "Juego terminado\n"; //exit(0); salida = true; } } if (dificultad == 2){ contador = 8; while (contador > 0 && posicion < tamaño){ if (tablero[posicion] == 0) contador--; posicion++; } if (contador == 0) generarSemillas(tablero, filas, columnas, dificultad); else{ cout << "Juego terminado\n"; //exit(0); salida = true; } } } //Genera los números para jugar en el tablero void generarSemillas(int *tablero, int filas, int columnas, int dificultad){ if (dificultad == 1){ int semillas = 0; int valores[3] = { 2, 4, 8 }; while (semillas < 15){ int posicion = rand() % (filas*columnas + 1); int valor = rand() % 3; if (tablero[posicion] == 0){ tablero[posicion] = valores[valor]; semillas++; } } } if (dificultad == 2){ int semillas = 0; int valores[3] = { 2, 4 }; while (semillas < 8){ int posicion = rand() % (filas*columnas + 1); int valor = rand() % 2; if (tablero[posicion] == 0){ tablero[posicion] = valores[valor]; semillas++; } } }/* int tamaño = filas * columnas; int contador = 0; while (contador < 3){ int aux = rand() % 3; int i = rand() % tamaño; if (tablero[i] == 0){ switch (aux){ case 0: tablero[i] = 2; break; case 1: tablero[i] = 4; break; case 2: tablero[i] = 8; break; } contador++; } }*/ } //Función que imprime el número de columnas que va a tener el tablero para que sea más facil elegir semillas void imprimirColumnas(int columnas) { for (int i = 0; i < columnas; i++) { if (i == 0) { cout << " " << i + 1; } else { if (i < 9) { cout << " " << i + 1; } else { cout << " " << i + 1; } } } cout << "\n"; for (int i = 0; i < columnas; i++) { if (i == 0) { cout << " |"; } else { cout << " |"; } } cout << "\n"; } //Imprimimos el tablero void imprimirTablero(int *tablero, int filas, int columnas) { cout << "SE HAN GENERADO " << filas << " FILAS Y " << columnas << " COLUMNAS\n"; cout << "+-+-+-TABLERO DE JUEGO-+-+-+\n\n"; imprimirColumnas(columnas); for (int i = 0; i < filas; i++) { if (i < 9) { cout << i + 1 << " - "; } else { cout << i + 1 << " - "; } for (int k = 0; k < columnas; k++) { //Damos color en función del número imprimido int bloque = tablero[i * filas + k]; switch (bloque) { case 2: 
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 14); //Amarillo break; case 4: SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 12); //Rojo break; case 8: SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 13); //Morado break; case 16: SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 9); //Azul break; default: SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7); //Blanco } if (bloque < 10) cout << "| " << bloque << " |"; else cout << "| " << bloque << "|"; } SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 7); cout << "\n"; } } //En función del movimiento, llama a la comprobación correspondiente __device__ void compruebaSemillas(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){ switch (movimiento){ case 'W': compruebaAbajo(tablero, fila, columna, filas, columnas, movimiento); break; case 'S': compruebaArriba(tablero, fila, columna, filas, columnas, movimiento); break; case 'D': compruebaIzquierda(tablero, fila, columna, filas, columnas, movimiento); break; case 'A': compruebaDerecha(tablero, fila, columna, filas, columnas, movimiento); break; } } //Desplaza los números respecto a los ceros que haya, en función del movimiento __device__ void moverCeros(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){ if (movimiento == 'W'){ for (int i = filas - 1; i > 0; i--){ for (int j = i; j > 0; j--){ if (tablero[(j * columnas) + columna] != 0 && tablero[((j - 1) * columnas) + columna] == 0){ tablero[((j - 1) * columnas) + columna] = tablero[(j * columnas) + columna]; tablero[(j * columnas) + columna] = 0; } } } } else if (movimiento == 'S'){ for (int i = 0; i < filas - 1; i++){ for (int j = i; j < filas - 1; j++){ if (tablero[(j * columnas) + columna] != 0 && tablero[((j + 1) * columnas) + columna] == 0){ tablero[((j + 1) * columnas) + columna] = tablero[(j * columnas) + columna]; tablero[(j * columnas) + columna] = 0; } } } } else if (movimiento == 'D'){ for (int i = 0; i < columnas - 1; i++){ for (int j = i; j < columnas - 1; j++){ if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j + 1)] == 0 && tablero[fila * columnas + (j + 1)] != columnas){ tablero[fila * columnas + (j + 1)] = tablero[fila * columnas + j]; tablero[fila * columnas + j] = 0; } } } } else if (movimiento == 'A'){ for (int i = columnas - 1; i > 0; i--){ for (int j = i; j > 0; j--){ if (tablero[fila * columnas + j] != 0 && tablero[fila * columnas + (j - 1)] == 0){ tablero[fila * columnas + (j - 1)] = tablero[fila * columnas + j]; tablero[fila * columnas + j] = 0; } } } } } //Comprueba hacia arriba __device__ void compruebaArriba(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){ moverCeros(tablero, fila, columna, filas, columnas, movimiento); if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila - 1) * columnas) + columna]){ tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2; tablero[((fila - 1) * columnas) + columna] = 0; moverCeros(tablero, fila, columna, filas, columnas, movimiento); } } //Comprueba hacia abajo __device__ void compruebaAbajo(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){ moverCeros(tablero, fila, columna, filas, columnas, movimiento); if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[((fila + 1) * columnas) + columna]){ tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2; 
tablero[((fila + 1) * columnas) + columna] = 0; moverCeros(tablero, fila, columna, filas, columnas, movimiento); } } //Comprueba hacia la derecha __device__ void compruebaDerecha(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){ moverCeros(tablero, fila, columna, filas, columnas, movimiento); if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna + 1)]){ tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2; tablero[(fila * columnas) + (columna + 1)] = 0; moverCeros(tablero, fila, columna, filas, columnas, movimiento); } } //Comprueba hacia la izquierda __device__ void compruebaIzquierda(int *tablero, int fila, int columna, int filas, int columnas, char movimiento){ moverCeros(tablero, fila, columna, filas, columnas, movimiento); if (tablero[(fila * columnas) + columna] != 0 && tablero[(fila * columnas) + columna] == tablero[(fila * columnas) + (columna - 1)]){ tablero[(fila * columnas) + columna] = tablero[(fila * columnas) + columna] * 2; tablero[(fila * columnas) + (columna - 1)] = 0; moverCeros(tablero, fila, columna, filas, columnas, movimiento); } } __global__ void juegoManual(int *tablero, int filas, int columnas, char movimiento){ //Guardamos la columna y la fila del hilo int columnaHilo = threadIdx.x; int filaHilo = threadIdx.y; compruebaSemillas(tablero, filaHilo, columnaHilo, filas, columnas, movimiento); __syncthreads(); } //Guarda la partida con el tablero, las filas, las columnas y la dificultad void guardarPartida(int *tablero, int filas, int columnas, int dificultad) { ofstream doc; doc.open("partida.txt"); doc << filas << "\n"; doc << columnas << "\n"; doc << dificultad << "\n"; for (int i = 0; i < filas * columnas; i++) { doc << tablero[i] << " "; } doc.close(); system("cls"); cout << "Guardado correctamente.\n\n"; } //Carga la partida guardada void cargarPartida() { const string fichero = "partida.txt"; ifstream leer; leer.open(fichero.c_str()); int d, *tablero; int i = 0; int n = 48; int f = 0; int c = 0; char fila[80]; if (!leer.fail()) { leer.getline(fila, 80, '\n'); while (n > 47 && n < 58) { n = (int)fila[i]; i++; if (n > 47 && n < 58) { f = f * 10 + (n - 48); } } } n = 48; i = 0; if (!leer.fail()) { leer.getline(fila, 80, '\n'); while (n > 47 && n < 58) { n = (int)fila[i]; i++; if (n > 47 && n < 58) { c = c * 10 + (n - 48); } } } if (!leer.fail()) { leer.getline(fila, 80, '\n'); d = (int)fila[0] - 48; } tablero = new int[f*c]; for (int i = 0; i < f * c; i++) { leer.getline(fila, 80, ' '); tablero[i] = (int)fila[0] - 48; } leer.close(); modoManual(tablero, f, c, d); } void modoManual(int *tablero, int filas, int columnas, int dificultad){ //system("cls"); char movimiento = ' '; bool salida = false; while (movimiento != 'Z' && salida == false){ imprimirTablero(tablero, filas, columnas); cout << "Pulsa W, A, S o D para mover los numeros (Z para salir): \n"; cin >> movimiento; //while (movimiento != (ARRIBA || ABAJO || IZQUIERDA || DERECHA)) { while (movimiento != 'W' && movimiento != 'S' && movimiento != 'A' && movimiento != 'D' && movimiento != 'Z') { cout << "Tecla no valida, introduzca una valida:\n"; cin >> movimiento; } //CUDA int *tablero_gpu; //Reservamos memoria y copiamos tablero en GPU cudaMalloc((void**)&tablero_gpu, (filas * columnas) * sizeof(int)); cudaMemcpy(tablero_gpu, tablero, (filas * columnas) * sizeof(int), cudaMemcpyHostToDevice); //Creamos los hilos en un solo bloque dim3 DimGrid(1, 1); dim3 DimBlock(filas, columnas); 
juegoManual << < DimGrid, DimBlock >> > (tablero_gpu, filas, columnas, movimiento); cudaMemcpy(tablero, tablero_gpu, sizeof(int)* filas * columnas, cudaMemcpyDeviceToHost); //system("cls"); comprobarLleno(tablero, filas, columnas, dificultad, salida); cudaFree(tablero_gpu); } //system("cls"); cout << "Deseas guardar la partida? (S/N)\n"; char guardar = 'x'; cin >> guardar; while (guardar != 'S' && guardar != 'N') { system("cls"); cout << "Valor no valido, quieres guardar la partida? (S/N): \n"; cin >> guardar; } if (guardar == 'S') { guardarPartida(tablero, filas, columnas, dificultad); } else { cout << "Saliendo sin guardar...\n \n"; } }
b0ae4c8edd4a654e0282e75b8a4bd5c204caead2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#include <cfloat>

// For each proposal p_id, take the channel-wise maximum of inp over the rows
// [offsets[p_id], offsets[p_id + 1]) and write it to out[p_id * C + plane].
__global__ void sec_max_cuda_(int nProposal, int C, float *inp, int *offsets, float *out){
    for(int p_id = blockIdx.x; p_id < nProposal; p_id += gridDim.x){
        int start = offsets[p_id];
        int end = offsets[p_id + 1];
        for(int plane = threadIdx.x; plane < C; plane += blockDim.x){
            float max_val = -FLT_MAX;   // lowest finite float as the initial running maximum
            for(int i = start; i < end; i++){
                if(inp[i * C + plane] > max_val){
                    max_val = inp[i * C + plane];
                }
            }
            out[p_id * C + plane] = max_val;
        }
    }
}
b0ae4c8edd4a654e0282e75b8a4bd5c204caead2.cu
#include "includes.h" __global__ void sec_max_cuda_(int nProposal, int C, float *inp, int *offsets, float *out){ for(int p_id = blockIdx.x; p_id < nProposal; p_id += gridDim.x){ int start = offsets[p_id]; int end = offsets[p_id + 1]; for(int plane = threadIdx.x; plane < C; plane += blockDim.x){ float max_val = -1e50; for(int i = start; i < end; i++){ if(inp[i * C + plane] > max_val){ max_val = inp[i * C + plane]; } } out[p_id * C + plane] = max_val; } } }
1d49669e27ca0314ddff1c9fe88878a54b83ab42.hip
// !!! This is a file automatically generated by hipify!!!
#include"svd.cuh"
#include<cusolverDn.h>
#include <cassert>
#include <cstdio>
#include <cstdlib>

// Economy-size complex SVD via the Jacobi solver: T is m x n (column-major, lda = m),
// U comes back as m x min(m,n), V as n x min(m,n), S holds the min(m,n) singular values.
// All pointers passed in are host buffers; device staging is handled internally.
void svd(int m, int n, hipComplex* T, hipComplex* U, hipComplex* V, float* S){
    hipsolverDnHandle_t handle;
    hipsolverGesvdjInfo_t params = NULL;
    int* info = NULL;
    int echo = 1;      // econ flag: 1 = economy-size factors
    int lda = m;
    int ldu = m;
    int ldv = n;
    int lwork = 0;
    hipComplex* work = NULL;
    float* s = NULL;
    hipComplex* u = NULL;
    hipComplex* v = NULL;
    hipComplex* t = NULL;

    cusolverStatus_t status = CUSOLVER_STATUS_SUCCESS;
    status = hipsolverDnCreate(&handle);
    assert(status == CUSOLVER_STATUS_SUCCESS);
    status = hipsolverDnCreateGesvdjInfo(&params);
    assert(status == CUSOLVER_STATUS_SUCCESS);

    // Allocate device buffers and copy the input matrix to the device.
    hipError_t stat1 = hipSuccess;
    hipError_t stat2 = hipSuccess;
    hipError_t stat3 = hipSuccess;
    hipError_t stat4 = hipSuccess;
    hipError_t stat5 = hipSuccess;
    hipError_t stat6 = hipSuccess;
    stat1 = hipMalloc((void**)&info, sizeof(int));
    int* inf = (int*)malloc(sizeof(int));
    stat2 = hipMalloc((void**)&u, sizeof(hipComplex)*m*((m<n)?m:n));
    stat3 = hipMalloc((void**)&v, sizeof(hipComplex)*n*((m<n)?m:n));
    stat4 = hipMalloc((void**)&s, sizeof(float)*((m<n)?m:n));
    stat5 = hipMalloc((void**)&t, sizeof(hipComplex)*m*n);
    stat6 = hipMemcpy(t, T, sizeof(hipComplex)*m*n, hipMemcpyHostToDevice);
    if( stat1!=hipSuccess|| stat2!=hipSuccess|| stat3!=hipSuccess||
        stat4!=hipSuccess|| stat5!=hipSuccess|| stat6!=hipSuccess){
        printf("cuda malloc error\n");
        exit(-1);
    }

    // Query the workspace size, then allocate it.
    if(hipsolverDnCgesvdj_bufferSize(handle, HIPSOLVER_EIG_MODE_VECTOR, echo, m, n, t, m,
                                     s, u, ldu, v, ldv, &lwork, params) != CUSOLVER_STATUS_SUCCESS){
        printf("hipsolverDnCgesvdj_bufferSize failed\n");
        exit(-1);
    }
    if(hipDeviceSynchronize() != hipSuccess){
        printf("synchronize failed");
        exit(-1);
    }
    stat1 = hipMalloc((void**)&work, sizeof(hipComplex)*lwork);
    assert(stat1 == hipSuccess);

    // Compute the decomposition.
    if(hipsolverDnCgesvdj(handle, HIPSOLVER_EIG_MODE_VECTOR, echo, m, n, t, lda,
                          s, u, ldu, v, ldv, work, lwork, info, params) != CUSOLVER_STATUS_SUCCESS){
        printf("hipsolverDnCgesvdj err\n");
        return;
    }
    if(hipDeviceSynchronize() != hipSuccess){
        printf("cuda synchronize err\n");
        return;
    }

    // Copy the factors and singular values back to the host.
    stat1 = hipMemcpy(U, u, sizeof(hipComplex)*m*((m<n)?m:n), hipMemcpyDeviceToHost);
    assert(stat1 == hipSuccess);
    stat1 = hipMemcpy(V, v, sizeof(hipComplex)*n*((m<n)?m:n), hipMemcpyDeviceToHost);
    assert(stat1 == hipSuccess);
    stat1 = hipMemcpy(S, s, sizeof(float)*((m<n)?m:n), hipMemcpyDeviceToHost);
    assert(stat1 == hipSuccess);
    hipMemcpy(inf, info, sizeof(int), hipMemcpyDeviceToHost);
    free(inf);

    // Release device resources.
    stat1 = hipFree(u);
    assert(stat1 == hipSuccess);
    stat1 = hipFree(v);
    assert(stat1 == hipSuccess);
    stat1 = hipFree(s);
    assert(stat1 == hipSuccess);
    hipFree(info);
    hipFree(work);
    hipFree(t);   // release the staged copy of T
    status = hipsolverDnDestroy(handle);
    assert(status == CUSOLVER_STATUS_SUCCESS);
    status = hipsolverDnDestroyGesvdjInfo(params);
    assert(status == CUSOLVER_STATUS_SUCCESS);
}
1d49669e27ca0314ddff1c9fe88878a54b83ab42.cu
#include"svd.cuh" #include<cusolverDn.h> void svd(int m,int n,cuComplex* T,cuComplex* U,cuComplex* V,float* S){ cusolverDnHandle_t handle; gesvdjInfo_t params=NULL; int* info=NULL; int echo=1; int lda=0; lda=m; int ldu=0; ldu=m; int ldv=0; ldv=n; int lwork=0; cuComplex* work=NULL; float* s=NULL; cuComplex* u=NULL; cuComplex* v=NULL; cuComplex* t=NULL; cusolverStatus_t status=CUSOLVER_STATUS_SUCCESS; status=cusolverDnCreate(&handle); assert(status==CUSOLVER_STATUS_SUCCESS); status=cusolverDnCreateGesvdjInfo(&params); assert(status==CUSOLVER_STATUS_SUCCESS); cudaError_t stat1=cudaSuccess; cudaError_t stat2=cudaSuccess; cudaError_t stat3=cudaSuccess; cudaError_t stat4=cudaSuccess; cudaError_t stat5=cudaSuccess; cudaError_t stat6=cudaSuccess; stat1=cudaMalloc((void**)&info,sizeof(int)); int* inf=(int*)malloc(sizeof(int)); stat2=cudaMalloc((void**)&u,sizeof(cuComplex)*m*((m<n)?m:n)); stat3=cudaMalloc((void**)&v,sizeof(cuComplex)*n*((m<n)?m:n)); stat4=cudaMalloc((void**)&s,sizeof(float)*((m<n)?m:n)); stat5=cudaMalloc((void**)&t,sizeof(cuComplex)*m*n); stat6=cudaMemcpy(t,T,sizeof(cuComplex)*m*n,cudaMemcpyHostToDevice); if( stat1!=cudaSuccess|| stat2!=cudaSuccess|| stat3!=cudaSuccess|| stat4!=cudaSuccess|| stat5!=cudaSuccess|| stat6!=cudaSuccess){ printf("cuda malloc error\n"); exit(-1); } if(cusolverDnCgesvdj_bufferSize( handle, CUSOLVER_EIG_MODE_VECTOR, echo, m, n, t, m, s, u, ldu, v, ldv, &lwork, params)!=CUSOLVER_STATUS_SUCCESS){ printf("cusolverDnCgesvdj_bufferSize failed\n"); exit(-1); } if(cudaDeviceSynchronize()!=cudaSuccess){ printf("synchronize failed"); exit(-1); } stat1=cudaMalloc((void**)&work,sizeof(cuComplex)*lwork); assert(stat1==cudaSuccess); if(cusolverDnCgesvdj( handle, CUSOLVER_EIG_MODE_VECTOR, echo, m, n, t, lda, s, u, ldu, v, ldv, work, lwork, info, params)!=CUSOLVER_STATUS_SUCCESS){ printf("cusolverDnCgesvdj err\n"); return; } if(cudaDeviceSynchronize()!=cudaSuccess){ printf("cuda synchronize err\n"); return; } stat1=cudaMemcpy(U,u,sizeof(cuComplex)*m*((m<n)?m:n),cudaMemcpyDeviceToHost); assert(stat1==cudaSuccess); stat1=cudaMemcpy(V,v,sizeof(cuComplex)*n*((m<n)?m:n),cudaMemcpyDeviceToHost); assert(stat1==cudaSuccess); stat1=cudaMemcpy(S,s,sizeof(float)*((m<n)?m:n),cudaMemcpyDeviceToHost); assert(stat1==cudaSuccess); cudaMemcpy(inf,info,sizeof(int),cudaMemcpyDeviceToHost); free(inf); stat1=cudaFree(u); assert(stat1==cudaSuccess); stat1=cudaFree(v); assert(stat1==cudaSuccess); stat1=cudaFree(s); assert(stat1==cudaSuccess); cudaFree(info); cudaFree(work); status=cusolverDnDestroy(handle); assert(status==CUSOLVER_STATUS_SUCCESS); status=cusolverDnDestroyGesvdjInfo(params); assert(status==CUSOLVER_STATUS_SUCCESS); }
0ecfda8f73b5a9d61f159d69a7d2b52a4daa6102.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/softmax_op.h" #include "caffe2/operators/softmax_with_loss_op.h" #include "caffe2/operators/spatial_softmax_with_loss_op.h" namespace caffe2 { namespace { __global__ void LabelCrossEntropyKernel( const int N, const int D, const float* logPdata, const int* labeldata, const float* weights, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D); float weight = weights ? weights[i] : 1.0; Ydata[i] = -logPdata[i * D + labeldata[i]] * weight; } } __global__ void LabelCrossEntropyGradientKernel( const int N, const int D, const float* Pdata, const int* labeldata, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { int idx = i * D + labeldata[i]; dXdata[idx] = Pdata[idx] - 1.; } } __global__ void LabelCrossEntropyGradientKernelWeighted( const int N, const int D, const float* Pdata, const int* labeldata, float* dXdata, const float* weights) { CUDA_1D_KERNEL_LOOP(i, N * D) { int row = i / D; int d = i % D; float val = Pdata[i] - 1.0 * (d == labeldata[row]); float weight = weights[row]; dXdata[i] = val * weight; } } __global__ void ProbCrossEntropyKernel( const int N, const int D, const float* Pdata, const float* labeldata, const float* weights, float* Ydata) { typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int i = blockIdx.x; i < N; i += gridDim.x) { float weight = weights ? weights[i] : 1.0; float sum = 0.0; float total_prob = 0.0; for (int j = threadIdx.x; j < D; j += blockDim.x) { int idx = i * D + j; CUDA_KERNEL_ASSERT(labeldata[idx] >= 0); total_prob += labeldata[idx]; sum += -logf(fmaxf(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight; } float tot = BlockReduce(temp_storage).Sum(sum); __syncthreads(); float total_prob_sum = BlockReduce(temp_storage).Sum(total_prob); if (threadIdx.x == 0) { Ydata[i] = tot; // Sanity check CUDA_KERNEL_ASSERT(fabsf(1.0 - total_prob_sum) < 1e-5f); } __syncthreads(); } } __global__ void ProbCrossEntropyGradientKernel( const int N, const int D, const float* Pdata, const float* labeldata, float* dXdata, const float* weights) { if (weights == NULL) { CUDA_1D_KERNEL_LOOP(idx, N * D) { dXdata[idx] = Pdata[idx] - labeldata[idx]; } } else { CUDA_1D_KERNEL_LOOP(idx, N * D) { dXdata[idx] = (Pdata[idx] - labeldata[idx]) * weights[idx / D]; } } } __global__ void SpatialSoftmaxKernel( const int num, const int D, const int W, const int H, const float* Xdata, float* Pdata) { CUDA_1D_KERNEL_LOOP(index, num * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; // Subtract max on each cell for numerical reasons float max_val = -FLT_MAX; for(int c = 0; c < D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; max_val = fmaxf(max_val, Xdata[idx]); } // Exponentiate float expsum = 0.0f; for(int c = 0; c < D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; float expx = expf(Xdata[idx] - max_val); Pdata[idx] = expx; expsum += expx; } // Normalize for(int c=0; c<D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; Pdata[idx] /= expsum; } } } #define DONTCARE (-1) __global__ void SpatialCrossEntropyLossKernel( const int N, const int D, const int W, const int H, const float* Pdata, const int* label_data, const float* weights, float* loss_data, float* weight_data) { CUDA_1D_KERNEL_LOOP(index, N * W * H) { int x = 
index % W; int y = (index / W) % H; int i = index / W / H; const int label = static_cast<int>(label_data[index]); if (label != DONTCARE) { CUDA_KERNEL_ASSERT(label >= 0 && label < D); float weight = (weights == NULL ? 1.0 : weights[index]); loss_data[index] = -logf(fmaxf( Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight; weight_data[index] = weight; } else { loss_data[index] = 0; weight_data[index] = 0; } } } __global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D, const int W, const int H, const int* label_data, const float* weights, float* dX_data, float* weights_) { CUDA_1D_KERNEL_LOOP(index, N * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; const int label = static_cast<int>(label_data[index]); if (label != DONTCARE) { int data_idx = i * (H * W * D) + label * (H * W) + y * W + x; dX_data[data_idx] -= 1.0; if (weights != NULL) { float weight = weights[index]; for (int c = 0; c < D; ++c) { int data_idx = i * (H * W * D) + c * (H * W) + y * W + x; dX_data[data_idx] *= weight; } weights_[index] = weight; } else { weights_[index] = 1.0; } } else { // Ignore-label, so set all gradients for this positions // tp zero for (int c = 0; c < D; ++c) { int data_idx = i * (H * W * D) + c * (H * W) + y * W + x; dX_data[data_idx] = 0.0; } weights_[index] = 0.0; } } } __global__ void SoftmaxNormalizeLogsKernel( const int nthreads, const int D, const float* logits, const float* rowmax, const float* scales, float* out_log) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index / D; out_log[index] = logits[index] - rowmax[n] - logf(fmaxf(scales[n], FLT_MIN)); } } __global__ void SoftmaxNormalizeKernel( const int nthreads, const int D, const float* probs, const float* scales, float* out) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index / D; out[index] = probs[index] / scales[n]; } } void Softmax( const int N, const int D, const float* logits, const float* sum_multiplier, float* scales, float* rowmax, float* probs, bool log_softmax, CUDAContext* context) { const int size = N * D; math::RowwiseMax<float, CUDAContext>(N, D, logits, rowmax, context); // Put the intermediate result X - max(X) into Y context->CopySameDevice<float>(size, logits, probs); // Subtract the scale math::Gemm<float, CUDAContext>( CblasNoTrans, CblasNoTrans, N, D, 1, -1, rowmax, sum_multiplier, 1, probs, context); // Exponentiation math::Exp<float, CUDAContext>(size, probs, probs, context); // Sum exponentiated values math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier, 0, scales, context); // Normalize if (!log_softmax) { hipLaunchKernelGGL(( SoftmaxNormalizeKernel), dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, D, probs, scales, probs); } else { hipLaunchKernelGGL(( SoftmaxNormalizeLogsKernel), dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, D, logits, rowmax, scales, probs); } } } // namespace template<> bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets const float* weights = (InputSize() > 2 ? 
Input(2).data<float>() : NULL); const auto canonical_axis = X.canonical_axis_index(axis_); int N, D; N = X.size_to_dim(canonical_axis); // batch size D = X.size_from_dim(canonical_axis); auto* P = Output(0, X.sizes(), at::dtype<float>()); // Probabilities from softmax ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA)); total_weight_ptr_.Resize(1); if (label_prob_mode_) { CAFFE_ENFORCE_GE(T.dim(), 2); CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N); CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D); } else { if (T.dim() == canonical_axis) { CAFFE_ENFORCE_EQ(T.numel(), N); } else { CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N); CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1); } } auto* avg_loss = Output(1, vector<int64_t>(), at::dtype<float>()); // Average loss if (!losses_.defined()) { losses_ = caffe2::empty({N}, at::dtype<float>().device(CUDA)); } else if (losses_.numel() != N) { losses_.Resize(N); } if (!rowmax_.defined()) { rowmax_ = caffe2::empty({N}, at::dtype<float>().device(CUDA)); } else if (rowmax_.numel() != N) { rowmax_.Resize(N); } if (!sum_multiplier_.defined()) { sum_multiplier_ = caffe2::empty({D}, at::dtype<float>().device(CUDA)); math::Set<float, CUDAContext>(D, 1.f, sum_multiplier_.mutable_data<float>(), &context_); } else if (sum_multiplier_.numel() != D) { sum_multiplier_.Resize(D); math::Set<float, CUDAContext>(D, 1.f, sum_multiplier_.mutable_data<float>(), &context_); } Softmax( N, D, X.data<float>(), sum_multiplier_.data<float>(), losses_.mutable_data<float>(), rowmax_.mutable_data<float>(), P->template mutable_data<float>(), !label_prob_mode_, // logarithmic output &context_); // Compute label xent loss per example if (!label_prob_mode_) { hipLaunchKernelGGL(( LabelCrossEntropyKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, P->data<float>(), T.data<int>(), weights, losses_.mutable_data<float>()); // Since we had logarithmic output, we need to exponentiate // them again. math::Exp<float, CUDAContext>( N * D, P->data<float>(), P->template mutable_data<float>(), &context_); } else { hipLaunchKernelGGL(( ProbCrossEntropyKernel), dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, P->data<float>(), T.data<float>(), weights, losses_.mutable_data<float>()); } float total_weight = N; if (weights) { // Sum weights math::Sum<float, CUDAContext>( N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_); CUDA_CHECK(hipMemcpyAsync( &total_weight, total_weight_ptr_.data<float>(), sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream())); } // Sum of all losses float* avg_loss_data = avg_loss->template mutable_data<float>(); math::Sum<float, CUDAContext>( losses_.numel(), losses_.data<float>(), avg_loss_data, &context_, &scratch_); // Average of input batch size if (total_weight > 0) { math::Scale<float, float, CUDAContext>( 1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_); } if (OutputSize() > 2) { OutputTensorAlias(2, losses_); } return true; } template <> bool SpatialSoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets const float* weights = (InputSize() > 2 ? 
Input(2).data<float>() : NULL); int N, D; N = X.dim32(0); D = X.dim32(1); auto* P = Output(0, X.sizes(), at::dtype<float>()); // Probabilities from softmax ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA)); CAFFE_ENFORCE_EQ(X.dim(), 4); CAFFE_ENFORCE_EQ(T.dim(), 3); CAFFE_ENFORCE_EQ(T.dim32(0), N); int H = X.dim32(2); int W = X.dim32(3); if (!losses_.defined()) { losses_ = caffe2::empty({N * W * H}, at::dtype<float>().device(CUDA)); } else if (losses_.numel() != N * W * H) { losses_.Resize(N * W * H); } if (!weights_.defined()) { weights_ = caffe2::empty({N * W * H}, at::dtype<float>().device(CUDA)); } else if (weights_.numel() != N * W * H) { weights_.Resize(N * W * H); } const float* Xdata = X.data<float>(); float* Pdata = P->template mutable_data<float>(); // Softmax for each x,y location hipLaunchKernelGGL(( SpatialSoftmaxKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, W, H, Xdata, Pdata); // Cross entropy auto* avg_loss = Output(1, vector<int64_t>(), at::dtype<float>()); // Average loss float* avg_loss_data = avg_loss->template mutable_data<float>(); math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_); const int* label_data = T.data<int>(); math::Set<float, CUDAContext>( 1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_); hipLaunchKernelGGL(( SpatialCrossEntropyLossKernel), dim3(CAFFE_GET_BLOCKS(N * W * H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, W, H, P->data<float>(), label_data, weights, losses_.mutable_data<float>(), weights_.mutable_data<float>()); // Somewhat awkward scalar passing from device to host float h_total_weight; math::Sum<float, CUDAContext>( weights_.numel(), weights_.data<float>(), total_weight_ptr_.mutable_data<float>(), &context_, &scratch_); CUDA_CHECK(hipMemcpyAsync( &h_total_weight, total_weight_ptr_.data<float>(), sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream())); math::Sum<float, CUDAContext>( losses_.numel(), losses_.data<float>(), avg_loss_data, &context_, &scratch_); // Final scaling if (h_total_weight > 0) { math::Scale<float, float, CUDAContext>( 1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_); } return true; } template <> bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets // Input(2) is weights, if given auto& P = Input(InputSize() - 2); // Probabilities from softmax auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL); Tensor* dX; if (only_loss_) { // Memory saving trick to share the buffer with the softmax output. // Softmax output is thus overwritten. 
dX = OutputTensorAlias(0, P); dX->ResizeLike(X); } else { dX = Output(0, X.sizes(), at::dtype<float>()); } const auto canonical_axis = X.canonical_axis_index(axis_); int N, D; N = X.size_to_dim(canonical_axis); // batch size D = X.size_from_dim(canonical_axis); ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA)); if (label_prob_mode_) { CAFFE_ENFORCE_GE(T.dim(), 2); CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N); CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D); } else { if (T.dim() == canonical_axis) { CAFFE_ENFORCE_EQ(T.numel(), N); } else { CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N); CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1); } } // Subtract 1 from labeled positions if (!label_prob_mode_) { if (weights == nullptr) { // Copy softmax probabilities into dX if (!only_loss_) { context_.CopySameDevice<float>( P.numel(), P.data<float>(), dX->template mutable_data<float>()); } hipLaunchKernelGGL(( LabelCrossEntropyGradientKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, P.data<float>(), T.data<int>(), dX->template mutable_data<float>()); } else { // Weighted version gets the Pdata values internally hipLaunchKernelGGL(( LabelCrossEntropyGradientKernelWeighted), dim3(CAFFE_GET_BLOCKS(N * D)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, P.data<float>(), T.data<int>(), dX->template mutable_data<float>(), weights); } } else { hipLaunchKernelGGL(( ProbCrossEntropyGradientKernel), dim3(CAFFE_GET_BLOCKS(N * D)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, P.data<float>(), T.data<float>(), dX->template mutable_data<float>(), weights); } float total_weight = N; if (weights) { // Sum weights math::Sum<float, CUDAContext>( N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_); CUDA_CHECK(hipMemcpyAsync( &total_weight, total_weight_ptr_.data<float>(), sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream())); } // Scale by d_avg_loss / N if (total_weight > 0) { math::Scale<float, float, CUDAContext>( dX->numel(), scale_ / total_weight, dX->data<float>(), dX->template mutable_data<float>(), &context_); } math::Scale<float, float, CUDAContext>( dX->numel(), d_avg_loss.data<float>(), dX->data<float>(), dX->template mutable_data<float>(), &context_); return true; } template <> bool SpatialSoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets // Input(2) is weights, if given auto& P = Input(InputSize() - 2); // Probabilities from softmax auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL); Tensor* dX; if (only_loss_) { // Memory saving trick to share the buffer with the softmax output. // Softmax output is thus overwritten. 
dX = OutputTensorAlias(0, P); dX->ResizeLike(X); } else { dX = Output(0, X.sizes(), at::dtype<float>()); } const auto canonical_axis = X.canonical_axis_index(1); int N, D; N = X.dim32(0); D = X.dim32(1); ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA)); // Spatial mode, compute softmax for each x, y location CAFFE_ENFORCE_EQ(X.dim(), 4); CAFFE_ENFORCE_EQ(T.dim(), 3); int H = X.dim32(2); int W = X.dim32(3); dX->ResizeLike(X); if (!weights_.defined()) { weights_ = caffe2::empty({N * W * H}, at::dtype<float>().device(CUDA)); } else if (weights_.numel() != N * W * H) { weights_.Resize(N * W * H); } const float* Pdata = P.data<float>(); float* dX_data = dX->template mutable_data<float>(); const int* label_data = T.data<int>(); const float* d_avg_loss_data = d_avg_loss.data<float>(); // Copy softmax probabilities into dX. All but the neuron // corresponding to the correct label has gradient equaling e(x_j) // which is the probability under softmax. context_.CopySameDevice<float>(P.numel(), Pdata, dX_data); math::Set<float, CUDAContext>( 1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_); hipLaunchKernelGGL(( SpatialSoftmaxLossGradientKernel), dim3(CAFFE_GET_BLOCKS(N * W * H)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>()); math::Sum<float, CUDAContext>( weights_.numel(), weights_.data<float>(), total_weight_ptr_.mutable_data<float>(), &context_, &scratch_); // Somewhat awkward scalar passing from device to host float h_total_weight; CUDA_CHECK(hipMemcpyAsync( &h_total_weight, total_weight_ptr_.data<float>(), sizeof(float), hipMemcpyDeviceToHost, context_.cuda_stream())); // Final scaling if (h_total_weight > 0) { math::Scale<float, float, CUDAContext>( dX->numel(), scale_ / h_total_weight, dX->data<float>(), dX->template mutable_data<float>(), &context_); } math::Scale<float, float, CUDAContext>( dX->numel(), d_avg_loss.data<float>(), dX->data<float>(), dX->template mutable_data<float>(), &context_); return true; } // Implementation for the CUDA context. template <> bool SoftmaxOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); const auto canonical_axis = X.canonical_axis_index(axis_); const int N = X.size_to_dim(canonical_axis); const int D = X.size_from_dim(canonical_axis); auto* P = Output(0, X.sizes(), at::dtype<float>()); auto* P_data = P->mutable_data<float>(); if (N == 0) { return true; } if (!sum_multiplier_.defined()) { sum_multiplier_ = caffe2::empty({D}, at::dtype<float>().device(CUDA)); math::Set<float, CUDAContext>( D, 1.f, sum_multiplier_.mutable_data<float>(), &context_); } else if (sum_multiplier_.numel() != D) { sum_multiplier_.Resize(D); math::Set<float, CUDAContext>( D, 1.f, sum_multiplier_.mutable_data<float>(), &context_); } if (!scale_.defined()) { scale_ = caffe2::empty({N}, at::dtype<float>().device(CUDA)); } else if (scale_.numel() != N) { scale_.Resize(N); } if (!rowmax_.defined()) { rowmax_ = caffe2::empty({N}, at::dtype<float>().device(CUDA)); } else if (rowmax_.numel() != N) { rowmax_.Resize(N); } Softmax( N, D, X.data<float>(), sum_multiplier_.data<float>(), scale_.mutable_data<float>(), rowmax_.mutable_data<float>(), P_data, false, &context_); return true; } #define SOFTMAX_NUM_THREADS 128 // The softmax gradient kernel. This kernel has to be called with the number of // threads per block being no more than SOFTMAX_NUM_THREADS. 
namespace { __global__ void softmax_gradient_kernel( const int dim, const float* Y, const float* dY, float* dX) { Y += blockIdx.x * dim; dY += blockIdx.x * dim; dX += blockIdx.x * dim; const int idx = threadIdx.x; __shared__ float reduction_buffer[SOFTMAX_NUM_THREADS]; float tmp; // A two-level reduction to compute the inner products. tmp = 0; for (int i = idx; i < dim; i += blockDim.x) { tmp += dY[i] * Y[i]; } reduction_buffer[idx] = tmp; __syncthreads(); if (idx == 0) { tmp = reduction_buffer[0]; for (int i = 1; i < blockDim.x; ++i) tmp += reduction_buffer[i]; reduction_buffer[0] = tmp; } __syncthreads(); // Compute gradient. tmp = reduction_buffer[0]; for (int i = idx; i < dim; i += blockDim.x) { dX[i] = Y[i] * (dY[i] - tmp); } } } // namespace template <> bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() { auto& Y = Input(0); auto& dY = Input(1); const auto canonical_axis = Y.canonical_axis_index(axis_); const int N = Y.size_to_dim(canonical_axis); const int D = Y.size_from_dim(canonical_axis); auto* dX = Output(0, Y.sizes(), at::dtype<float>()); auto* dX_data = dX->mutable_data<float>(); if (N == 0) { return true; } hipLaunchKernelGGL(( softmax_gradient_kernel), dim3(N), dim3(SOFTMAX_NUM_THREADS), 0, context_.cuda_stream(), D, Y.data<float>(), dY.data<float>(), dX_data); return true; } REGISTER_CUDA_OPERATOR(SoftmaxWithLoss, SoftmaxWithLossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient, SoftmaxWithLossGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SpatialSoftmaxWithLoss, SpatialSoftmaxWithLossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SpatialSoftmaxWithLossGradient, SpatialSoftmaxWithLossGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>); } // namespace caffe2
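// --- Illustrative sketch (not part of the file above) ----------------------
// A minimal host-side reference for the row-wise (log-)softmax that the
// Softmax() helper assembles from RowwiseMax/Gemm/Exp/Gemv and the two
// normalize kernels: subtract the row max for numerical stability,
// exponentiate, sum, then either divide by the sum (softmax) or subtract
// log(sum) from the shifted logits (log-softmax). Function and variable
// names here are illustrative assumptions, not Caffe2 API.
#include <algorithm>
#include <cfloat>
#include <cmath>

static void ReferenceSoftmax(const float* logits, int N, int D,
                             bool log_softmax, float* out) {
  for (int n = 0; n < N; ++n) {
    const float* row = logits + n * D;
    float* out_row = out + n * D;
    const float row_max = *std::max_element(row, row + D);
    float sum = 0.f;
    for (int d = 0; d < D; ++d) {
      out_row[d] = std::exp(row[d] - row_max);  // shifted exponent
      sum += out_row[d];
    }
    for (int d = 0; d < D; ++d) {
      out_row[d] = log_softmax
          ? (row[d] - row_max) - std::log(std::max(sum, FLT_MIN))
          : out_row[d] / sum;
    }
  }
}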
0ecfda8f73b5a9d61f159d69a7d2b52a4daa6102.cu
#include <cfloat> #include <cub/block/block_reduce.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/softmax_op.h" #include "caffe2/operators/softmax_with_loss_op.h" #include "caffe2/operators/spatial_softmax_with_loss_op.h" namespace caffe2 { namespace { __global__ void LabelCrossEntropyKernel( const int N, const int D, const float* logPdata, const int* labeldata, const float* weights, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D); float weight = weights ? weights[i] : 1.0; Ydata[i] = -logPdata[i * D + labeldata[i]] * weight; } } __global__ void LabelCrossEntropyGradientKernel( const int N, const int D, const float* Pdata, const int* labeldata, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { int idx = i * D + labeldata[i]; dXdata[idx] = Pdata[idx] - 1.; } } __global__ void LabelCrossEntropyGradientKernelWeighted( const int N, const int D, const float* Pdata, const int* labeldata, float* dXdata, const float* weights) { CUDA_1D_KERNEL_LOOP(i, N * D) { int row = i / D; int d = i % D; float val = Pdata[i] - 1.0 * (d == labeldata[row]); float weight = weights[row]; dXdata[i] = val * weight; } } __global__ void ProbCrossEntropyKernel( const int N, const int D, const float* Pdata, const float* labeldata, const float* weights, float* Ydata) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; for (int i = blockIdx.x; i < N; i += gridDim.x) { float weight = weights ? weights[i] : 1.0; float sum = 0.0; float total_prob = 0.0; for (int j = threadIdx.x; j < D; j += blockDim.x) { int idx = i * D + j; CUDA_KERNEL_ASSERT(labeldata[idx] >= 0); total_prob += labeldata[idx]; sum += -logf(fmaxf(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight; } float tot = BlockReduce(temp_storage).Sum(sum); __syncthreads(); float total_prob_sum = BlockReduce(temp_storage).Sum(total_prob); if (threadIdx.x == 0) { Ydata[i] = tot; // Sanity check CUDA_KERNEL_ASSERT(fabsf(1.0 - total_prob_sum) < 1e-5f); } __syncthreads(); } } __global__ void ProbCrossEntropyGradientKernel( const int N, const int D, const float* Pdata, const float* labeldata, float* dXdata, const float* weights) { if (weights == NULL) { CUDA_1D_KERNEL_LOOP(idx, N * D) { dXdata[idx] = Pdata[idx] - labeldata[idx]; } } else { CUDA_1D_KERNEL_LOOP(idx, N * D) { dXdata[idx] = (Pdata[idx] - labeldata[idx]) * weights[idx / D]; } } } __global__ void SpatialSoftmaxKernel( const int num, const int D, const int W, const int H, const float* Xdata, float* Pdata) { CUDA_1D_KERNEL_LOOP(index, num * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; // Subtract max on each cell for numerical reasons float max_val = -FLT_MAX; for(int c = 0; c < D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; max_val = fmaxf(max_val, Xdata[idx]); } // Exponentiate float expsum = 0.0f; for(int c = 0; c < D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; float expx = expf(Xdata[idx] - max_val); Pdata[idx] = expx; expsum += expx; } // Normalize for(int c=0; c<D; ++c) { int idx = i * (H * W * D) + c * (H * W) + y * W + x; Pdata[idx] /= expsum; } } } #define DONTCARE (-1) __global__ void SpatialCrossEntropyLossKernel( const int N, const int D, const int W, const int H, const float* Pdata, const int* label_data, const float* weights, float* loss_data, float* weight_data) { CUDA_1D_KERNEL_LOOP(index, N * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; const int label = 
static_cast<int>(label_data[index]); if (label != DONTCARE) { CUDA_KERNEL_ASSERT(label >= 0 && label < D); float weight = (weights == NULL ? 1.0 : weights[index]); loss_data[index] = -logf(fmaxf( Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight; weight_data[index] = weight; } else { loss_data[index] = 0; weight_data[index] = 0; } } } __global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D, const int W, const int H, const int* label_data, const float* weights, float* dX_data, float* weights_) { CUDA_1D_KERNEL_LOOP(index, N * W * H) { int x = index % W; int y = (index / W) % H; int i = index / W / H; const int label = static_cast<int>(label_data[index]); if (label != DONTCARE) { int data_idx = i * (H * W * D) + label * (H * W) + y * W + x; dX_data[data_idx] -= 1.0; if (weights != NULL) { float weight = weights[index]; for (int c = 0; c < D; ++c) { int data_idx = i * (H * W * D) + c * (H * W) + y * W + x; dX_data[data_idx] *= weight; } weights_[index] = weight; } else { weights_[index] = 1.0; } } else { // Ignore-label, so set all gradients for this positions // tp zero for (int c = 0; c < D; ++c) { int data_idx = i * (H * W * D) + c * (H * W) + y * W + x; dX_data[data_idx] = 0.0; } weights_[index] = 0.0; } } } __global__ void SoftmaxNormalizeLogsKernel( const int nthreads, const int D, const float* logits, const float* rowmax, const float* scales, float* out_log) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index / D; out_log[index] = logits[index] - rowmax[n] - logf(fmaxf(scales[n], FLT_MIN)); } } __global__ void SoftmaxNormalizeKernel( const int nthreads, const int D, const float* probs, const float* scales, float* out) { CUDA_1D_KERNEL_LOOP(index, nthreads) { int n = index / D; out[index] = probs[index] / scales[n]; } } void Softmax( const int N, const int D, const float* logits, const float* sum_multiplier, float* scales, float* rowmax, float* probs, bool log_softmax, CUDAContext* context) { const int size = N * D; math::RowwiseMax<float, CUDAContext>(N, D, logits, rowmax, context); // Put the intermediate result X - max(X) into Y context->CopySameDevice<float>(size, logits, probs); // Subtract the scale math::Gemm<float, CUDAContext>( CblasNoTrans, CblasNoTrans, N, D, 1, -1, rowmax, sum_multiplier, 1, probs, context); // Exponentiation math::Exp<float, CUDAContext>(size, probs, probs, context); // Sum exponentiated values math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier, 0, scales, context); // Normalize if (!log_softmax) { SoftmaxNormalizeKernel<<< CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, D, probs, scales, probs); } else { SoftmaxNormalizeLogsKernel<<< CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, D, logits, rowmax, scales, probs); } } } // namespace template<> bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets const float* weights = (InputSize() > 2 ? 
Input(2).data<float>() : NULL); const auto canonical_axis = X.canonical_axis_index(axis_); int N, D; N = X.size_to_dim(canonical_axis); // batch size D = X.size_from_dim(canonical_axis); auto* P = Output(0, X.sizes(), at::dtype<float>()); // Probabilities from softmax ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA)); total_weight_ptr_.Resize(1); if (label_prob_mode_) { CAFFE_ENFORCE_GE(T.dim(), 2); CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N); CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D); } else { if (T.dim() == canonical_axis) { CAFFE_ENFORCE_EQ(T.numel(), N); } else { CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N); CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1); } } auto* avg_loss = Output(1, vector<int64_t>(), at::dtype<float>()); // Average loss if (!losses_.defined()) { losses_ = caffe2::empty({N}, at::dtype<float>().device(CUDA)); } else if (losses_.numel() != N) { losses_.Resize(N); } if (!rowmax_.defined()) { rowmax_ = caffe2::empty({N}, at::dtype<float>().device(CUDA)); } else if (rowmax_.numel() != N) { rowmax_.Resize(N); } if (!sum_multiplier_.defined()) { sum_multiplier_ = caffe2::empty({D}, at::dtype<float>().device(CUDA)); math::Set<float, CUDAContext>(D, 1.f, sum_multiplier_.mutable_data<float>(), &context_); } else if (sum_multiplier_.numel() != D) { sum_multiplier_.Resize(D); math::Set<float, CUDAContext>(D, 1.f, sum_multiplier_.mutable_data<float>(), &context_); } Softmax( N, D, X.data<float>(), sum_multiplier_.data<float>(), losses_.mutable_data<float>(), rowmax_.mutable_data<float>(), P->template mutable_data<float>(), !label_prob_mode_, // logarithmic output &context_); // Compute label xent loss per example if (!label_prob_mode_) { LabelCrossEntropyKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, P->data<float>(), T.data<int>(), weights, losses_.mutable_data<float>()); // Since we had logarithmic output, we need to exponentiate // them again. math::Exp<float, CUDAContext>( N * D, P->data<float>(), P->template mutable_data<float>(), &context_); } else { ProbCrossEntropyKernel<<< std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, P->data<float>(), T.data<float>(), weights, losses_.mutable_data<float>()); } float total_weight = N; if (weights) { // Sum weights math::Sum<float, CUDAContext>( N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_); CUDA_CHECK(cudaMemcpyAsync( &total_weight, total_weight_ptr_.data<float>(), sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream())); } // Sum of all losses float* avg_loss_data = avg_loss->template mutable_data<float>(); math::Sum<float, CUDAContext>( losses_.numel(), losses_.data<float>(), avg_loss_data, &context_, &scratch_); // Average of input batch size if (total_weight > 0) { math::Scale<float, float, CUDAContext>( 1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_); } if (OutputSize() > 2) { OutputTensorAlias(2, losses_); } return true; } template <> bool SpatialSoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets const float* weights = (InputSize() > 2 ? 
Input(2).data<float>() : NULL); int N, D; N = X.dim32(0); D = X.dim32(1); auto* P = Output(0, X.sizes(), at::dtype<float>()); // Probabilities from softmax ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA)); CAFFE_ENFORCE_EQ(X.dim(), 4); CAFFE_ENFORCE_EQ(T.dim(), 3); CAFFE_ENFORCE_EQ(T.dim32(0), N); int H = X.dim32(2); int W = X.dim32(3); if (!losses_.defined()) { losses_ = caffe2::empty({N * W * H}, at::dtype<float>().device(CUDA)); } else if (losses_.numel() != N * W * H) { losses_.Resize(N * W * H); } if (!weights_.defined()) { weights_ = caffe2::empty({N * W * H}, at::dtype<float>().device(CUDA)); } else if (weights_.numel() != N * W * H) { weights_.Resize(N * W * H); } const float* Xdata = X.data<float>(); float* Pdata = P->template mutable_data<float>(); // Softmax for each x,y location SpatialSoftmaxKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(N, D, W, H, Xdata, Pdata); // Cross entropy auto* avg_loss = Output(1, vector<int64_t>(), at::dtype<float>()); // Average loss float* avg_loss_data = avg_loss->template mutable_data<float>(); math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_); const int* label_data = T.data<int>(); math::Set<float, CUDAContext>( 1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_); SpatialCrossEntropyLossKernel<<< CAFFE_GET_BLOCKS(N * W * H), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, W, H, P->data<float>(), label_data, weights, losses_.mutable_data<float>(), weights_.mutable_data<float>()); // Somewhat awkward scalar passing from device to host float h_total_weight; math::Sum<float, CUDAContext>( weights_.numel(), weights_.data<float>(), total_weight_ptr_.mutable_data<float>(), &context_, &scratch_); CUDA_CHECK(cudaMemcpyAsync( &h_total_weight, total_weight_ptr_.data<float>(), sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream())); math::Sum<float, CUDAContext>( losses_.numel(), losses_.data<float>(), avg_loss_data, &context_, &scratch_); // Final scaling if (h_total_weight > 0) { math::Scale<float, float, CUDAContext>( 1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_); } return true; } template <> bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets // Input(2) is weights, if given auto& P = Input(InputSize() - 2); // Probabilities from softmax auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL); Tensor* dX; if (only_loss_) { // Memory saving trick to share the buffer with the softmax output. // Softmax output is thus overwritten. 
dX = OutputTensorAlias(0, P); dX->ResizeLike(X); } else { dX = Output(0, X.sizes(), at::dtype<float>()); } const auto canonical_axis = X.canonical_axis_index(axis_); int N, D; N = X.size_to_dim(canonical_axis); // batch size D = X.size_from_dim(canonical_axis); ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA)); if (label_prob_mode_) { CAFFE_ENFORCE_GE(T.dim(), 2); CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N); CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D); } else { if (T.dim() == canonical_axis) { CAFFE_ENFORCE_EQ(T.numel(), N); } else { CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N); CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1); } } // Subtract 1 from labeled positions if (!label_prob_mode_) { if (weights == nullptr) { // Copy softmax probabilities into dX if (!only_loss_) { context_.CopySameDevice<float>( P.numel(), P.data<float>(), dX->template mutable_data<float>()); } LabelCrossEntropyGradientKernel<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, P.data<float>(), T.data<int>(), dX->template mutable_data<float>()); } else { // Weighted version gets the Pdata values internally LabelCrossEntropyGradientKernelWeighted<<< CAFFE_GET_BLOCKS(N * D), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, P.data<float>(), T.data<int>(), dX->template mutable_data<float>(), weights); } } else { ProbCrossEntropyGradientKernel<<< CAFFE_GET_BLOCKS(N * D), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, P.data<float>(), T.data<float>(), dX->template mutable_data<float>(), weights); } float total_weight = N; if (weights) { // Sum weights math::Sum<float, CUDAContext>( N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_); CUDA_CHECK(cudaMemcpyAsync( &total_weight, total_weight_ptr_.data<float>(), sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream())); } // Scale by d_avg_loss / N if (total_weight > 0) { math::Scale<float, float, CUDAContext>( dX->numel(), scale_ / total_weight, dX->data<float>(), dX->template mutable_data<float>(), &context_); } math::Scale<float, float, CUDAContext>( dX->numel(), d_avg_loss.data<float>(), dX->data<float>(), dX->template mutable_data<float>(), &context_); return true; } template <> bool SpatialSoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); // Logits auto& T = Input(1); // Labels / targets // Input(2) is weights, if given auto& P = Input(InputSize() - 2); // Probabilities from softmax auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL); Tensor* dX; if (only_loss_) { // Memory saving trick to share the buffer with the softmax output. // Softmax output is thus overwritten. 
dX = OutputTensorAlias(0, P); dX->ResizeLike(X); } else { dX = Output(0, X.sizes(), at::dtype<float>()); } const auto canonical_axis = X.canonical_axis_index(1); int N, D; N = X.dim32(0); D = X.dim32(1); ReinitializeTensor(&total_weight_ptr_, {1}, at::dtype<float>().device(CUDA)); // Spatial mode, compute softmax for each x, y location CAFFE_ENFORCE_EQ(X.dim(), 4); CAFFE_ENFORCE_EQ(T.dim(), 3); int H = X.dim32(2); int W = X.dim32(3); dX->ResizeLike(X); if (!weights_.defined()) { weights_ = caffe2::empty({N * W * H}, at::dtype<float>().device(CUDA)); } else if (weights_.numel() != N * W * H) { weights_.Resize(N * W * H); } const float* Pdata = P.data<float>(); float* dX_data = dX->template mutable_data<float>(); const int* label_data = T.data<int>(); const float* d_avg_loss_data = d_avg_loss.data<float>(); // Copy softmax probabilities into dX. All but the neuron // corresponding to the correct label has gradient equaling e(x_j) // which is the probability under softmax. context_.CopySameDevice<float>(P.numel(), Pdata, dX_data); math::Set<float, CUDAContext>( 1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_); SpatialSoftmaxLossGradientKernel<<< CAFFE_GET_BLOCKS(N * W * H), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>()); math::Sum<float, CUDAContext>( weights_.numel(), weights_.data<float>(), total_weight_ptr_.mutable_data<float>(), &context_, &scratch_); // Somewhat awkward scalar passing from device to host float h_total_weight; CUDA_CHECK(cudaMemcpyAsync( &h_total_weight, total_weight_ptr_.data<float>(), sizeof(float), cudaMemcpyDeviceToHost, context_.cuda_stream())); // Final scaling if (h_total_weight > 0) { math::Scale<float, float, CUDAContext>( dX->numel(), scale_ / h_total_weight, dX->data<float>(), dX->template mutable_data<float>(), &context_); } math::Scale<float, float, CUDAContext>( dX->numel(), d_avg_loss.data<float>(), dX->data<float>(), dX->template mutable_data<float>(), &context_); return true; } // Implementation for the CUDA context. template <> bool SoftmaxOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); const auto canonical_axis = X.canonical_axis_index(axis_); const int N = X.size_to_dim(canonical_axis); const int D = X.size_from_dim(canonical_axis); auto* P = Output(0, X.sizes(), at::dtype<float>()); auto* P_data = P->mutable_data<float>(); if (N == 0) { return true; } if (!sum_multiplier_.defined()) { sum_multiplier_ = caffe2::empty({D}, at::dtype<float>().device(CUDA)); math::Set<float, CUDAContext>( D, 1.f, sum_multiplier_.mutable_data<float>(), &context_); } else if (sum_multiplier_.numel() != D) { sum_multiplier_.Resize(D); math::Set<float, CUDAContext>( D, 1.f, sum_multiplier_.mutable_data<float>(), &context_); } if (!scale_.defined()) { scale_ = caffe2::empty({N}, at::dtype<float>().device(CUDA)); } else if (scale_.numel() != N) { scale_.Resize(N); } if (!rowmax_.defined()) { rowmax_ = caffe2::empty({N}, at::dtype<float>().device(CUDA)); } else if (rowmax_.numel() != N) { rowmax_.Resize(N); } Softmax( N, D, X.data<float>(), sum_multiplier_.data<float>(), scale_.mutable_data<float>(), rowmax_.mutable_data<float>(), P_data, false, &context_); return true; } #define SOFTMAX_NUM_THREADS 128 // The softmax gradient kernel. This kernel has to be called with the number of // threads per block being no more than SOFTMAX_NUM_THREADS. 
namespace { __global__ void softmax_gradient_kernel( const int dim, const float* Y, const float* dY, float* dX) { Y += blockIdx.x * dim; dY += blockIdx.x * dim; dX += blockIdx.x * dim; const int idx = threadIdx.x; __shared__ float reduction_buffer[SOFTMAX_NUM_THREADS]; float tmp; // A two-level reduction to compute the inner products. tmp = 0; for (int i = idx; i < dim; i += blockDim.x) { tmp += dY[i] * Y[i]; } reduction_buffer[idx] = tmp; __syncthreads(); if (idx == 0) { tmp = reduction_buffer[0]; for (int i = 1; i < blockDim.x; ++i) tmp += reduction_buffer[i]; reduction_buffer[0] = tmp; } __syncthreads(); // Compute gradient. tmp = reduction_buffer[0]; for (int i = idx; i < dim; i += blockDim.x) { dX[i] = Y[i] * (dY[i] - tmp); } } } // namespace template <> bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() { auto& Y = Input(0); auto& dY = Input(1); const auto canonical_axis = Y.canonical_axis_index(axis_); const int N = Y.size_to_dim(canonical_axis); const int D = Y.size_from_dim(canonical_axis); auto* dX = Output(0, Y.sizes(), at::dtype<float>()); auto* dX_data = dX->mutable_data<float>(); if (N == 0) { return true; } softmax_gradient_kernel<<< N, SOFTMAX_NUM_THREADS, 0, context_.cuda_stream()>>>(D, Y.data<float>(), dY.data<float>(), dX_data); return true; } REGISTER_CUDA_OPERATOR(SoftmaxWithLoss, SoftmaxWithLossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient, SoftmaxWithLossGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SpatialSoftmaxWithLoss, SpatialSoftmaxWithLossOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SpatialSoftmaxWithLossGradient, SpatialSoftmaxWithLossGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>); } // namespace caffe2
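// --- Illustrative sketch (not part of the file above) ----------------------
// Host-side reference for the identity implemented by softmax_gradient_kernel:
// for each row, dX_i = Y_i * (dY_i - sum_j(dY_j * Y_j)). The kernel computes
// the inner product with a shared-memory block reduction; this scalar loop is
// an assumed reference for checking results, not part of the operator.
static void ReferenceSoftmaxGradient(const float* Y, const float* dY,
                                     int N, int D, float* dX) {
  for (int n = 0; n < N; ++n) {
    const float* y = Y + n * D;
    const float* dy = dY + n * D;
    float* dx = dX + n * D;
    float dot = 0.f;
    for (int d = 0; d < D; ++d) dot += dy[d] * y[d];   // row inner product
    for (int d = 0; d < D; ++d) dx[d] = y[d] * (dy[d] - dot);
  }
}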
11d8c30a8e6a553424185e2ccbc831ea2c9d26d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include <hipcub/hipcub.hpp> #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/platform/cuda_device_function.h" namespace paddle { namespace operators { template <typename T> static __device__ __forceinline__ T Relu(T x) { return (x > 0) ? x : 0; } static __device__ __forceinline__ float RealSqrt(float x) { return sqrtf(x); } static __device__ __forceinline__ double RealSqrt(double x) { return sqrt(x); } template <typename T> struct PairForLayerNorm { __device__ __forceinline__ PairForLayerNorm() {} __device__ __forceinline__ PairForLayerNorm(const T& first, const T& second) : first_(first), second_(second) {} T first_; T second_; }; template <typename T> struct PairForLayerNormAddFunctor { __device__ __forceinline__ PairForLayerNorm<T> operator()( const PairForLayerNorm<T>& p1, const PairForLayerNorm<T>& p2) { return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_); } }; template <typename T, bool DoRelu, int BlockDim> __global__ void InplaceAddReluAddLayerNormKernel(const T* y, const T* bias_0, const T* bias_1, const T* scale, T* out, T* mean, T* variance, int M, int N, float epsilon) { using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<T>, BlockDim>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ T shared_mem[BlockDim + 2]; for (int i = blockIdx.x; i < M; i += gridDim.x) { int index = i * N + threadIdx.x; // The fisrt BlockDim elements will be saved to shared memory. int save_index = threadIdx.x; T* save_ptr = shared_mem; T sum_i = 0; T square_sum_i = 0; for (int j = threadIdx.x; j < N; j += blockDim.x) { T tmp_0 = out[index]; // Add bias T tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0; // Relu T tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1; // elementwise_add T tmp_3 = tmp_2 + y[index]; // Save save_ptr[save_index] = tmp_3; save_ptr = out; index += blockDim.x; save_index = index; // For layer_norm, reduce to calculate mean and std sum_i += tmp_3; square_sum_i += (tmp_3 * tmp_3); } auto pair = BlockReduce(temp_storage) .Reduce(PairForLayerNorm<T>(sum_i, square_sum_i), PairForLayerNormAddFunctor<T>()); if (threadIdx.x == 0) { T mean_i = static_cast<T>(pair.first_ / N); T variance_i = static_cast<T>(pair.second_ / N - mean_i * mean_i); shared_mem[BlockDim] = mean_i; shared_mem[BlockDim + 1] = variance_i; if (mean) { mean[blockIdx.x] = mean_i; } if (variance) { variance[blockIdx.x] = variance_i; } } __syncthreads(); T mean_i = shared_mem[BlockDim]; T std_i = static_cast<T>(RealSqrt(shared_mem[BlockDim + 1] + epsilon)); index = i * N + threadIdx.x; // First BlockDim elements loading from shared memory. 
save_index = threadIdx.x; save_ptr = shared_mem; // For layer_norm, calculate out for (int j = threadIdx.x; j < N; j += blockDim.x) { T tmp_0 = (save_ptr[save_index] - mean_i) / std_i; T tmp_1 = scale ? scale[j] * tmp_0 : tmp_0; out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1; save_ptr = out; index += blockDim.x; save_index = index; } } } template <typename T> class FusedFCElementwiseLayerNormOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* x = ctx.Input<framework::Tensor>("X"); auto* w = ctx.Input<framework::Tensor>("W"); auto* out = ctx.Output<framework::Tensor>("Out"); auto w_dims = w->dims(); int N = w_dims[1]; int K = w_dims[0]; int M = framework::product(x->dims()) / K; const T* x_data = x->data<T>(); const T* w_data = w->data<T>(); T* out_data = out->mutable_data<T>(ctx.GetPlace()); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto blas = math::GetBlas<platform::CUDADeviceContext, T>(dev_ctx); blas.GEMM(false, false, M, N, K, static_cast<T>(1.0), x_data, K, w_data, N, static_cast<T>(0.0), out_data, N); auto* y = ctx.Input<framework::Tensor>("Y"); auto* bias_0 = ctx.Input<framework::Tensor>("Bias0"); auto* bias_1 = ctx.Input<framework::Tensor>("Bias1"); auto* scale = ctx.Input<framework::Tensor>("Scale"); const T* y_data = y->data<T>(); const T* bias_0_data = bias_0 ? bias_0->data<T>() : nullptr; const T* bias_1_data = bias_1 ? bias_1->data<T>() : nullptr; const T* scale_data = scale ? scale->data<T>() : nullptr; auto* mean = ctx.Output<framework::Tensor>("Mean"); auto* variance = ctx.Output<framework::Tensor>("Variance"); T* mean_data = mean ? mean->mutable_data<T>(ctx.GetPlace()) : nullptr; T* variance_data = variance ? variance->mutable_data<T>(ctx.GetPlace()) : nullptr; bool with_relu = (ctx.Attr<std::string>("activation_type") == "relu") ? true : false; float epsilon = ctx.Attr<float>("epsilon"); int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); if (with_relu) { switch (platform::RoundToPowerOfTwo(N)) { CUDA_LAUNCH_KERNEL_HELPER( hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel< T, true, kPowerOfTwoDim>), dim3(::max(max_threads / kPowerOfTwoDim, 1)), dim3(kPowerOfTwoDim), 0, dev_ctx.stream(), y_data, bias_0_data, bias_1_data, scale_data, out_data, mean_data, variance_data, M, N, epsilon)); } } else { switch (platform::RoundToPowerOfTwo(N)) { CUDA_LAUNCH_KERNEL_HELPER( hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel< T, false, kPowerOfTwoDim>), dim3(::max(max_threads / kPowerOfTwoDim, 1)), dim3(kPowerOfTwoDim), 0, dev_ctx.stream(), y_data, bias_0_data, bias_1_data, scale_data, out_data, mean_data, variance_data, M, N, epsilon)); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(fused_fc_elementwise_layernorm, ops::FusedFCElementwiseLayerNormOpKernel<float>);
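// --- Illustrative sketch (not part of the file above) ----------------------
// Host-side reference for what InplaceAddReluAddLayerNormKernel fuses per row:
// take the GEMM output, add bias0, apply ReLU, add the residual y, then
// layer-normalize the row using a mean and variance computed from the sum and
// sum-of-squares (the same quantities the kernel's block reduction produces),
// followed by the optional scale and bias1. Names are illustrative
// assumptions, not PaddlePaddle API.
#include <algorithm>
#include <cmath>

template <typename T>
static void ReferenceFusedAddReluAddLayerNorm(
    const T* gemm_out, const T* y, const T* bias0, const T* bias1,
    const T* scale, int M, int N, float epsilon, T* out) {
  for (int i = 0; i < M; ++i) {
    T sum = 0, sq_sum = 0;
    for (int j = 0; j < N; ++j) {
      T v = gemm_out[i * N + j];
      if (bias0) v += bias0[j];
      v = std::max(v, T(0));        // ReLU branch of the kernel
      v += y[i * N + j];            // elementwise_add with the residual
      out[i * N + j] = v;
      sum += v;
      sq_sum += v * v;
    }
    const T mean = sum / N;
    const T var = sq_sum / N - mean * mean;
    const T inv_std = T(1) / std::sqrt(var + T(epsilon));
    for (int j = 0; j < N; ++j) {
      T v = (out[i * N + j] - mean) * inv_std;
      if (scale) v *= scale[j];
      if (bias1) v += bias1[j];
      out[i * N + j] = v;
    }
  }
}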
11d8c30a8e6a553424185e2ccbc831ea2c9d26d2.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __NVCC__ #include <cub/cub.cuh> #endif #ifdef __HIPCC__ #include <hipcub/hipcub.hpp> namespace cub = hipcub; #endif #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/platform/cuda_device_function.h" namespace paddle { namespace operators { template <typename T> static __device__ __forceinline__ T Relu(T x) { return (x > 0) ? x : 0; } static __device__ __forceinline__ float RealSqrt(float x) { return sqrtf(x); } static __device__ __forceinline__ double RealSqrt(double x) { return sqrt(x); } template <typename T> struct PairForLayerNorm { __device__ __forceinline__ PairForLayerNorm() {} __device__ __forceinline__ PairForLayerNorm(const T& first, const T& second) : first_(first), second_(second) {} T first_; T second_; }; template <typename T> struct PairForLayerNormAddFunctor { __device__ __forceinline__ PairForLayerNorm<T> operator()( const PairForLayerNorm<T>& p1, const PairForLayerNorm<T>& p2) { return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_); } }; template <typename T, bool DoRelu, int BlockDim> __global__ void InplaceAddReluAddLayerNormKernel(const T* y, const T* bias_0, const T* bias_1, const T* scale, T* out, T* mean, T* variance, int M, int N, float epsilon) { using BlockReduce = cub::BlockReduce<PairForLayerNorm<T>, BlockDim>; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ T shared_mem[BlockDim + 2]; for (int i = blockIdx.x; i < M; i += gridDim.x) { int index = i * N + threadIdx.x; // The fisrt BlockDim elements will be saved to shared memory. int save_index = threadIdx.x; T* save_ptr = shared_mem; T sum_i = 0; T square_sum_i = 0; for (int j = threadIdx.x; j < N; j += blockDim.x) { T tmp_0 = out[index]; // Add bias T tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0; // Relu T tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1; // elementwise_add T tmp_3 = tmp_2 + y[index]; // Save save_ptr[save_index] = tmp_3; save_ptr = out; index += blockDim.x; save_index = index; // For layer_norm, reduce to calculate mean and std sum_i += tmp_3; square_sum_i += (tmp_3 * tmp_3); } auto pair = BlockReduce(temp_storage) .Reduce(PairForLayerNorm<T>(sum_i, square_sum_i), PairForLayerNormAddFunctor<T>()); if (threadIdx.x == 0) { T mean_i = static_cast<T>(pair.first_ / N); T variance_i = static_cast<T>(pair.second_ / N - mean_i * mean_i); shared_mem[BlockDim] = mean_i; shared_mem[BlockDim + 1] = variance_i; if (mean) { mean[blockIdx.x] = mean_i; } if (variance) { variance[blockIdx.x] = variance_i; } } __syncthreads(); T mean_i = shared_mem[BlockDim]; T std_i = static_cast<T>(RealSqrt(shared_mem[BlockDim + 1] + epsilon)); index = i * N + threadIdx.x; // First BlockDim elements loading from shared memory. save_index = threadIdx.x; save_ptr = shared_mem; // For layer_norm, calculate out for (int j = threadIdx.x; j < N; j += blockDim.x) { T tmp_0 = (save_ptr[save_index] - mean_i) / std_i; T tmp_1 = scale ? 
scale[j] * tmp_0 : tmp_0; out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1; save_ptr = out; index += blockDim.x; save_index = index; } } } template <typename T> class FusedFCElementwiseLayerNormOpKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* x = ctx.Input<framework::Tensor>("X"); auto* w = ctx.Input<framework::Tensor>("W"); auto* out = ctx.Output<framework::Tensor>("Out"); auto w_dims = w->dims(); int N = w_dims[1]; int K = w_dims[0]; int M = framework::product(x->dims()) / K; const T* x_data = x->data<T>(); const T* w_data = w->data<T>(); T* out_data = out->mutable_data<T>(ctx.GetPlace()); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto blas = math::GetBlas<platform::CUDADeviceContext, T>(dev_ctx); blas.GEMM(false, false, M, N, K, static_cast<T>(1.0), x_data, K, w_data, N, static_cast<T>(0.0), out_data, N); auto* y = ctx.Input<framework::Tensor>("Y"); auto* bias_0 = ctx.Input<framework::Tensor>("Bias0"); auto* bias_1 = ctx.Input<framework::Tensor>("Bias1"); auto* scale = ctx.Input<framework::Tensor>("Scale"); const T* y_data = y->data<T>(); const T* bias_0_data = bias_0 ? bias_0->data<T>() : nullptr; const T* bias_1_data = bias_1 ? bias_1->data<T>() : nullptr; const T* scale_data = scale ? scale->data<T>() : nullptr; auto* mean = ctx.Output<framework::Tensor>("Mean"); auto* variance = ctx.Output<framework::Tensor>("Variance"); T* mean_data = mean ? mean->mutable_data<T>(ctx.GetPlace()) : nullptr; T* variance_data = variance ? variance->mutable_data<T>(ctx.GetPlace()) : nullptr; bool with_relu = (ctx.Attr<std::string>("activation_type") == "relu") ? true : false; float epsilon = ctx.Attr<float>("epsilon"); int max_threads = dev_ctx.GetMaxPhysicalThreadCount(); if (with_relu) { switch (platform::RoundToPowerOfTwo(N)) { CUDA_LAUNCH_KERNEL_HELPER( InplaceAddReluAddLayerNormKernel< T, true, kPowerOfTwoDim><<<std::max(max_threads / kPowerOfTwoDim, 1), kPowerOfTwoDim, 0, dev_ctx.stream()>>>( y_data, bias_0_data, bias_1_data, scale_data, out_data, mean_data, variance_data, M, N, epsilon)); } } else { switch (platform::RoundToPowerOfTwo(N)) { CUDA_LAUNCH_KERNEL_HELPER( InplaceAddReluAddLayerNormKernel< T, false, kPowerOfTwoDim><<<std::max(max_threads / kPowerOfTwoDim, 1), kPowerOfTwoDim, 0, dev_ctx.stream()>>>( y_data, bias_0_data, bias_1_data, scale_data, out_data, mean_data, variance_data, M, N, epsilon)); } } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(fused_fc_elementwise_layernorm, ops::FusedFCElementwiseLayerNormOpKernel<float>);
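// --- Illustrative sketch (not part of the file above) ----------------------
// Minimal standalone use of cub::BlockReduce to obtain a row mean and variance
// in one pass from the sum and sum-of-squares, the same reduction pattern as
// PairForLayerNorm / PairForLayerNormAddFunctor above. The kernel and names
// are illustrative assumptions, not PaddlePaddle API.
#include <cub/cub.cuh>
#include <cuda_runtime.h>

struct Float2Add {
  __device__ float2 operator()(const float2& a, const float2& b) const {
    return make_float2(a.x + b.x, a.y + b.y);
  }
};

template <int BlockDim>
__global__ void RowMeanVarKernel(const float* x, int rows, int cols,
                                 float* mean, float* variance) {
  using BlockReduce = cub::BlockReduce<float2, BlockDim>;
  __shared__ typename BlockReduce::TempStorage temp_storage;
  for (int r = blockIdx.x; r < rows; r += gridDim.x) {
    float2 acc = make_float2(0.f, 0.f);  // (sum, sum of squares)
    for (int c = threadIdx.x; c < cols; c += blockDim.x) {
      const float v = x[r * cols + c];
      acc.x += v;
      acc.y += v * v;
    }
    const float2 total = BlockReduce(temp_storage).Reduce(acc, Float2Add());
    if (threadIdx.x == 0) {  // only thread 0 holds the block aggregate
      const float m = total.x / cols;
      mean[r] = m;
      variance[r] = total.y / cols - m * m;
    }
    __syncthreads();  // make temp_storage safe to reuse for the next row
  }
}
// Launch with BlockDim threads per block, e.g. RowMeanVarKernel<256><<<rows, 256>>>(...).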
d4f067e20d6d17f7430f1b2fd061092d1d441652.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Green, and Blue is in it. //The 'A' stands for Alpha and is used for transparency; it will be //ignored in this homework. //Each channel Red, Blue, Green, and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "kernel_hip.cuh" #include <stdio.h> __global__ void rgba_to_greyscale_kernel(uchar4 const * const rgbaImage, unsigned char * const greyImage, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset // http://users.wfu.edu/choss/CUDA/docs/Lecture%205.pdf // full global thread ID in X and Y dimensions int col = threadIdx.x + blockIdx.x * blockDim.x; // x int row = threadIdx.y + blockIdx.y * blockDim.y; // y // ACCESSING MATRICES IN LINEAR MEMORY // we cannot not use two-dimensional indices (e.g. A[row][column]) // to access matrices // We will need to know how the matrix is laid out in memory and // then compute the distance from the beginning of the matrix // C uses row-major order --- rows are stored one after the // other in memory, i.e.row 0 then row 1 etc. 
int index = numCols * row + col; // W * row + col uchar4 const * texel = (rgbaImage + index); *(greyImage + index) = texel->x * .299f + texel->y * .587f + texel->z * .114f; } void your_rgba_to_greyscale(uchar4 const * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char * const d_greyImage, size_t numRows, size_t numCols) { std::cout << "\nLAUNCHING KERNEL:" << std::endl; int const BLOCKS = 32; int const X_THREADS_PER_BLOCK = numCols / BLOCKS; int const Y_THREADS_PER_BLOCK = numRows / BLOCKS; std::cout << "\t// remember max number of threads per block is 1024 in total, no in each dimension" << std::endl; std::cout << "\t- Num of blocks: " << BLOCKS << "x" << BLOCKS << std::endl; std::cout << "\t- Threads per block in X: " << X_THREADS_PER_BLOCK << std::endl; std::cout << "\t- Threads per block in Y: " << Y_THREADS_PER_BLOCK << std::endl; std::cout << "\t- Total threads per block: " << X_THREADS_PER_BLOCK * Y_THREADS_PER_BLOCK << std::endl; //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched const dim3 blockSize(X_THREADS_PER_BLOCK, Y_THREADS_PER_BLOCK, 1); // How many threads per block? const dim3 gridSize(BLOCKS, BLOCKS, 1); // How many blocks? (grid of blocks) hipLaunchKernelGGL(( rgba_to_greyscale_kernel) , dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numCols); // Check for any errors launching the kernel hipError_t cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { std::cerr << "\t- rgba_to_greyscale_kernel launch failed: " << hipGetErrorString(cudaStatus) << std::endl; return; } std::cout << "\t- Kernel launched succesfully" << std::endl; //hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } /* int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }*/
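// --- Illustrative sketch (not part of the file above) ----------------------
// Worked example of the row-major indexing and NTSC weighting described in the
// kernel comments: a pixel at (row, col) in a W-column image sits at linear
// offset row * W + col, and its grey value is .299f*R + .587f*G + .114f*B with
// alpha ignored. Plain host code; the function name is illustrative.
static unsigned char GreyAt(const unsigned char* rgba, int numCols,
                            int row, int col) {
  const int index = row * numCols + col;        // row-major 1D offset
  const unsigned char* p = rgba + 4 * index;    // 4 bytes per RGBA pixel
  const float grey = .299f * p[0] + .587f * p[1] + .114f * p[2];
  return static_cast<unsigned char>(grey);
}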
d4f067e20d6d17f7430f1b2fd061092d1d441652.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Green, and Blue is in it. //The 'A' stands for Alpha and is used for transparency; it will be //ignored in this homework. //Each channel Red, Blue, Green, and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "kernel.cuh" #include <stdio.h> __global__ void rgba_to_greyscale_kernel(uchar4 const * const rgbaImage, unsigned char * const greyImage, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset // http://users.wfu.edu/choss/CUDA/docs/Lecture%205.pdf // full global thread ID in X and Y dimensions int col = threadIdx.x + blockIdx.x * blockDim.x; // x int row = threadIdx.y + blockIdx.y * blockDim.y; // y // ACCESSING MATRICES IN LINEAR MEMORY // • we cannot not use two-dimensional indices (e.g. A[row][column]) // to access matrices // • We will need to know how the matrix is laid out in memory and // then compute the distance from the beginning of the matrix // • C uses row-major order --- rows are stored one after the // other in memory, i.e.row 0 then row 1 etc. 
    int index = numCols * row + col; // W * row + col
    uchar4 const * texel = (rgbaImage + index);
    *(greyImage + index) = texel->x * .299f + texel->y * .587f + texel->z * .114f;
}

void your_rgba_to_greyscale(uchar4 const * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char * const d_greyImage, size_t numRows, size_t numCols)
{
    std::cout << "\nLAUNCHING KERNEL:" << std::endl;

    std::cout << "\t// remember the max number of threads per block is 1024 in total, not in each dimension" << std::endl;
    int const THREADS_PER_DIM = 32; // 32 x 32 = 1024 threads per block
    int const BLOCKS_X = (int)((numCols + THREADS_PER_DIM - 1) / THREADS_PER_DIM);
    int const BLOCKS_Y = (int)((numRows + THREADS_PER_DIM - 1) / THREADS_PER_DIM);

    std::cout << "\t- Num of blocks: " << BLOCKS_X << "x" << BLOCKS_Y << std::endl;
    std::cout << "\t- Threads per block in X: " << THREADS_PER_DIM << std::endl;
    std::cout << "\t- Threads per block in Y: " << THREADS_PER_DIM << std::endl;
    std::cout << "\t- Total threads per block: " << THREADS_PER_DIM * THREADS_PER_DIM << std::endl;

    //You must fill in the correct sizes for the blockSize and gridSize
    const dim3 blockSize(THREADS_PER_DIM, THREADS_PER_DIM, 1); // How many threads per block?
    const dim3 gridSize(BLOCKS_X, BLOCKS_Y, 1);                // How many blocks? (grid of blocks)

    rgba_to_greyscale_kernel <<<gridSize, blockSize>>> (d_rgbaImage, d_greyImage, numRows, numCols);

    // Check for any errors launching the kernel
    cudaError_t cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        std::cerr << "\t- rgba_to_greyscale_kernel launch failed: " << cudaGetErrorString(cudaStatus) << std::endl;
        return;
    }
    std::cout << "\t- Kernel launched successfully" << std::endl;

    //cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
}
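The conversion above is easy to spot-check on the host. The snippet below applies the same NTSC weights to a single pixel on the CPU; comparing its output against the corresponding byte of d_greyImage (after copying it back) is a quick sanity test. It is an illustrative sketch only — the helper name greyscale_reference and the sample pixel are made up here and are not part of the homework sources.

// Host-side spot check for the NTSC greyscale formula (illustrative only).
#include <cstdio>
#include <cuda_runtime.h>   // uchar4 / make_uchar4

static unsigned char greyscale_reference(uchar4 p)
{
    // Same weights the kernel uses; alpha (.w) is ignored.
    return (unsigned char)(.299f * p.x + .587f * p.y + .114f * p.z);
}

int main()
{
    uchar4 px = make_uchar4(200, 100, 50, 255);   // R, G, B, A
    printf("reference grey value: %u\n", greyscale_reference(px));
    return 0;
}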
14de20976259b50292b113cb06dcd4ac3d45dcb1.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_negatef.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            size_t n = XSIZE*YSIZE;
            // the buffers hold n floats, so size the allocations in bytes, not in elements
            float *result = NULL;
            hipMalloc(&result, XSIZE*YSIZE*sizeof(float));
            float *x = NULL;
            hipMalloc(&x, XSIZE*YSIZE*sizeof(float));
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( vec_negatef), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( vec_negatef), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( vec_negatef), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // release the buffers before timing the next configuration
            hipFree(result);
            hipFree(x);
        }
    }
}
14de20976259b50292b113cb06dcd4ac3d45dcb1.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_negatef.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            size_t n = XSIZE*YSIZE;
            // the buffers hold n floats, so size the allocations in bytes, not in elements
            float *result = NULL;
            cudaMalloc(&result, XSIZE*YSIZE*sizeof(float));
            float *x = NULL;
            cudaMalloc(&x, XSIZE*YSIZE*sizeof(float));
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            vec_negatef<<<gridBlock,threadBlock>>>(n,result,x);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                vec_negatef<<<gridBlock,threadBlock>>>(n,result,x);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                vec_negatef<<<gridBlock,threadBlock>>>(n,result,x);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
            // release the buffers before timing the next configuration
            cudaFree(result);
            cudaFree(x);
        }
    }
}
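The harness above includes "vec_negatef.cu", which is not part of this pair, so only the launch vec_negatef<<<gridBlock,threadBlock>>>(n, result, x) constrains what the kernel looks like. The sketch below is a hypothetical stand-in consistent with that launch (element-wise negation with a bounds check); the real kernel may well differ.

// Hypothetical stand-in for vec_negatef.cu, inferred only from the launch in the
// harness above; compiled into the harness via its #include "vec_negatef.cu".
#include <cstddef>

__global__ void vec_negatef(size_t n, float *result, const float *x)
{
    // The harness launches a 2D grid of 2D blocks, so flatten the coordinates
    // into one linear index before the bounds check.
    size_t idx = (size_t)(blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y)
               + threadIdx.y * blockDim.x + threadIdx.x;
    if (idx < n)
        result[idx] = -x[idx];   // element-wise negation
}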
cc63124aedf0db4cb1e720b00ce9df0b420dee0b.hip
// !!! This is a file automatically generated by hipify!!! #include <random> #include <vector> #include <tuple> #include <cstdio> #include <cstdlib> #include <functional> #include <algorithm> #include "../utils/SyncedMemory.h" #include "../utils/Timer.h" #include "counting.h" using namespace std; #define CHECK {\ auto e = hipDeviceSynchronize();\ if (e != hipSuccess) {\ printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\ abort();\ }\ } template <typename Engine> tuple<vector<char>, vector<int>, vector<int>> GenerateTestCase(Engine &eng, const int N) { poisson_distribution<int> pd(14.0); bernoulli_distribution bd(0.1); uniform_int_distribution<int> id1(1, 20); uniform_int_distribution<int> id2(1, 5); uniform_int_distribution<int> id3('a', 'z'); tuple<vector<char>, vector<int>, vector<int>> ret; auto &text = get<0>(ret); auto &pos = get<1>(ret); auto &head = get<2>(ret); auto gen_rand_word_len = [&] () -> int { return max(1, min(500, pd(eng) - 5 + (bd(eng) ? id1(eng)*20 : 0))); }; auto gen_rand_space_len = [&] () -> int { return id2(eng); }; auto gen_rand_char = [&] () { return id3(eng); }; auto AddWord = [&] () { head.push_back(text.size()); int n = gen_rand_word_len(); for (int i = 0; i < n; ++i) { text.push_back(gen_rand_char()); pos.push_back(i+1); } }; auto AddSpace = [&] () { int n = gen_rand_space_len(); for (int i = 0; i < n; ++i) { text.push_back('\n'); pos.push_back(0); } }; AddWord(); while (text.size() < N) { AddSpace(); AddWord(); } return ret; } int main(int argc, char **argv) { // Initialize random text default_random_engine engine(12345); auto text_pos_head = GenerateTestCase(engine, 1);//40000000); // 40 MB data vector<char> &text = get<0>(text_pos_head); vector<int> &pos = get<1>(text_pos_head); vector<int> &head = get<2>(text_pos_head); // Prepare buffers int n = text.size(); char *text_gpu; hipMalloc(&text_gpu, sizeof(char)*n); SyncedMemory<char> text_sync(text.data(), text_gpu, n); text_sync.get_cpu_wo(); // touch the cpu data MemoryBuffer<int> pos_yours(n), head_yours(n); auto pos_yours_sync = pos_yours.CreateSync(n); auto head_yours_sync = head_yours.CreateSync(n); // Create timers Timer timer_count_position; // Part I timer_count_position.Start(); int *pos_yours_gpu = pos_yours_sync.get_gpu_wo(); CountPosition(text_sync.get_gpu_ro(), pos_yours_gpu, n); //puts(text_sync.get_cpu_ro()); /* for(int i=0;i<n; i++){ printf("%d ", pos_yours_sync.get_cpu_ro()[i]); }*/ timer_count_position.Pause(); CHECK; printf_timer(timer_count_position); // Part I check const int *golden = pos.data(); const int *yours = pos_yours_sync.get_cpu_ro(); int n_match1 = mismatch(golden, golden+n, yours).first - golden; if (n_match1 != n) { puts("Part I WA!"); copy_n(golden, n, pos_yours_sync.get_cpu_wo()); } // Part II int *head_yours_gpu = head_yours_sync.get_gpu_wo(); int n_head = ExtractHead(pos_yours_sync.get_gpu_ro(), head_yours_gpu, n); CHECK; // Part II check do { if (n_head != head.size()) { n_head = head.size(); puts("Part II WA (wrong number of heads)!"); } else { int n_match2 = mismatch(head.begin(), head.end(), head_yours_sync.get_cpu_ro()).first - head.begin(); if (n_match2 != n_head) { puts("Part II WA (wrong heads)!"); } else { break; } } copy_n(head.begin(), n_head, head_yours_sync.get_cpu_wo()); } while(false); // Part III // Do whatever your want Part3(text_gpu, pos_yours_sync.get_gpu_rw(), head_yours_sync.get_gpu_rw(), n, n_head); CHECK; hipFree(text_gpu); return 0; }
cc63124aedf0db4cb1e720b00ce9df0b420dee0b.cu
#include <random> #include <vector> #include <tuple> #include <cstdio> #include <cstdlib> #include <functional> #include <algorithm> #include "../utils/SyncedMemory.h" #include "../utils/Timer.h" #include "counting.h" using namespace std; #define CHECK {\ auto e = cudaDeviceSynchronize();\ if (e != cudaSuccess) {\ printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\ abort();\ }\ } template <typename Engine> tuple<vector<char>, vector<int>, vector<int>> GenerateTestCase(Engine &eng, const int N) { poisson_distribution<int> pd(14.0); bernoulli_distribution bd(0.1); uniform_int_distribution<int> id1(1, 20); uniform_int_distribution<int> id2(1, 5); uniform_int_distribution<int> id3('a', 'z'); tuple<vector<char>, vector<int>, vector<int>> ret; auto &text = get<0>(ret); auto &pos = get<1>(ret); auto &head = get<2>(ret); auto gen_rand_word_len = [&] () -> int { return max(1, min(500, pd(eng) - 5 + (bd(eng) ? id1(eng)*20 : 0))); }; auto gen_rand_space_len = [&] () -> int { return id2(eng); }; auto gen_rand_char = [&] () { return id3(eng); }; auto AddWord = [&] () { head.push_back(text.size()); int n = gen_rand_word_len(); for (int i = 0; i < n; ++i) { text.push_back(gen_rand_char()); pos.push_back(i+1); } }; auto AddSpace = [&] () { int n = gen_rand_space_len(); for (int i = 0; i < n; ++i) { text.push_back('\n'); pos.push_back(0); } }; AddWord(); while (text.size() < N) { AddSpace(); AddWord(); } return ret; } int main(int argc, char **argv) { // Initialize random text default_random_engine engine(12345); auto text_pos_head = GenerateTestCase(engine, 1);//40000000); // 40 MB data vector<char> &text = get<0>(text_pos_head); vector<int> &pos = get<1>(text_pos_head); vector<int> &head = get<2>(text_pos_head); // Prepare buffers int n = text.size(); char *text_gpu; cudaMalloc(&text_gpu, sizeof(char)*n); SyncedMemory<char> text_sync(text.data(), text_gpu, n); text_sync.get_cpu_wo(); // touch the cpu data MemoryBuffer<int> pos_yours(n), head_yours(n); auto pos_yours_sync = pos_yours.CreateSync(n); auto head_yours_sync = head_yours.CreateSync(n); // Create timers Timer timer_count_position; // Part I timer_count_position.Start(); int *pos_yours_gpu = pos_yours_sync.get_gpu_wo(); CountPosition(text_sync.get_gpu_ro(), pos_yours_gpu, n); //puts(text_sync.get_cpu_ro()); /* for(int i=0;i<n; i++){ printf("%d ", pos_yours_sync.get_cpu_ro()[i]); }*/ timer_count_position.Pause(); CHECK; printf_timer(timer_count_position); // Part I check const int *golden = pos.data(); const int *yours = pos_yours_sync.get_cpu_ro(); int n_match1 = mismatch(golden, golden+n, yours).first - golden; if (n_match1 != n) { puts("Part I WA!"); copy_n(golden, n, pos_yours_sync.get_cpu_wo()); } // Part II int *head_yours_gpu = head_yours_sync.get_gpu_wo(); int n_head = ExtractHead(pos_yours_sync.get_gpu_ro(), head_yours_gpu, n); CHECK; // Part II check do { if (n_head != head.size()) { n_head = head.size(); puts("Part II WA (wrong number of heads)!"); } else { int n_match2 = mismatch(head.begin(), head.end(), head_yours_sync.get_cpu_ro()).first - head.begin(); if (n_match2 != n_head) { puts("Part II WA (wrong heads)!"); } else { break; } } copy_n(head.begin(), n_head, head_yours_sync.get_cpu_wo()); } while(false); // Part III // Do whatever your want Part3(text_gpu, pos_yours_sync.get_gpu_rw(), head_yours_sync.get_gpu_rw(), n, n_head); CHECK; cudaFree(text_gpu); return 0; }
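counting.h and the CountPosition/ExtractHead/Part3 implementations are not included in this pair; what they must produce is defined by the golden data built in GenerateTestCase (pos[i] is the 1-based offset of character i within its word, or 0 when text[i] is '\n'; head lists the index of each word's first character). The kernel below is a deliberately naive, single-thread sketch that makes that contract concrete — it is not the intended high-performance solution.

// Naive reference for the CountPosition contract implied by GenerateTestCase.
// Illustrative only; e.g. CountPositionNaive<<<1, 1>>>(text_gpu, pos_gpu, n);
__global__ void CountPositionNaive(const char *text, int *pos, int n)
{
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        int run = 0;                              // running offset inside the current word
        for (int i = 0; i < n; ++i) {
            run = (text[i] == '\n') ? 0 : run + 1;
            pos[i] = run;                         // 0 for '\n', 1-based position otherwise
        }
    }
}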
b5ae412be92d898955592157613a1fd2e51702d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef __cplusplus extern "C" { #endif #include <stdio.h> #include <math.h> #include <float.h> #include "roi_align_rel_kernel.h" #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) __global__ void ROIAlignRelForward(const int nthreads, const float* bottom_data, const float spatial_scale, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const float* bottom_rois, float* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output // int n = index; // int pw = n % aligned_width; // n /= aligned_width; // int ph = n % aligned_height; // n /= aligned_height; // int c = n % channels; // n /= channels; int pw = index % aligned_width; int ph = (index / aligned_width) % aligned_height; int c = (index / aligned_width / aligned_height) % channels; int n = index / aligned_width / aligned_height / channels; // bottom_rois += n * 5; float roi_batch_ind = bottom_rois[n * 5 + 0]; float roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale; float roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale; float roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale; float roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale; // Force malformed ROIs to be 1x1 float roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.); float roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.); float bin_size_h = roi_height / (aligned_height - 1.); float bin_size_w = roi_width / (aligned_width - 1.); float h = (float)(ph) * bin_size_h + roi_start_h; float w = (float)(pw) * bin_size_w + roi_start_w; int hstart = fminf(floor(h), height - 2); int wstart = fminf(floor(w), width - 2); int img_start = roi_batch_ind * channels * height * width; // bilinear interpolation if (h < 0 || h >= height || w < 0 || w >= width) { top_data[index] = 0.; } else { float h_ratio = h - (float)(hstart); float w_ratio = w - (float)(wstart); int upleft = img_start + (c * height + hstart) * width + wstart; int upright = upleft + 1; int downleft = upleft + width; int downright = downleft + 1; top_data[index] = bottom_data[upleft] * (1. - h_ratio) * (1. - w_ratio) + bottom_data[upright] * (1. - h_ratio) * w_ratio + bottom_data[downleft] * h_ratio * (1. 
- w_ratio) + bottom_data[downright] * h_ratio * w_ratio; } } } int ROIAlignRelForwardLaucher(const float* bottom_data, const float spatial_scale, const int num_rois, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const float* bottom_rois, float* top_data, hipStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = num_rois * aligned_height * aligned_width * channels; hipError_t err; hipLaunchKernelGGL(( ROIAlignForward), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, output_size, bottom_data, spatial_scale, height, width, channels, aligned_height, aligned_width, bottom_rois, top_data); err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void ROIAlignRelBackward(const int nthreads, const float* top_diff, const float spatial_scale, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, float* bottom_diff, const float* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output int pw = index % aligned_width; int ph = (index / aligned_width) % aligned_height; int c = (index / aligned_width / aligned_height) % channels; int n = index / aligned_width / aligned_height / channels; float roi_batch_ind = bottom_rois[n * 5 + 0]; float roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale; float roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale; float roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale; float roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale; /* int roi_start_w = round(bottom_rois[1] * spatial_scale); */ /* int roi_start_h = round(bottom_rois[2] * spatial_scale); */ /* int roi_end_w = round(bottom_rois[3] * spatial_scale); */ /* int roi_end_h = round(bottom_rois[4] * spatial_scale); */ // Force malformed ROIs to be 1x1 float roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.); float roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.); float bin_size_h = roi_height / (aligned_height - 1.); float bin_size_w = roi_width / (aligned_width - 1.); float h = (float)(ph) * bin_size_h + roi_start_h; float w = (float)(pw) * bin_size_w + roi_start_w; int hstart = fminf(floor(h), height - 2); int wstart = fminf(floor(w), width - 2); int img_start = roi_batch_ind * channels * height * width; // bilinear interpolation if (!(h < 0 || h >= height || w < 0 || w >= width)) { float h_ratio = h - (float)(hstart); float w_ratio = w - (float)(wstart); int upleft = img_start + (c * height + hstart) * width + wstart; int upright = upleft + 1; int downleft = upleft + width; int downright = downleft + 1; atomicAdd(bottom_diff + upleft, top_diff[index] * (1. - h_ratio) * (1 - w_ratio)); atomicAdd(bottom_diff + upright, top_diff[index] * (1. 
- h_ratio) * w_ratio); atomicAdd(bottom_diff + downleft, top_diff[index] * h_ratio * (1 - w_ratio)); atomicAdd(bottom_diff + downright, top_diff[index] * h_ratio * w_ratio); } } } int ROIAlignRelBackwardLaucher(const float* top_diff, const float spatial_scale, const int batch_size, const int num_rois, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const float* bottom_rois, float* bottom_diff, hipStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = num_rois * aligned_height * aligned_width * channels; hipError_t err; hipLaunchKernelGGL(( ROIAlignBackward), dim3((output_size + kThreadsPerBlock - 1) / kThreadsPerBlock), dim3(kThreadsPerBlock), 0, stream, output_size, top_diff, spatial_scale, height, width, channels, aligned_height, aligned_width, bottom_diff, bottom_rois); err = hipGetLastError(); if(hipSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) ); exit( -1 ); } return 1; } #ifdef __cplusplus } #endif
b5ae412be92d898955592157613a1fd2e51702d7.cu
#ifdef __cplusplus extern "C" { #endif #include <stdio.h> #include <math.h> #include <float.h> #include "roi_align_rel_kernel.h" #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) __global__ void ROIAlignRelForward(const int nthreads, const float* bottom_data, const float spatial_scale, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const float* bottom_rois, float* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output // int n = index; // int pw = n % aligned_width; // n /= aligned_width; // int ph = n % aligned_height; // n /= aligned_height; // int c = n % channels; // n /= channels; int pw = index % aligned_width; int ph = (index / aligned_width) % aligned_height; int c = (index / aligned_width / aligned_height) % channels; int n = index / aligned_width / aligned_height / channels; // bottom_rois += n * 5; float roi_batch_ind = bottom_rois[n * 5 + 0]; float roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale; float roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale; float roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale; float roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale; // Force malformed ROIs to be 1x1 float roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.); float roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.); float bin_size_h = roi_height / (aligned_height - 1.); float bin_size_w = roi_width / (aligned_width - 1.); float h = (float)(ph) * bin_size_h + roi_start_h; float w = (float)(pw) * bin_size_w + roi_start_w; int hstart = fminf(floor(h), height - 2); int wstart = fminf(floor(w), width - 2); int img_start = roi_batch_ind * channels * height * width; // bilinear interpolation if (h < 0 || h >= height || w < 0 || w >= width) { top_data[index] = 0.; } else { float h_ratio = h - (float)(hstart); float w_ratio = w - (float)(wstart); int upleft = img_start + (c * height + hstart) * width + wstart; int upright = upleft + 1; int downleft = upleft + width; int downright = downleft + 1; top_data[index] = bottom_data[upleft] * (1. - h_ratio) * (1. - w_ratio) + bottom_data[upright] * (1. - h_ratio) * w_ratio + bottom_data[downleft] * h_ratio * (1. 
- w_ratio) + bottom_data[downright] * h_ratio * w_ratio; } } } int ROIAlignRelForwardLaucher(const float* bottom_data, const float spatial_scale, const int num_rois, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const float* bottom_rois, float* top_data, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = num_rois * aligned_height * aligned_width * channels; cudaError_t err; ROIAlignForward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( output_size, bottom_data, spatial_scale, height, width, channels, aligned_height, aligned_width, bottom_rois, top_data); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } __global__ void ROIAlignRelBackward(const int nthreads, const float* top_diff, const float spatial_scale, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, float* bottom_diff, const float* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output int pw = index % aligned_width; int ph = (index / aligned_width) % aligned_height; int c = (index / aligned_width / aligned_height) % channels; int n = index / aligned_width / aligned_height / channels; float roi_batch_ind = bottom_rois[n * 5 + 0]; float roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale; float roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale; float roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale; float roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale; /* int roi_start_w = round(bottom_rois[1] * spatial_scale); */ /* int roi_start_h = round(bottom_rois[2] * spatial_scale); */ /* int roi_end_w = round(bottom_rois[3] * spatial_scale); */ /* int roi_end_h = round(bottom_rois[4] * spatial_scale); */ // Force malformed ROIs to be 1x1 float roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.); float roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.); float bin_size_h = roi_height / (aligned_height - 1.); float bin_size_w = roi_width / (aligned_width - 1.); float h = (float)(ph) * bin_size_h + roi_start_h; float w = (float)(pw) * bin_size_w + roi_start_w; int hstart = fminf(floor(h), height - 2); int wstart = fminf(floor(w), width - 2); int img_start = roi_batch_ind * channels * height * width; // bilinear interpolation if (!(h < 0 || h >= height || w < 0 || w >= width)) { float h_ratio = h - (float)(hstart); float w_ratio = w - (float)(wstart); int upleft = img_start + (c * height + hstart) * width + wstart; int upright = upleft + 1; int downleft = upleft + width; int downright = downleft + 1; atomicAdd(bottom_diff + upleft, top_diff[index] * (1. - h_ratio) * (1 - w_ratio)); atomicAdd(bottom_diff + upright, top_diff[index] * (1. 
- h_ratio) * w_ratio); atomicAdd(bottom_diff + downleft, top_diff[index] * h_ratio * (1 - w_ratio)); atomicAdd(bottom_diff + downright, top_diff[index] * h_ratio * w_ratio); } } } int ROIAlignRelBackwardLaucher(const float* top_diff, const float spatial_scale, const int batch_size, const int num_rois, const int height, const int width, const int channels, const int aligned_height, const int aligned_width, const float* bottom_rois, float* bottom_diff, cudaStream_t stream) { const int kThreadsPerBlock = 1024; const int output_size = num_rois * aligned_height * aligned_width * channels; cudaError_t err; ROIAlignBackward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock, kThreadsPerBlock, 0, stream>>>( output_size, top_diff, spatial_scale, height, width, channels, aligned_height, aligned_width, bottom_diff, bottom_rois); err = cudaGetLastError(); if(cudaSuccess != err) { fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) ); exit( -1 ); } return 1; } #ifdef __cplusplus } #endif
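The launchers above invoke ROIAlignForward and ROIAlignBackward, while the kernels defined in this file are ROIAlignRelForward and ROIAlignRelBackward; unless the non-Rel kernels are declared elsewhere (e.g. via roi_align_rel_kernel.h), the launch has to name the kernel actually defined here. The sketch below shows the forward launcher with only that name aligned, in CUDA form, with the original arguments and error handling unchanged; it assumes the kernel definition and headers from the file above.

// Forward launcher bound to the ROIAlignRelForward kernel defined above.
int ROIAlignRelForwardLaucher(const float* bottom_data, const float spatial_scale,
    const int num_rois, const int height, const int width, const int channels,
    const int aligned_height, const int aligned_width, const float* bottom_rois,
    float* top_data, cudaStream_t stream)
{
    const int kThreadsPerBlock = 1024;
    const int output_size = num_rois * aligned_height * aligned_width * channels;

    ROIAlignRelForward<<<(output_size + kThreadsPerBlock - 1) / kThreadsPerBlock,
                         kThreadsPerBlock, 0, stream>>>(
        output_size, bottom_data, spatial_scale, height, width, channels,
        aligned_height, aligned_width, bottom_rois, top_data);

    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
    return 1;
}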
9d00f04f26ced7771f409280aa99d4dacd2f771b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/affine_grid_op.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> __global__ void LinspaceKernel(T start, T step, int64_t size, T* out) { CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; } } template <typename T> struct Linspace<paddle::platform::CUDADeviceContext, T> { void operator()(T start, T end, int count, bool align_corners, framework::Tensor* numbers, const framework::ExecutionContext& ctx) { T* number_data = numbers->mutable_data<T>({count}, ctx.GetPlace()); T slice = (end - start) / (T)(count - 1); if (!align_corners) { slice = (end - start) / (T)count; start *= (T)(count - 1) / (T)count; } auto stream = ctx.cuda_device_context().stream(); int block = 512; int grid = (count + block - 1) / block; hipLaunchKernelGGL(( LinspaceKernel<T>) , dim3(grid), dim3(block), 0, stream, start, slice, count, number_data); } }; template <typename T> __global__ void affine_grid_kernel(const int count, int n, int out_h, int out_w, T h_start, T w_start, T h_step, T w_step, const T* theta, // N, 2, 3 T* output) { CUDA_KERNEL_LOOP(index, count) { int w = index % out_w; int h = (index / out_w) % out_h; int n = index / (out_w * out_h); T h_coor = h_step * static_cast<T>(h) + static_cast<T>(h_start); T w_coor = w_step * static_cast<T>(w) + static_cast<T>(w_start); int theta_offset = n * 6; // 2 * 3; // affine from (h_coor, w_coor) to (x, y) output[index * 2] = theta[theta_offset] * w_coor + theta[theta_offset + 1] * h_coor + theta[theta_offset + 2]; output[index * 2 + 1] = theta[theta_offset + 3] * w_coor + theta[theta_offset + 4] * h_coor + theta[theta_offset + 5]; } } template <typename T> __global__ void affine_grid_grad_kernel(const int count, int n, int out_h, int out_w, T h_start, T w_start, T h_step, T w_step, const T* out_grad, // N, H, W, 2 T* theta_grad) { // N, 2, 3 CUDA_KERNEL_LOOP(index, count) { int w = index % out_w; int h = (index / out_w) % out_h; int n = index / (out_w * out_h); T h_coor = h_step * static_cast<T>(h) + static_cast<T>(h_start); T w_coor = w_step * static_cast<T>(w) + static_cast<T>(w_start); int theta_offset = n * 6; // 2 * 3; T out_grad_x = out_grad[index * 2]; platform::CudaAtomicAdd(theta_grad + theta_offset, out_grad_x * w_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 1, out_grad_x * h_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 2, out_grad_x); T out_grad_y = out_grad[index * 2 + 1]; platform::CudaAtomicAdd(theta_grad + theta_offset + 3, out_grad_y * w_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 4, out_grad_y * h_coor); 
platform::CudaAtomicAdd(theta_grad + theta_offset + 5, out_grad_y); } } template <typename T> class AffineGridOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* theta = ctx.Input<Tensor>("Theta"); int n = theta->dims()[0]; auto size_attr = ctx.Attr<std::vector<int>>("output_shape"); auto align_corners = ctx.Attr<bool>("align_corners"); int h = 0; int w = 0; if (size_attr.size() == 0) { auto* output_shape = ctx.Input<Tensor>("OutputShape"); Tensor h_sizes; framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); const int* h_size_data = h_sizes.data<int>(); h = h_size_data[2]; w = h_size_data[3]; } else { h = size_attr[2]; w = size_attr[3]; } auto* output = ctx.Output<Tensor>("Output"); T* out_data = output->mutable_data<T>({n, h, w, 2}, ctx.GetPlace()); T h_step; T w_step; T h_start = -1; T w_start = -1; if (align_corners) { h_step = static_cast<T>(2) / static_cast<T>(h - 1); w_step = static_cast<T>(2) / static_cast<T>(w - 1); } else { h_step = static_cast<T>(2) / static_cast<T>(h); w_step = static_cast<T>(2) / static_cast<T>(w); h_start *= static_cast<T>(h - 1) / static_cast<T>(h); w_start *= static_cast<T>(w - 1) / static_cast<T>(w); } const int count = n * h * w; int block = 512; int grid = (count + block - 1) / block; auto cu_stream = ctx.cuda_device_context().stream(); hipLaunchKernelGGL(( affine_grid_kernel), dim3(grid), dim3(block), 0, cu_stream, count, n, h, w, h_start, w_start, h_step, w_step, theta->data<T>(), // N, 2, 3 out_data); } }; template <typename T> class AffineGridGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); auto theta_grad = ctx.Output<Tensor>(framework::GradVarName("Theta")); int n = output_grad->dims()[0]; auto size_attr = ctx.Attr<std::vector<int>>("output_shape"); auto align_corners = ctx.Attr<bool>("align_corners"); int h = 0; int w = 0; if (size_attr.size() == 0) { auto* output_shape = ctx.Input<Tensor>("OutputShape"); Tensor h_sizes; framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); const int* h_size_data = h_sizes.data<int>(); h = h_size_data[2]; w = h_size_data[3]; } else { h = size_attr[2]; w = size_attr[3]; } T* theta_grad_data = theta_grad->mutable_data<T>({n, 2, 3}, ctx.GetPlace()); phi::funcs::SetConstant<paddle::platform::CUDADeviceContext, T>()( ctx.cuda_device_context(), theta_grad, static_cast<T>(0)); T h_step; T w_step; T h_start = -1; T w_start = -1; if (align_corners) { h_step = static_cast<T>(2) / static_cast<T>(h - 1); w_step = static_cast<T>(2) / static_cast<T>(w - 1); } else { h_step = static_cast<T>(2) / static_cast<T>(h); w_step = static_cast<T>(2) / static_cast<T>(w); h_start *= static_cast<T>(h - 1) / static_cast<T>(h); w_start *= static_cast<T>(w - 1) / static_cast<T>(w); } const int count = n * h * w; VLOG(3) << "count: " << count << "; h_step: " << h_step << "; w_step: " << w_step << "; h_start: " << h_start << "; w_start: " << w_start; int block = 512; int grid = (count + block - 1) / block; auto cu_stream = ctx.cuda_device_context().stream(); hipLaunchKernelGGL(( affine_grid_grad_kernel), dim3(grid), dim3(block), 0, cu_stream, count, n, h, w, h_start, w_start, h_step, w_step, output_grad->data<T>(), theta_grad_data); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(affine_grid, 
ops::AffineGridOpCUDAKernel<float>, ops::AffineGridOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(affine_grid_grad, ops::AffineGridGradOpCUDAKernel<float>, ops::AffineGridGradOpCUDAKernel<double>);
9d00f04f26ced7771f409280aa99d4dacd2f771b.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/affine_grid_op.h" #include "paddle/fluid/platform/device/gpu/gpu_device_function.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> __global__ void LinspaceKernel(T start, T step, int64_t size, T* out) { CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; } } template <typename T> struct Linspace<paddle::platform::CUDADeviceContext, T> { void operator()(T start, T end, int count, bool align_corners, framework::Tensor* numbers, const framework::ExecutionContext& ctx) { T* number_data = numbers->mutable_data<T>({count}, ctx.GetPlace()); T slice = (end - start) / (T)(count - 1); if (!align_corners) { slice = (end - start) / (T)count; start *= (T)(count - 1) / (T)count; } auto stream = ctx.cuda_device_context().stream(); int block = 512; int grid = (count + block - 1) / block; LinspaceKernel<T> <<<grid, block, 0, stream>>>(start, slice, count, number_data); } }; template <typename T> __global__ void affine_grid_kernel(const int count, int n, int out_h, int out_w, T h_start, T w_start, T h_step, T w_step, const T* theta, // N, 2, 3 T* output) { CUDA_KERNEL_LOOP(index, count) { int w = index % out_w; int h = (index / out_w) % out_h; int n = index / (out_w * out_h); T h_coor = h_step * static_cast<T>(h) + static_cast<T>(h_start); T w_coor = w_step * static_cast<T>(w) + static_cast<T>(w_start); int theta_offset = n * 6; // 2 * 3; // affine from (h_coor, w_coor) to (x, y) output[index * 2] = theta[theta_offset] * w_coor + theta[theta_offset + 1] * h_coor + theta[theta_offset + 2]; output[index * 2 + 1] = theta[theta_offset + 3] * w_coor + theta[theta_offset + 4] * h_coor + theta[theta_offset + 5]; } } template <typename T> __global__ void affine_grid_grad_kernel(const int count, int n, int out_h, int out_w, T h_start, T w_start, T h_step, T w_step, const T* out_grad, // N, H, W, 2 T* theta_grad) { // N, 2, 3 CUDA_KERNEL_LOOP(index, count) { int w = index % out_w; int h = (index / out_w) % out_h; int n = index / (out_w * out_h); T h_coor = h_step * static_cast<T>(h) + static_cast<T>(h_start); T w_coor = w_step * static_cast<T>(w) + static_cast<T>(w_start); int theta_offset = n * 6; // 2 * 3; T out_grad_x = out_grad[index * 2]; platform::CudaAtomicAdd(theta_grad + theta_offset, out_grad_x * w_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 1, out_grad_x * h_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 2, out_grad_x); T out_grad_y = out_grad[index * 2 + 1]; platform::CudaAtomicAdd(theta_grad + theta_offset + 3, out_grad_y * w_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 4, out_grad_y * h_coor); platform::CudaAtomicAdd(theta_grad + theta_offset + 5, out_grad_y); } } template <typename T> class AffineGridOpCUDAKernel : public 
framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* theta = ctx.Input<Tensor>("Theta"); int n = theta->dims()[0]; auto size_attr = ctx.Attr<std::vector<int>>("output_shape"); auto align_corners = ctx.Attr<bool>("align_corners"); int h = 0; int w = 0; if (size_attr.size() == 0) { auto* output_shape = ctx.Input<Tensor>("OutputShape"); Tensor h_sizes; framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); const int* h_size_data = h_sizes.data<int>(); h = h_size_data[2]; w = h_size_data[3]; } else { h = size_attr[2]; w = size_attr[3]; } auto* output = ctx.Output<Tensor>("Output"); T* out_data = output->mutable_data<T>({n, h, w, 2}, ctx.GetPlace()); T h_step; T w_step; T h_start = -1; T w_start = -1; if (align_corners) { h_step = static_cast<T>(2) / static_cast<T>(h - 1); w_step = static_cast<T>(2) / static_cast<T>(w - 1); } else { h_step = static_cast<T>(2) / static_cast<T>(h); w_step = static_cast<T>(2) / static_cast<T>(w); h_start *= static_cast<T>(h - 1) / static_cast<T>(h); w_start *= static_cast<T>(w - 1) / static_cast<T>(w); } const int count = n * h * w; int block = 512; int grid = (count + block - 1) / block; auto cu_stream = ctx.cuda_device_context().stream(); affine_grid_kernel<<<grid, block, 0, cu_stream>>>( count, n, h, w, h_start, w_start, h_step, w_step, theta->data<T>(), // N, 2, 3 out_data); } }; template <typename T> class AffineGridGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); auto theta_grad = ctx.Output<Tensor>(framework::GradVarName("Theta")); int n = output_grad->dims()[0]; auto size_attr = ctx.Attr<std::vector<int>>("output_shape"); auto align_corners = ctx.Attr<bool>("align_corners"); int h = 0; int w = 0; if (size_attr.size() == 0) { auto* output_shape = ctx.Input<Tensor>("OutputShape"); Tensor h_sizes; framework::TensorCopy(*output_shape, platform::CPUPlace(), &h_sizes); const int* h_size_data = h_sizes.data<int>(); h = h_size_data[2]; w = h_size_data[3]; } else { h = size_attr[2]; w = size_attr[3]; } T* theta_grad_data = theta_grad->mutable_data<T>({n, 2, 3}, ctx.GetPlace()); phi::funcs::SetConstant<paddle::platform::CUDADeviceContext, T>()( ctx.cuda_device_context(), theta_grad, static_cast<T>(0)); T h_step; T w_step; T h_start = -1; T w_start = -1; if (align_corners) { h_step = static_cast<T>(2) / static_cast<T>(h - 1); w_step = static_cast<T>(2) / static_cast<T>(w - 1); } else { h_step = static_cast<T>(2) / static_cast<T>(h); w_step = static_cast<T>(2) / static_cast<T>(w); h_start *= static_cast<T>(h - 1) / static_cast<T>(h); w_start *= static_cast<T>(w - 1) / static_cast<T>(w); } const int count = n * h * w; VLOG(3) << "count: " << count << "; h_step: " << h_step << "; w_step: " << w_step << "; h_start: " << h_start << "; w_start: " << w_start; int block = 512; int grid = (count + block - 1) / block; auto cu_stream = ctx.cuda_device_context().stream(); affine_grid_grad_kernel<<<grid, block, 0, cu_stream>>>( count, n, h, w, h_start, w_start, h_step, w_step, output_grad->data<T>(), theta_grad_data); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(affine_grid, ops::AffineGridOpCUDAKernel<float>, ops::AffineGridOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(affine_grid_grad, ops::AffineGridGradOpCUDAKernel<float>, ops::AffineGridGradOpCUDAKernel<double>);
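The grid construction above depends entirely on how the normalized target coordinates are spaced. The standalone host snippet below reproduces the h_start/h_step arithmetic from AffineGridOpCUDAKernel for a small output height, so the align_corners difference can be inspected directly; it is illustrative only and independent of Paddle.

// Reproduces the normalized-coordinate setup used by affine_grid_kernel:
// align_corners=true  -> endpoints land exactly on -1 and 1
// align_corners=false -> samples sit at pixel centers, strictly inside (-1, 1)
#include <cstdio>

int main()
{
    const int h = 4;
    for (int align_corners = 0; align_corners <= 1; ++align_corners) {
        float h_start = -1.f, h_step;
        if (align_corners) {
            h_step = 2.f / (float)(h - 1);
        } else {
            h_step = 2.f / (float)h;
            h_start *= (float)(h - 1) / (float)h;
        }
        printf("align_corners=%d:", align_corners);
        for (int i = 0; i < h; ++i)
            printf(" % .3f", h_start + h_step * (float)i);
        printf("\n");
    }
    return 0;
}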
9a65c0a586dbc4ee77f7cd5b2b713c2f58a2d961.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <stdio.h> #include "gradient2d-512-8-128_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 19 #define BENCH_RAD 1 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 3 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ hipError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != hipSuccess) { \ fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == hipSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(hipGetLastError()); \ } while(0) double *dev_A; cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double))); { cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice)); #ifdef STENCILBENCH hipDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), 
dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 3) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = 
(__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 4) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 5) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && 
(__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 6) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 7) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / 
__side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 3) { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 4) { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax 
== 5) { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 6) { const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 7) { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH hipDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost)); } cudaCheckReturn(hipFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = A[t%2][i][j] + 1.0f / sqrt(0.0001f + (A[t%2][i][j]-A[t%2][i-1][j])*(A[t%2][i][j]-A[t%2][i-1][j]) + (A[t%2][i][j]-A[t%2][i+1][j])*(A[t%2][i][j]-A[t%2][i+1][j]) + (A[t%2][i][j]-A[t%2][i][j+1])*(A[t%2][i][j]-A[t%2][i][j+1]) + (A[t%2][i][j]-A[t%2][i][j-1])*(A[t%2][i][j]-A[t%2][i][j-1])); } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
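// --- Editor's sketch (not part of the generated files) ---------------------
// Every tile-configuration block above repeats the same arithmetic: the time
// tile depth __side0Len fixes the halo overlap, the overlapped block width,
// and a ceil-division grid over the two spatial dimensions. The helper below
// is a hypothetical, self-contained restatement of that arithmetic; the name
// describe_tile and the concrete sizes are assumptions (compsize = 512 is
// inferred from the file name gradient2d-512-8-128).
#include <cassert>
#include <cstdio>

void describe_tile(unsigned side0Len, unsigned side1Len, unsigned side2Len,
                   unsigned c1Len, unsigned c2Len,
                   unsigned halo1 = 1, unsigned halo2 = 1) {
  unsigned olLen1 = halo1 * side0Len;           // overlap consumed per fused time step
  unsigned olLen2 = halo2 * side0Len;
  unsigned side1LenOl = side1Len + 2 * olLen1;  // tile extent incl. halo (dimension 1)
  unsigned side2LenOl = side2Len + 2 * olLen2;  // threads per block (x dimension)
  // Same "[AN5D ERROR] Too short stream" condition the generated asserts check.
  assert(side1Len >= 2 * side0Len * halo1);
  unsigned gridX = ((c1Len + side1Len - 1) / side1Len) *  // ceil-div tiling in c1
                   ((c2Len + side2Len - 1) / side2Len);   // ceil-div tiling in c2
  std::printf("blockDim.x = %u, gridDim.x = %u (side1LenOl = %u)\n",
              side2LenOl, gridX, side1LenOl);
}

int main() {
  // Deepest time tile used above: __side0Len = 8, __side1Len = 128,
  // __side2Len = 496; with compsize = 512, __c1Len = __c2Len = 512.
  describe_tile(8, 128, 496, 512, 512);  // prints blockDim.x = 512, gridDim.x = 8
  return 0;
}
// ---------------------------------------------------------------------------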
9a65c0a586dbc4ee77f7cd5b2b713c2f58a2d961.cu
#include <assert.h> #include <stdio.h> #include "gradient2d-512-8-128_kernel.hu" #define BENCH_DIM 2 #define BENCH_FPP 19 #define BENCH_RAD 1 #include "common.h" double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop) { double start_time = sb_time(), end_time = 0.0; int dimsize = compsize + BENCH_RAD * 2; SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1; if (scop) { if (dimsize >= 3 && timestep >= 1) { #define cudaCheckReturn(ret) \ do { \ cudaError_t cudaCheckReturn_e = (ret); \ if (cudaCheckReturn_e != cudaSuccess) { \ fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \ fflush(stderr); \ } \ assert(cudaCheckReturn_e == cudaSuccess); \ } while(0) #define cudaCheckKernel() \ do { \ cudaCheckReturn(cudaGetLastError()); \ } while(0) double *dev_A; cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double))); { cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice)); #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_START_INSTRUMENTS; #endif } { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; AN5D_TYPE c0; AN5D_TYPE __side0LenMax; { const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 
1 : 0; __side0LenMax = __side0Len; for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1) { kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2)) { if (__c0Len % __side0LenMax == 0) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 1) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 
= (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 2) { { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 3) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] 
Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 4) { { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 5) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 6) { { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const 
AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } else if (__c0Len % __side0LenMax == 7) { { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } c0 += 1; { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } else if (__c0Len % __side0LenMax) { if (__c0Len % __side0LenMax == 1) { const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % 
__side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 2) { const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 3) { const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 4) { const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 5) { const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 6) { 
const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } else if (__c0Len % __side0LenMax == 7) { const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream"); dim3 k0_dimBlock(__blockSize, 1, 1); dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1); kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0); } } } cudaCheckKernel(); { #ifdef STENCILBENCH cudaDeviceSynchronize(); SB_STOP_INSTRUMENTS; #endif cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost)); } cudaCheckReturn(cudaFree(dev_A)); } } else { for (int t = 0; t < timestep; t++) #pragma omp parallel for for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++) for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++) A[(t+1)%2][i][j] = A[t%2][i][j] + 1.0f / sqrt(0.0001f + (A[t%2][i][j]-A[t%2][i-1][j])*(A[t%2][i][j]-A[t%2][i-1][j]) + (A[t%2][i][j]-A[t%2][i+1][j])*(A[t%2][i][j]-A[t%2][i+1][j]) + (A[t%2][i][j]-A[t%2][i][j+1])*(A[t%2][i][j]-A[t%2][i][j+1]) + (A[t%2][i][j]-A[t%2][i][j-1])*(A[t%2][i][j]-A[t%2][i][j-1])); } return (((end_time != 0.0) ? end_time : sb_time()) - start_time); }
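// --- Editor's sketch (not part of the generated files) ---------------------
// The host fallback loop above spells out the stencil every kernel0_N variant
// computes: a 5-point gradient-smoothing update on a double-buffered grid.
// For orientation only, a minimal untiled CUDA kernel for a single time step
// could look like the sketch below. It is an assumption-based illustration,
// not the AN5D-generated code: it omits the temporal blocking the generated
// kernels provide, assumes BENCH_RAD == 1, and takes flat in/out buffers
// instead of the A[2][dimsize][dimsize] layout.
__global__ void gradient2d_naive(const double *in, double *out, int dimsize) {
  int j = blockIdx.x * blockDim.x + threadIdx.x;  // column index
  int i = blockIdx.y * blockDim.y + threadIdx.y;  // row index
  if (i < 1 || i >= dimsize - 1 || j < 1 || j >= dimsize - 1) return;
  double c = in[i * dimsize + j];
  double n = in[(i - 1) * dimsize + j], s = in[(i + 1) * dimsize + j];
  double w = in[i * dimsize + (j - 1)], e = in[i * dimsize + (j + 1)];
  out[i * dimsize + j] =
      c + 1.0f / sqrt(0.0001f + (c - n) * (c - n) + (c - s) * (c - s) +
                      (c - e) * (c - e) + (c - w) * (c - w));
}
// ---------------------------------------------------------------------------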
f65e848be3aa39e20e392792c8b88001125bb1e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @brief * utils * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include "k2/csrc/math.h" #include "k2/csrc/utils.h" namespace k2 { // See FillValues() where this is invoked. It fills a region with // a constant value. __global__ void FillValuesKernel(int32_t *data, int32_t num_values, int32_t value) { int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x), stride = (gridDim.x * blockDim.x); for (; job_idx < num_values; job_idx += stride) data[job_idx] = value; } // This launches a kernel. It's the same as doing: // for (int32_t i = 0; i < num_values; i++) data[i] = value; __device__ void FillValues(int32_t *data, int32_t num_values, int32_t value) { int32_t block_size = 256; int32_t grid_size = NumBlocks(num_values, block_size); hipLaunchKernelGGL(( FillValuesKernel), dim3(grid_size), dim3(block_size), 0, 0, data, num_values, value); } // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void RowSplitsToRowIdsKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (row_length / threads_per_row > max_loop) { // We decide that looping too many times will be too slow, so we launch // another kernel to fill in the value for this row. (This is CUDA dynamic // parallelism). if (thread_this_row == 0) { FillValues(row_ids + this_row_split, row_length, row); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_row < row_length; thread_this_row += threads_per_row) row_ids[this_row_split + thread_this_row] = row; } } /* See declaration of RowSplitsToRowIds() in utils.h. These are implementation notes. Suppose the range we need to fill with a particular number (say, x) is from 1010 to 10000 inclusive (binary) The first kernel writes x to positions 1010, 1100, 10000; the significance of that sequence is we keep adding the smallest number we can add to get another zero at the end of the binary representation, until we exceed the range we're supposed to fill. The second kernel: for a given index into x that is must fill (say, 1111), it asks "is the index currently here already the right one?", which it can test using the function is_valid_index() below; if it's not already correct, it searches in a sequence of positions: 1110, 1100, 1000, 0000, like our sequence above but going downwards, again getting more zeros at the end of the binary representation, until it finds the correct value in the array at the searched position; then it copies the discovered value the original position requested (here, 1111). 
First kernel pseudocode: for each index 'i' into 't', it does: for (int32_t n=0, j = t[i]; j < t[i+1]; n++) { x[j] = i; if (j & (1<<n)) j += (1 << n); } Second kernel pseudocode: for each element of x, it searches for the right index. Suppose we're given num_indexes == length(n) == length(t) - 1. Define is_valid_index as follows: // returns true if j is the value that we should be putting at position 'i' in x: // that is, if t[j] <= i < t[j+1]. bool is_valid_index(i, j) { return (j >= 0 && j < num_indexes && t[j] <= i && i < t[j+1]); } // We suppose we are given i (the position into x that we're responsible for // setting: orig_i = i; for (int32_t n=0; !is_valid_index(i, x[i]); n++) { if (i & (1<<n)) i -= (1 << n); } x[orig_i] = x[i]; */ void RowSplitsToRowIds(ContextPtr &c, int32_t num_rows, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { if (num_rows <= 0 || num_elems <= 0) return; DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row_start = row_splits[0]; K2_CHECK_EQ(cur_row_start, 0); K2_CHECK_EQ(row_splits[num_rows], num_elems); for (int32_t row = 0; row < num_rows; ++row) { int32_t next_row_start = row_splits[row + 1]; for (; cur_row_start < next_row_start; ++cur_row_start) row_ids[cur_row_start] = row; } } else { K2_CHECK_EQ(d, kCuda); if (1) { // TODO: compare this for speed with the other branch. This is branch is // much simpler, and will be considerably faster for "normal" cases -> // probably preferred. int32_t avg_elems_per_row = (num_elems + num_rows - 1) / num_rows, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_rows * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(RowSplitsToRowIdsKernel), dim3(grid_size), dim3(block_size), 0, c->GetCudaStream(), num_rows, threads_per_row, row_splits, num_elems, row_ids)); } else { // TODO: Will probably just delete this branch at some point. // The following algorithm isn't particularly adapted to GPU hardware in // terms of coalesced reads and writes and so on, but it has reasonable // asymptotic time complexity (assuming all kernels run in parallel), // specifically: O(log(largest(row_splits[i+1]-row_splits[i]))) auto lambda_init_minus_one = [=] __host__ __device__(int32_t i) { row_ids[i] = -1; }; Eval(c, num_elems + 1, lambda_init_minus_one); auto lambda_phase_one = [=] __host__ __device__(int32_t i) { int32_t this_row_split = row_splits[i], next_row_split = (i < num_rows ? row_splits[i + 1] : this_row_split + 1); if (this_row_split < next_row_split) row_ids[this_row_split] = i; // we have to fill in row_ids[this_row_split], // row_ids[this_row_split+1]... row_ids[next_row_split-1] with the same // value but that could be a long loop. Instead we write at // this_row_split and all indexes this_row_split < i < next_row_split // such that i is the result of rounding up this_row_split to // (something)*2^n, for n = 1, 2, 3, ... this will take time logarithmic // in (next_row_split - this_row_split). we can then fill in the gaps // with a logarithmic-time loop, by looking for a value that's not (-1) // by rounding the current index down to successively higher powers // of 2. for (int32_t power = 0, j = this_row_split; j + (1 << power) < next_row_split; power++) { if (j & (1 << power)) { j += (1 << power); // we know that j is now < next_row_split, because we checked "j + // (1<<power) < next_row_split" in the loop condition. 
// Note, we don't want a loop-within-a-loop because of how SIMT // works... row_ids[j] = i; } } }; Eval(c, num_elems + 1, lambda_phase_one); auto lambda_phase_two = [=] __host__ __device__(int32_t j) { int32_t row_index = row_ids[j]; if (row_index != -1) return; int32_t power = 0, j2 = j; for (; row_index != -1; power++) { if (j2 & (1 << power)) { j2 -= (1 << power); row_index = row_ids[j2]; } assert(power < 31); } row_ids[j] = row_ids[j2]; }; // could do the next line for num_elems+1, but the element at `num_elems` // will already be set. Eval(c, num_elems, lambda_phase_two); } } } /* When we invoke this we make a big enough grid that there doesn't have to be a loop over elements, i.e. (gridDim.x * blockDim.x) / threads_per_elem > num_elems. (must be >=, because we imagine a phantom element at [num_elems] with the value `num_rows`.) @param [in] num_elems Number of elements in ragged matrix @param [in] threads_per_elem Number of threads we allocate per element. Must be >= 1. @param [in] row_ids The row_ids vector, of length `num_elems`; must be nonnegative and non-decreasing and all elements < num_rows. @param [in] num_rows Number of rows, must be greater than the largest (== last) element of `row_ids`. @param [out] row_splits This kernel will output a non-decreasing vector of length num_rows + 1, such that row_splits[0] == 0, row_splits[num_rows] == num_elems, and row_splits[row_ids[i]] <= i < row_splits[row_ids[i]+1] */ __global__ void RowIdsToRowSplitsKernel(int32_t num_elems, int32_t threads_per_elem, const int32_t *row_ids, int32_t num_rows, int32_t *row_splits) { int32_t thread = (blockIdx.x * blockDim.x + threadIdx.x), num_threads = gridDim.x * blockDim.x, elem = thread / threads_per_elem, thread_this_elem = thread % threads_per_elem; K2_CHECK_GE(num_threads / threads_per_elem, num_elems); if (elem > num_elems) return; int32_t this_row, prev_row; if (elem == 0) { prev_row = -1; this_row = row_ids[elem]; } else if (elem == num_elems) { prev_row = row_ids[elem - 1]; this_row = num_rows; } else { prev_row = row_ids[elem - 1]; this_row = row_ids[elem]; } // `num_splits` is the number of splits we have to write, usually 0 or 1 // but in principle unlimited as there could be empty rows. The // relationship between row_ids and row_splits is more symmetric than // you might expect. int32_t num_splits = this_row - prev_row; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (num_splits / threads_per_elem > max_loop) { if (thread_this_elem == 0) { FillValues(row_splits + prev_row + 1, num_splits, elem); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_elem < num_splits; thread_this_elem += threads_per_elem) row_splits[prev_row + 1 + thread_this_elem] = elem; } } // see declaration in utils.h for documentation. 
void RowIdsToRowSplits(ContextPtr &c, int32_t num_elems, const int32_t *row_ids, bool no_empty_rows, int32_t num_rows, int32_t *row_splits) { // process corner case first if (num_elems == 0) { auto lambda_set_values = [=] __host__ __device__(int32_t i) { row_splits[i] = 0; }; Eval(c, num_rows + 1, lambda_set_values); return; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row = -1; for (int32_t i = 0; i < num_elems; i++) { int32_t row = row_ids[i]; K2_CHECK_GE(row, cur_row); while (cur_row < row) { cur_row++; row_splits[cur_row] = i; } } // cur_row must be >= 0 here as num_elems > 0 K2_CHECK_GE(cur_row, 0); while (cur_row < num_rows) { row_splits[++cur_row] = num_elems; } } else { K2_CHECK_EQ(d, kCuda); if (no_empty_rows) { auto lambda_simple = [=] __host__ __device__(int32_t i) { int32_t this_row = row_ids[i], prev_row; if (i > 0) { // (normal case) prev_row = row_ids[i - 1]; } else { // i == 0 row_splits[num_rows] = num_elems; prev_row = -1; } K2_CHECK_LE(this_row, prev_row + 1); // no_empty_rows was asserted by // the user if (this_row > prev_row) { row_splits[this_row] = i; } }; Eval(c, num_elems, lambda_simple); return; } else { // By doing "+ 2" instead of "+ 1" we increase the minimum number of // threads-per-row, which may reduce latency when there are successive // empty rows. Any value >= 1 is correct though. int32_t avg_rows_per_elem = num_rows / num_elems + 2, threads_per_elem = RoundUpToNearestPowerOfTwo(avg_rows_per_elem), tot_threads = (num_elems + 1) * threads_per_elem; // +1 for the last row int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(RowIdsToRowSplitsKernel), dim3(grid_size), dim3(block_size), 0, c->GetCudaStream(), num_elems, threads_per_elem, row_ids, num_rows, row_splits)); } } } /* Called inside GetTaskRedirect(); see documentation of that in header. Each task with 0 <= task < num_tasks gets allocated `threads_per_job` threads, e.g. threads_per_job = 4 or 16. It's a kind of n-ary search (generalization of binary search) where each branch is handled by a different thread so they can happen in parallel. TODO(dan): there are a lot of opportunities to further optimize this using GPU hardware tricks. The thread-block size this is called with must be jobs_per_block * threads_per_job. */ /* template <int32_t jobs_per_block, int32_t threads_per_job> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { __shared__ int32_t temp[tasks_per_block]; // we do __syncwarp() for synchronization below; we require threads_per_job <= // 32 for this reason. static_assert(threads_per_job >= 2 && threads_per_job <= 32); // We have work to do for 0 <= job_idx < num_tasks, but be careful: job_idx // may be >= num_tasks if num_tasks is small or not a power of two (we don't // return because we need to do __syncwarp()). So we have to avoid out of // bounds memory access. int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_job; // `branch_idx` is which member we are of the group of the `threads_per_job` threads for this job. int32_t branch_idx = threadIdx.x % threads_per_job; // we assume blockDim.x % threads_per_job == 0 // `temp_idx` is which index in the temporary storage `temp` we are assigned // (one per job). int32_t temp_idx = threadIdx.x / threads_per_job; // TODO: we may at some point decide that row_splits[0] has to be zero. 
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; if (num_items <= 0) { assert(num_items == 0); // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_job >= 2); if (branch_idx < 2 && job_idx < num_tasks) { TaskRedirect tr { job_idx, 2, branch_idx }; redirect_out[job_idx + branch_idx * num_tasks] = tr; } return; } else if (branch_idx == 0 && job_idx < num_tasks) { // This code writes to the jobs in the first half of the output array, // that are allocated to the same-numbered task. int32_t task_idx = job_idx, this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation); TaskRedirect tr { task_idx, num_jobs_this_task, 0 }; redirect_out[task_idx] = tr; } // Now we have the less-trivial task of assigning the jobs in the 2nd half of the // output array to tasks (these are allocated roughly proportional to the amount // of work to do for that task). // We do the selection by throwing darts at a dart-board, evenly spaced, and seeing which task they correspond // to. There are `num_tasks` darts). // Note: we know dart_location < row_splits_nt because job_idx < num_tasks and // because integer division rounds down. int32_t dart_separation = num_items / num_tasks, dart_location = row_splits0 + job_idx * dart_separation; // OK, from this point the goal is to find a task_idx such that // row_splits[task_idx] <= dart_location < row_splits[task_idx + 1]. // This is guaranteed to exist, as long as job_id < num_tasks. // As long as job_id < num_tasks, we maintain the property that // row_splits[lower_bound] <= dart_location && // (upper_bound > num_tasks || row_splits[upper_bound] > dart_location). // (where upper_bound == lower_bound + range), i.e. they are truly // lower and upper bounds int32_t lower_bound = 0, range = num_tasks; // we are responsible for items lower_bound through // (upper_bound = lower_bound + range) - 1. while (range > threads_per_job) { int32_t upper_bound = lower_bound + range; // We need to narrow the range of `task_idx` that might be the correct one. // We round *up* because we require that task_idx_step * threads_per_job >= // range, so that we cover the entire range. int32_t task_idx_step = (range + threads_per_job - 1) / threads_per_job, // >= 2 my_lower_task_idx = lower_bound + branch_idx * task_idx_step, my_upper_task_idx = my_lower_task_idx + task_idx_step; // The following avoids out-of-bounds memory accesses. if (my_upper_task_idx > upper_bound) my_upper_task_idx = upper_bound; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. if (my_lower_task_idx < num_tasks && row_splits[my_lower_task_idx] <= dart_location && dart_location < row_splits[my_upper_task_idx]) { // I am the "chosen branch" (exactly one will be chosen, as long as // job_idx < num_tasks). 
temp[temp_idx] = branch_idx; } __syncwarp(); int32_t chosen_branch_idx = temp[temp_idx]; lower_bound = lower_bound + chosen_branch_idx * task_idx_step; upper_bound = lower_bound + task_idx_step; range = task_idx_step; // note, we don't limit upper_bound to be <= num_tasks because we need all // threads in the block to go around the while loop the same number of // times. Therefore it's possible that upper_bound > num_tasks. K2_DASSERT(job_idx >= num_tasks || (row_splits[lower_bound] <= dart_location && (upper_bound > num_tasks || row_splits[upper_bound] > dart_location))); // TODO: remove once debugged. } int32_t task_idx = lower_bound + branch_idx; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. // // The check `task_idx < num_tasks` is to avoid out-of-bounds access of row_splits. // The check `job_idx < num_tasks` is to avoid out-of-bounds access of `redirect_out`; // for these out-of-range job_idx values, it's possible for task_idx to have // any value since it may be uninitialized memory. if (task_idx < num_tasks && job_idx < num_tasks) { int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; if (this_row_split <= dart_location && dart_location < next_row_split) { // OK, exactly one branch per job will reach this point. `num_jobs` below // is the number of jobs that will be active for this task. (The "1 // +".. is the job that we assign for each task, one job per task, in the // "first half" of the jobs). The job_id_this_task we're working out // below is the job_id within the second half of the TaskRedirects, // the half that are allocated by throwing darts. int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation), job_idx_this_task = 1 + (dart_location - this_row_split)/dart_separation; K2_CHECK(job_id_this_task < num_jobs_this_task); TaskRedirect tr { task_idx, num_jobs_this_task, job_idx_this_task }; redirect_out[num_tasks + job_idx] = tr; } } } */ /* This is a quite simple implementation of GetTaskRedirect... I had a more complicated one above that had better O(N) performance for hard cases, but this one will handle more normal/smaller cases better, plus is easier to debug. The basic idea is to throw lots of threads at it, i.e. threads_per_task should be, say, twice larger than the average / expected number of jobs per task, so that if a task has lots of jobs it doesn't have to loop too many times. */ template <int32_t threads_per_task> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x; int32_t task_idx = thread / threads_per_task; if (task_idx >= num_tasks) return; // `thread_idx` is which member we are of the group of the `threads_per_job` // threads for this job. int32_t thread_idx = thread % threads_per_task; int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; // the 'num_items' is the // total amount of work to // do, that we want to // distribute fairly evenly. // The idea with `dart_separation` is this: Half of the jobs we allocate to // the corresponding tasks. The other half we allocate by throwing darts onto // the interval [0, num_items - 1], evenly spaced starting from 0, and seeing // which tasks they land in. 
This is somewhat random but it ensures that if // any task has a very large amount of work to do, it will get a roughly // proportionate number of jobs. int32_t dart_separation = num_items / num_tasks; if (dart_separation <= 0) { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_task >= 2, "threads per task must >= 2"); if (thread_idx < 2) { TaskRedirect tr{task_idx, 2, static_cast<uint16_t>(thread_idx)}; redirect_out[task_idx + thread_idx * num_tasks] = tr; } return; } // TODO(dan): IDK how well the hardware combines these memory requests; could // consider loading to shared memory first. int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (min(next_row_split / dart_separation, num_tasks) - min(this_row_split / dart_separation, num_tasks)); // function `min` is from cuda K2_CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = thread_idx; job_id_this_task < num_jobs_this_task; job_id_this_task += threads_per_task) { int32_t job_idx = (job_id_this_task == 0 ? task_idx : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. redirect_out[job_idx] = TaskRedirect{task_idx, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } void GetTaskRedirect(hipStream_t stream, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { if (num_tasks <= 0) return; if (stream == kCudaStreamInvalid) { // there's not much point in using this on CPU as there are better ways // to do things (sequentially), but this can be useful for debugging. // The idea with `dart_separation` is this: Half of the jobs we allocate // to the corresponding tasks. The other half we allocate by throwing // darts onto the interval [0, num_items - 1], evenly spaced starting from // 0, and seeing which tasks they land in. This is somewhat random but it // ensures that if any task has a very large amount of work to do, it will // get a roughly proportionate number of jobs. int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0, dart_separation = num_items / num_tasks; if (dart_separation != 0) { for (int32_t task = 0; task < num_tasks; ++task) { int32_t this_row_split = row_splits[task], next_row_split = row_splits[task + 1]; int32_t num_jobs_this_task = 1 + (::min(next_row_split / dart_separation, num_tasks) - ::min(this_row_split / dart_separation, num_tasks)); K2_CHECK_EQ( static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = (job_id_this_task == 0 ? task : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. 
redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } else { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return for (int32_t task = 0; task < num_tasks; ++task) { int32_t num_jobs_this_task = 2; for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = task + job_id_this_task * num_tasks; redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } } else { // compare 8 to 2, which is the expected number of jobs per task. having // 8 substantially greater than 2 gives a fairly big safety factor. // However this is still far from ideal in scenarios where the number of // tasks might be highly unbalanced. const int32_t threads_per_task = 8, tot_threads = threads_per_task * num_tasks; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(hipLaunchKernelGGL(HIP_KERNEL_NAME(GetTaskRedirect<threads_per_task>), dim3(grid_size), dim3(block_size), 0, stream, num_tasks, row_splits, redirect_out)); } } void GetTaskRedirect(ContextPtr &c, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { GetTaskRedirect(c->GetCudaStream(), num_tasks, row_splits, redirect_out); } } // namespace k2
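// --- Editor's note (sketch, not part of the hipified file) -----------------
// In the hipified file above, the guarded launches of RowSplitsToRowIdsKernel
// and RowIdsToRowSplitsKernel came out of hipify with K2_CUDA_SAFE_CALL folded
// inside hipLaunchKernelGGL, which is not a compilable nesting. The intended
// idiom keeps the whole launch inside the safety macro, mirroring the CUDA
// original's K2_CUDA_SAFE_CALL(Kernel<<<grid, block, 0, stream>>>(...)).
// Shown in isolation for the first of those launches, relying on the
// surrounding function's variables:
K2_CUDA_SAFE_CALL(hipLaunchKernelGGL(
    RowSplitsToRowIdsKernel, dim3(grid_size), dim3(block_size), 0,
    c->GetCudaStream(), num_rows, threads_per_row, row_splits, num_elems,
    row_ids));
// ---------------------------------------------------------------------------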
f65e848be3aa39e20e392792c8b88001125bb1e1.cu
/** * @brief * utils * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include "k2/csrc/math.h" #include "k2/csrc/utils.h" namespace k2 { // See FillValues() where this is invoked. It fills a region with // a constant value. __global__ void FillValuesKernel(int32_t *data, int32_t num_values, int32_t value) { int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x), stride = (gridDim.x * blockDim.x); for (; job_idx < num_values; job_idx += stride) data[job_idx] = value; } // This launches a kernel. It's the same as doing: // for (int32_t i = 0; i < num_values; i++) data[i] = value; __device__ void FillValues(int32_t *data, int32_t num_values, int32_t value) { int32_t block_size = 256; int32_t grid_size = NumBlocks(num_values, block_size); FillValuesKernel<<<grid_size, block_size>>>(data, num_values, value); } // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void RowSplitsToRowIdsKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (row_length / threads_per_row > max_loop) { // We decide that looping too many times will be too slow, so we launch // another kernel to fill in the value for this row. (This is CUDA dynamic // parallelism). if (thread_this_row == 0) { FillValues(row_ids + this_row_split, row_length, row); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_row < row_length; thread_this_row += threads_per_row) row_ids[this_row_split + thread_this_row] = row; } } /* See declaration of RowSplitsToRowIds() in utils.h. These are implementation notes. Suppose the range we need to fill with a particular number (say, x) is from 1010 to 10000 inclusive (binary) The first kernel writes x to positions 1010, 1100, 10000; the significance of that sequence is we keep adding the smallest number we can add to get another zero at the end of the binary representation, until we exceed the range we're supposed to fill. The second kernel: for a given index into x that is must fill (say, 1111), it asks "is the index currently here already the right one?", which it can test using the function is_valid_index() below; if it's not already correct, it searches in a sequence of positions: 1110, 1100, 1000, 0000, like our sequence above but going downwards, again getting more zeros at the end of the binary representation, until it finds the correct value in the array at the searched position; then it copies the discovered value the original position requested (here, 1111). First kernel pseudocode: for each index 'i' into 't', it does: for (int32_t n=0, j = t[i]; j < t[i+1]; n++) { x[j] = i; if (j & (1<<n)) j += (1 << n); } Second kernel pseudocode: for each element of x, it searches for the right index. Suppose we're given num_indexes == length(n) == length(t) - 1. 
Define is_valid_index as follows: // returns true if j is the value that we should be putting at position 'i' in x: // that is, if t[j] <= i < t[j+1]. bool is_valid_index(i, j) { return (j >= 0 && j < num_indexes && t[j] <= i && i < t[j+1]); } // We suppose we are given i (the position into x that we're responsible for // setting: orig_i = i; for (int32_t n=0; !is_valid_index(i, x[i]); n++) { if (i & (1<<n)) i -= (1 << n); } x[orig_i] = x[i]; */ void RowSplitsToRowIds(ContextPtr &c, int32_t num_rows, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { if (num_rows <= 0 || num_elems <= 0) return; DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row_start = row_splits[0]; K2_CHECK_EQ(cur_row_start, 0); K2_CHECK_EQ(row_splits[num_rows], num_elems); for (int32_t row = 0; row < num_rows; ++row) { int32_t next_row_start = row_splits[row + 1]; for (; cur_row_start < next_row_start; ++cur_row_start) row_ids[cur_row_start] = row; } } else { K2_CHECK_EQ(d, kCuda); if (1) { // TODO: compare this for speed with the other branch. This is branch is // much simpler, and will be considerably faster for "normal" cases -> // probably preferred. int32_t avg_elems_per_row = (num_elems + num_rows - 1) / num_rows, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_rows * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(RowSplitsToRowIdsKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>( num_rows, threads_per_row, row_splits, num_elems, row_ids)); } else { // TODO: Will probably just delete this branch at some point. // The following algorithm isn't particularly adapted to GPU hardware in // terms of coalesced reads and writes and so on, but it has reasonable // asymptotic time complexity (assuming all kernels run in parallel), // specifically: O(log(largest(row_splits[i+1]-row_splits[i]))) auto lambda_init_minus_one = [=] __host__ __device__(int32_t i) { row_ids[i] = -1; }; Eval(c, num_elems + 1, lambda_init_minus_one); auto lambda_phase_one = [=] __host__ __device__(int32_t i) { int32_t this_row_split = row_splits[i], next_row_split = (i < num_rows ? row_splits[i + 1] : this_row_split + 1); if (this_row_split < next_row_split) row_ids[this_row_split] = i; // we have to fill in row_ids[this_row_split], // row_ids[this_row_split+1]... row_ids[next_row_split-1] with the same // value but that could be a long loop. Instead we write at // this_row_split and all indexes this_row_split < i < next_row_split // such that i is the result of rounding up this_row_split to // (something)*2^n, for n = 1, 2, 3, ... this will take time logarithmic // in (next_row_split - this_row_split). we can then fill in the gaps // with a logarithmic-time loop, by looking for a value that's not (-1) // by rounding the current index down to successively higher powers // of 2. for (int32_t power = 0, j = this_row_split; j + (1 << power) < next_row_split; power++) { if (j & (1 << power)) { j += (1 << power); // we know that j is now < next_row_split, because we checked "j + // (1<<power) < next_row_split" in the loop condition. // Note, we don't want a loop-within-a-loop because of how SIMT // works... 
row_ids[j] = i; } } }; Eval(c, num_elems + 1, lambda_phase_one); auto lambda_phase_two = [=] __host__ __device__(int32_t j) { int32_t row_index = row_ids[j]; if (row_index != -1) return; int32_t power = 0, j2 = j; for (; row_index != -1; power++) { if (j2 & (1 << power)) { j2 -= (1 << power); row_index = row_ids[j2]; } assert(power < 31); } row_ids[j] = row_ids[j2]; }; // could do the next line for num_elems+1, but the element at `num_elems` // will already be set. Eval(c, num_elems, lambda_phase_two); } } } /* When we invoke this we make a big enough grid that there doesn't have to be a loop over elements, i.e. (gridDim.x * blockDim.x) / threads_per_elem > num_elems. (must be >=, because we imagine a phantom element at [num_elems] with the value `num_rows`.) @param [in] num_elems Number of elements in ragged matrix @param [in] threads_per_elem Number of threads we allocate per element. Must be >= 1. @param [in] row_ids The row_ids vector, of length `num_elems`; must be nonnegative and non-decreasing and all elements < num_rows. @param [in] num_rows Number of rows, must be greater than the largest (== last) element of `row_ids`. @param [out] row_splits This kernel will output a non-decreasing vector of length num_rows + 1, such that row_splits[0] == 0, row_splits[num_rows] == num_elems, and row_splits[row_ids[i]] <= i < row_splits[row_ids[i]+1] */ __global__ void RowIdsToRowSplitsKernel(int32_t num_elems, int32_t threads_per_elem, const int32_t *row_ids, int32_t num_rows, int32_t *row_splits) { int32_t thread = (blockIdx.x * blockDim.x + threadIdx.x), num_threads = gridDim.x * blockDim.x, elem = thread / threads_per_elem, thread_this_elem = thread % threads_per_elem; K2_CHECK_GE(num_threads / threads_per_elem, num_elems); if (elem > num_elems) return; int32_t this_row, prev_row; if (elem == 0) { prev_row = -1; this_row = row_ids[elem]; } else if (elem == num_elems) { prev_row = row_ids[elem - 1]; this_row = num_rows; } else { prev_row = row_ids[elem - 1]; this_row = row_ids[elem]; } // `num_splits` is the number of splits we have to write, usually 0 or 1 // but in principle unlimited as there could be empty rows. The // relationship between row_ids and row_splits is more symmetric than // you might expect. int32_t num_splits = this_row - prev_row; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (num_splits / threads_per_elem > max_loop) { if (thread_this_elem == 0) { FillValues(row_splits + prev_row + 1, num_splits, elem); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_elem < num_splits; thread_this_elem += threads_per_elem) row_splits[prev_row + 1 + thread_this_elem] = elem; } } // see declaration in utils.h for documentation. 
void RowIdsToRowSplits(ContextPtr &c, int32_t num_elems, const int32_t *row_ids, bool no_empty_rows, int32_t num_rows, int32_t *row_splits) { // process corner case first if (num_elems == 0) { auto lambda_set_values = [=] __host__ __device__(int32_t i) { row_splits[i] = 0; }; Eval(c, num_rows + 1, lambda_set_values); return; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row = -1; for (int32_t i = 0; i < num_elems; i++) { int32_t row = row_ids[i]; K2_CHECK_GE(row, cur_row); while (cur_row < row) { cur_row++; row_splits[cur_row] = i; } } // cur_row must be >= 0 here as num_elems > 0 K2_CHECK_GE(cur_row, 0); while (cur_row < num_rows) { row_splits[++cur_row] = num_elems; } } else { K2_CHECK_EQ(d, kCuda); if (no_empty_rows) { auto lambda_simple = [=] __host__ __device__(int32_t i) { int32_t this_row = row_ids[i], prev_row; if (i > 0) { // (normal case) prev_row = row_ids[i - 1]; } else { // i == 0 row_splits[num_rows] = num_elems; prev_row = -1; } K2_CHECK_LE(this_row, prev_row + 1); // no_empty_rows was asserted by // the user if (this_row > prev_row) { row_splits[this_row] = i; } }; Eval(c, num_elems, lambda_simple); return; } else { // By doing "+ 2" instead of "+ 1" we increase the minimum number of // threads-per-row, which may reduce latency when there are successive // empty rows. Any value >= 1 is correct though. int32_t avg_rows_per_elem = num_rows / num_elems + 2, threads_per_elem = RoundUpToNearestPowerOfTwo(avg_rows_per_elem), tot_threads = (num_elems + 1) * threads_per_elem; // +1 for the last row int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(RowIdsToRowSplitsKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>( num_elems, threads_per_elem, row_ids, num_rows, row_splits)); } } } /* Called inside GetTaskRedirect(); see documentation of that in header. Each task with 0 <= task < num_tasks gets allocated `threads_per_job` threads, e.g. threads_per_job = 4 or 16. It's a kind of n-ary search (generalization of binary search) where each branch is handled by a different thread so they can happen in parallel. TODO(dan): there are a lot of opportunities to further optimize this using GPU hardware tricks. The thread-block size this is called with must be jobs_per_block * threads_per_job. */ /* template <int32_t jobs_per_block, int32_t threads_per_job> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { __shared__ int32_t temp[tasks_per_block]; // we do __syncwarp() for synchronization below; we require threads_per_job <= // 32 for this reason. static_assert(threads_per_job >= 2 && threads_per_job <= 32); // We have work to do for 0 <= job_idx < num_tasks, but be careful: job_idx // may be >= num_tasks if num_tasks is small or not a power of two (we don't // return because we need to do __syncwarp()). So we have to avoid out of // bounds memory access. int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_job; // `branch_idx` is which member we are of the group of the `threads_per_job` threads for this job. int32_t branch_idx = threadIdx.x % threads_per_job; // we assume blockDim.x % threads_per_job == 0 // `temp_idx` is which index in the temporary storage `temp` we are assigned // (one per job). int32_t temp_idx = threadIdx.x / threads_per_job; // TODO: we may at some point decide that row_splits[0] has to be zero. 
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; if (num_items <= 0) { assert(num_items == 0); // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_job >= 2); if (branch_idx < 2 && job_idx < num_tasks) { TaskRedirect tr { job_idx, 2, branch_idx }; redirect_out[job_idx + branch_idx * num_tasks] = tr; } return; } else if (branch_idx == 0 && job_idx < num_tasks) { // This code writes to the jobs in the first half of the output array, // that are allocated to the same-numbered task. int32_t task_idx = job_idx, this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation); TaskRedirect tr { task_idx, num_jobs_this_task, 0 }; redirect_out[task_idx] = tr; } // Now we have the less-trivial task of assigning the jobs in the 2nd half of the // output array to tasks (these are allocated roughly proportional to the amount // of work to do for that task). // We do the selection by throwing darts at a dart-board, evenly spaced, and seeing which task they correspond // to. There are `num_tasks` darts). // Note: we know dart_location < row_splits_nt because job_idx < num_tasks and // because integer division rounds down. int32_t dart_separation = num_items / num_tasks, dart_location = row_splits0 + job_idx * dart_separation; // OK, from this point the goal is to find a task_idx such that // row_splits[task_idx] <= dart_location < row_splits[task_idx + 1]. // This is guaranteed to exist, as long as job_id < num_tasks. // As long as job_id < num_tasks, we maintain the property that // row_splits[lower_bound] <= dart_location && // (upper_bound > num_tasks || row_splits[upper_bound] > dart_location). // (where upper_bound == lower_bound + range), i.e. they are truly // lower and upper bounds int32_t lower_bound = 0, range = num_tasks; // we are responsible for items lower_bound through // (upper_bound = lower_bound + range) - 1. while (range > threads_per_job) { int32_t upper_bound = lower_bound + range; // We need to narrow the range of `task_idx` that might be the correct one. // We round *up* because we require that task_idx_step * threads_per_job >= // range, so that we cover the entire range. int32_t task_idx_step = (range + threads_per_job - 1) / threads_per_job, // >= 2 my_lower_task_idx = lower_bound + branch_idx * task_idx_step, my_upper_task_idx = my_lower_task_idx + task_idx_step; // The following avoids out-of-bounds memory accesses. if (my_upper_task_idx > upper_bound) my_upper_task_idx = upper_bound; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. if (my_lower_task_idx < num_tasks && row_splits[my_lower_task_idx] <= dart_location && dart_location < row_splits[my_upper_task_idx]) { // I am the "chosen branch" (exactly one will be chosen, as long as // job_idx < num_tasks). 
temp[temp_idx] = branch_idx; } __syncwarp(); int32_t chosen_branch_idx = temp[temp_idx]; lower_bound = lower_bound + chosen_branch_idx * task_idx_step; upper_bound = lower_bound + task_idx_step; range = task_idx_step; // note, we don't limit upper_bound to be <= num_tasks because we need all // threads in the block to go around the while loop the same number of // times. Therefore it's possible that upper_bound > num_tasks. K2_DASSERT(job_idx >= num_tasks || (row_splits[lower_bound] <= dart_location && (upper_bound > num_tasks || row_splits[upper_bound] > dart_location))); // TODO: remove once debugged. } int32_t task_idx = lower_bound + branch_idx; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. // // The check `task_idx < num_tasks` is to avoid out-of-bounds access of row_splits. // The check `job_idx < num_tasks` is to avoid out-of-bounds access of `redirect_out`; // for these out-of-range job_idx values, it's possible for task_idx to have // any value since it may be uninitialized memory. if (task_idx < num_tasks && job_idx < num_tasks) { int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; if (this_row_split <= dart_location && dart_location < next_row_split) { // OK, exactly one branch per job will reach this point. `num_jobs` below // is the number of jobs that will be active for this task. (The "1 // +".. is the job that we assign for each task, one job per task, in the // "first half" of the jobs). The job_id_this_task we're working out // below is the job_id within the second half of the TaskRedirects, // the half that are allocated by throwing darts. int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation), job_idx_this_task = 1 + (dart_location - this_row_split)/dart_separation; K2_CHECK(job_id_this_task < num_jobs_this_task); TaskRedirect tr { task_idx, num_jobs_this_task, job_idx_this_task }; redirect_out[num_tasks + job_idx] = tr; } } } */ /* This is a quite simple implementation of GetTaskRedirect... I had a more complicated one above that had better O(N) performance for hard cases, but this one will handle more normal/smaller cases better, plus is easier to debug. The basic idea is to throw lots of threads at it, i.e. threads_per_task should be, say, twice larger than the average / expected number of jobs per task, so that if a task has lots of jobs it doesn't have to loop too many times. */ template <int32_t threads_per_task> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x; int32_t task_idx = thread / threads_per_task; if (task_idx >= num_tasks) return; // `thread_idx` is which member we are of the group of the `threads_per_job` // threads for this job. int32_t thread_idx = thread % threads_per_task; int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; // the 'num_items' is the // total amount of work to // do, that we want to // distribute fairly evenly. // The idea with `dart_separation` is this: Half of the jobs we allocate to // the corresponding tasks. The other half we allocate by throwing darts onto // the interval [0, num_items - 1], evenly spaced starting from 0, and seeing // which tasks they land in. 
This is somewhat random but it ensures that if // any task has a very large amount of work to do, it will get a roughly // proportionate number of jobs. int32_t dart_separation = num_items / num_tasks; if (dart_separation <= 0) { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_task >= 2, "threads per task must >= 2"); if (thread_idx < 2) { TaskRedirect tr{task_idx, 2, static_cast<uint16_t>(thread_idx)}; redirect_out[task_idx + thread_idx * num_tasks] = tr; } return; } // TODO(dan): IDK how well the hardware combines these memory requests; could // consider loading to shared memory first. int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (min(next_row_split / dart_separation, num_tasks) - min(this_row_split / dart_separation, num_tasks)); // function `min` is from cuda K2_CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = thread_idx; job_id_this_task < num_jobs_this_task; job_id_this_task += threads_per_task) { int32_t job_idx = (job_id_this_task == 0 ? task_idx : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. redirect_out[job_idx] = TaskRedirect{task_idx, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } void GetTaskRedirect(cudaStream_t stream, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { if (num_tasks <= 0) return; if (stream == kCudaStreamInvalid) { // there's not much point in using this on CPU as there are better ways // to do things (sequentially), but this can be useful for debugging. // The idea with `dart_separation` is this: Half of the jobs we allocate // to the corresponding tasks. The other half we allocate by throwing // darts onto the interval [0, num_items - 1], evenly spaced starting from // 0, and seeing which tasks they land in. This is somewhat random but it // ensures that if any task has a very large amount of work to do, it will // get a roughly proportionate number of jobs. int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0, dart_separation = num_items / num_tasks; if (dart_separation != 0) { for (int32_t task = 0; task < num_tasks; ++task) { int32_t this_row_split = row_splits[task], next_row_split = row_splits[task + 1]; int32_t num_jobs_this_task = 1 + (std::min(next_row_split / dart_separation, num_tasks) - std::min(this_row_split / dart_separation, num_tasks)); K2_CHECK_EQ( static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = (job_id_this_task == 0 ? task : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. 
redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } else { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return for (int32_t task = 0; task < num_tasks; ++task) { int32_t num_jobs_this_task = 2; for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = task + job_id_this_task * num_tasks; redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } } else { // compare 8 to 2, which is the expected number of jobs per task. having // 8 substantially greater than 2 gives a fairly big safety factor. // However this is still far from ideal in scenarios where the number of // tasks might be highly unbalanced. const int32_t threads_per_task = 8, tot_threads = threads_per_task * num_tasks; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(GetTaskRedirect<threads_per_task> <<<grid_size, block_size, 0, stream>>>( num_tasks, row_splits, redirect_out)); } } void GetTaskRedirect(ContextPtr &c, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { GetTaskRedirect(c->GetCudaStream(), num_tasks, row_splits, redirect_out); } } // namespace k2
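The kernels and host branches above all revolve around the row_splits/row_ids duality, so a compact host-side reference may help when reading them. This is a minimal standalone sketch in plain C++ (no k2 types; the function names are mine), mirroring the kCpu branches of RowSplitsToRowIds() and RowIdsToRowSplits(): for row_splits = {0, 2, 2, 5} the row_ids are {0, 0, 2, 2, 2}, and the two representations round-trip.

#include <cassert>
#include <cstdio>
#include <vector>

// Reference conversion: element i belongs to row r iff row_splits[r] <= i < row_splits[r+1].
std::vector<int> RowSplitsToRowIdsRef(const std::vector<int> &row_splits) {
  int num_rows = static_cast<int>(row_splits.size()) - 1;
  std::vector<int> row_ids(row_splits[num_rows]);
  for (int row = 0; row < num_rows; ++row)
    for (int i = row_splits[row]; i < row_splits[row + 1]; ++i) row_ids[i] = row;
  return row_ids;
}

// Inverse conversion: histogram the row ids, then take an exclusive prefix sum.
std::vector<int> RowIdsToRowSplitsRef(const std::vector<int> &row_ids, int num_rows) {
  std::vector<int> row_splits(num_rows + 1, 0);
  for (int id : row_ids) ++row_splits[id + 1];
  for (int r = 0; r < num_rows; ++r) row_splits[r + 1] += row_splits[r];
  return row_splits;
}

int main() {
  std::vector<int> row_splits = {0, 2, 2, 5};                   // row 1 is empty
  std::vector<int> row_ids = RowSplitsToRowIdsRef(row_splits);  // {0, 0, 2, 2, 2}
  assert(RowIdsToRowSplitsRef(row_ids, 3) == row_splits);       // round-trips
  for (int id : row_ids) std::printf("%d ", id);
  std::printf("\n");
  return 0;
}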
698712deafbdcc0a995d4a5b0a20c45a553bcba3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.m on 19-Oct-2012 16:21:10 // // user function __device__ #include "adt_calc.h" // CUDA kernel function __global__ void op_cuda_adt_calc( double *ind_arg0, int *ind_map, short *arg_map, double *arg4, double *arg5, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { __shared__ int *ind_arg0_map, ind_arg0_size; __shared__ double *ind_arg0_s; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return; if (threadIdx.x==0) { // get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; ind_arg0_size = ind_arg_sizes[0+blockId*1]; ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1]; // set shared memory pointers int nbytes = 0; ind_arg0_s = (double *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed // copy indirect datasets into shared memory or zero increment for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x) ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2]; __syncthreads(); // process set elements for (int n=threadIdx.x; n<nelem; n+=blockDim.x) { // user-supplied kernel call adt_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2, ind_arg0_s+arg_map[1*set_size+n+offset_b]*2, ind_arg0_s+arg_map[2*set_size+n+offset_b]*2, ind_arg0_s+arg_map[3*set_size+n+offset_b]*2, arg4+(n+offset_b)*4, arg5+(n+offset_b)*1 ); } } // host stub function void op_par_loop_adt_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5 ){ int nargs = 6; op_arg args[6]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; int ninds = 1; int inds[6] = {0,0,0,0,-1,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: adt_calc\n"); } // get plan #ifdef OP_PART_SIZE_1 int part_size = OP_PART_SIZE_1; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(1); OP_kernels[1].name = name; OP_kernels[1].count += 1; if (set->size >0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); op_timers_core(&cpu_t1, &wall_t1); // execute plan int block_offset = 0; for (int col=0; col < Plan->ncolors; col++) { if (col==Plan->ncolors_core) op_mpi_wait_all(nargs,args); #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? 
(Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = Plan->nsharedCol[col]; hipLaunchKernelGGL(( op_cuda_adt_calc), dim3(nblocks),dim3(nthread),nshared, 0, (double *)arg0.data_d, Plan->ind_map, Plan->loc_map, (double *)arg4.data_d, (double *)arg5.data_d, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set_size); cutilSafeCall(hipDeviceSynchronize()); cutilCheckMsg("op_cuda_adt_calc execution failed\n"); } block_offset += Plan->ncolblk[col]; } op_timing_realloc(1); OP_kernels[1].transfer += Plan->transfer; OP_kernels[1].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[1].time += wall_t2 - wall_t1; }
698712deafbdcc0a995d4a5b0a20c45a553bcba3.cu
// // auto-generated by op2.m on 19-Oct-2012 16:21:10 // // user function __device__ #include "adt_calc.h" // CUDA kernel function __global__ void op_cuda_adt_calc( double *ind_arg0, int *ind_map, short *arg_map, double *arg4, double *arg5, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { __shared__ int *ind_arg0_map, ind_arg0_size; __shared__ double *ind_arg0_s; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) return; if (threadIdx.x==0) { // get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; ind_arg0_size = ind_arg_sizes[0+blockId*1]; ind_arg0_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1]; // set shared memory pointers int nbytes = 0; ind_arg0_s = (double *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed // copy indirect datasets into shared memory or zero increment for (int n=threadIdx.x; n<ind_arg0_size*2; n+=blockDim.x) ind_arg0_s[n] = ind_arg0[n%2+ind_arg0_map[n/2]*2]; __syncthreads(); // process set elements for (int n=threadIdx.x; n<nelem; n+=blockDim.x) { // user-supplied kernel call adt_calc( ind_arg0_s+arg_map[0*set_size+n+offset_b]*2, ind_arg0_s+arg_map[1*set_size+n+offset_b]*2, ind_arg0_s+arg_map[2*set_size+n+offset_b]*2, ind_arg0_s+arg_map[3*set_size+n+offset_b]*2, arg4+(n+offset_b)*4, arg5+(n+offset_b)*1 ); } } // host stub function void op_par_loop_adt_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5 ){ int nargs = 6; op_arg args[6]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; int ninds = 1; int inds[6] = {0,0,0,0,-1,-1}; if (OP_diags>2) { printf(" kernel routine with indirection: adt_calc\n"); } // get plan #ifdef OP_PART_SIZE_1 int part_size = OP_PART_SIZE_1; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(1); OP_kernels[1].name = name; OP_kernels[1].count += 1; if (set->size >0) { op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds); op_timers_core(&cpu_t1, &wall_t1); // execute plan int block_offset = 0; for (int col=0; col < Plan->ncolors; col++) { if (col==Plan->ncolors_core) op_mpi_wait_all(nargs,args); #ifdef OP_BLOCK_SIZE_1 int nthread = OP_BLOCK_SIZE_1; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = Plan->nsharedCol[col]; op_cuda_adt_calc<<<nblocks,nthread,nshared>>>( (double *)arg0.data_d, Plan->ind_map, Plan->loc_map, (double *)arg4.data_d, (double *)arg5.data_d, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set_size); cutilSafeCall(cudaDeviceSynchronize()); cutilCheckMsg("op_cuda_adt_calc execution failed\n"); } block_offset += Plan->ncolblk[col]; } op_timing_realloc(1); OP_kernels[1].transfer += Plan->transfer; OP_kernels[1].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[1].time += wall_t2 - wall_t1; }
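One detail of the generated host stub above that is easy to miss is the dim3 construction: when a plan color has 65536 or more blocks, the launch folds them into a (65535, k) grid because gridDim.x was capped at 65535 on the hardware this code targeted, and the kernel rebuilds the linear block id as blockIdx.x + blockIdx.y * gridDim.x. A minimal standalone sketch of that pattern, assuming an arbitrary block count not taken from any OP2 plan:

#include <cstdio>

__global__ void linear_block_id_demo(int nblocks, int *out) {
  int block = blockIdx.x + blockIdx.y * gridDim.x;  // same linearisation as op_cuda_adt_calc
  if (block >= nblocks) return;                     // padding block from rounding up
  if (threadIdx.x == 0) out[block] = block;
}

int main() {
  const int nblocks = 70000;                        // > 65535, so a 1-D grid will not fit
  dim3 grid(nblocks >= (1 << 16) ? 65535 : nblocks,
            nblocks >= (1 << 16) ? (nblocks - 1) / 65535 + 1 : 1, 1);
  int *d_out;
  cudaMalloc(&d_out, nblocks * sizeof(int));
  linear_block_id_demo<<<grid, 64>>>(nblocks, d_out);
  cudaDeviceSynchronize();
  int last = -1;
  cudaMemcpy(&last, d_out + nblocks - 1, sizeof(int), cudaMemcpyDeviceToHost);
  std::printf("last block id written: %d\n", last); // expect 69999
  cudaFree(d_out);
  return 0;
}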
649769af7cf2fe8a54c701df61ab8c68835ea2a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/phi/kernels/sparse/pool_grad_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_info.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/core/visit_type.h" #include "paddle/phi/kernels/empty_kernel.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/pooling.h" #include "paddle/phi/kernels/funcs/sparse/convolution.h" namespace phi { namespace sparse { template <typename T, typename IntT = int> __global__ void MaxPoolGradCudaKernel(const T* in_features_ptr, const T* out_features_ptr, const T* out_grad_ptr, const IntT* rulebook_ptr, const int n, const int rulebook_len, const int channels, T* x_grad_ptr) { phi::funcs::MaxPoolGrad<T> grad_functor; CUDA_KERNEL_LOOP_TYPE(i, n * channels, int64_t) { int real_i = i / channels; int c = i - real_i * channels; IntT in_i = rulebook_ptr[real_i]; IntT out_i = rulebook_ptr[real_i + rulebook_len]; grad_functor.compute(in_features_ptr[in_i * channels + c], out_features_ptr[out_i * channels + c], out_grad_ptr[out_i * channels + c], 1, &x_grad_ptr[in_i * channels + c]); } } template <typename T, typename IntT = int> void MaxPoolCooGradGPUKernel(const GPUContext& dev_ctx, const SparseCooTensor& x, const DenseTensor& rulebook, const DenseTensor& counter, const SparseCooTensor& out, const SparseCooTensor& out_grad, const std::vector<int>& kernel_sizes, SparseCooTensor* x_grad) { int kernel_size = kernel_sizes[0] * kernel_sizes[1] * kernel_sizes[2]; const int in_channels = x.dims()[4]; int rulebook_len = rulebook.dims()[1]; const IntT* rulebook_ptr = rulebook.data<IntT>(); std::vector<int> offsets(kernel_size + 1); const int* counter_ptr = counter.data<int>(); phi::funcs::sparse::PrefixSum(counter_ptr, &offsets[0], kernel_size); const T* in_features_ptr = x.non_zero_elements().data<T>(); const T* out_features_ptr = out.non_zero_elements().data<T>(); const T* out_grad_ptr = out_grad.non_zero_elements().data<T>(); // TODO(zhangkaihuo): call phi::sparse::EmptyLike DenseTensor x_grad_indices = phi::EmptyLike<IntT>(dev_ctx, x.non_zero_indices()); DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.non_zero_elements()); x_grad->SetMember(x_grad_indices, x_grad_values, x.dims(), true); T* x_grad_ptr = x_grad_values.data<T>(); phi::funcs::SetConstant<GPUContext, T> set_zero; set_zero(dev_ctx, &x_grad_values, static_cast<T>(0.0f)); phi::Copy<GPUContext>(dev_ctx, x.non_zero_indices(), dev_ctx.GetPlace(), false, &x_grad_indices); for (int i = 0; i < kernel_size; i++) { if (counter_ptr[i] <= 0) { continue; } auto config = phi::backends::gpu::GetGpuLaunchConfig1D( dev_ctx, counter_ptr[i] * in_channels, 1); hipLaunchKernelGGL(( MaxPoolGradCudaKernel<T, IntT>) , dim3(config.block_per_grid.x), 
dim3(config.thread_per_block.x), 0, dev_ctx.stream(), in_features_ptr, out_features_ptr, out_grad_ptr, rulebook_ptr + offsets[i], counter_ptr[i], rulebook_len, in_channels, x_grad_ptr); } } template <typename T, typename Context> void MaxPoolCooGradKernel(const Context& dev_ctx, const SparseCooTensor& x, const DenseTensor& rulebook, const DenseTensor& counter, const SparseCooTensor& out, const SparseCooTensor& out_grad, const std::vector<int>& kernel_sizes, SparseCooTensor* x_grad) { PD_VISIT_BASE_INTEGRAL_TYPES( x.non_zero_indices().dtype(), "MaxPoolCooGradGPUKernel", ([&] { MaxPoolCooGradGPUKernel<T, data_t>( dev_ctx, x, rulebook, counter, out, out_grad, kernel_sizes, x_grad); })); } } // namespace sparse } // namespace phi PD_REGISTER_KERNEL(maxpool_coo_grad, GPU, ALL_LAYOUT, phi::sparse::MaxPoolCooGradKernel, float, double) { kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO); }
649769af7cf2fe8a54c701df61ab8c68835ea2a6.cu
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/phi/kernels/sparse/pool_grad_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_info.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/tensor_utils.h" #include "paddle/phi/core/visit_type.h" #include "paddle/phi/kernels/empty_kernel.h" #include "paddle/phi/kernels/funcs/math_function.h" #include "paddle/phi/kernels/funcs/pooling.h" #include "paddle/phi/kernels/funcs/sparse/convolution.h" namespace phi { namespace sparse { template <typename T, typename IntT = int> __global__ void MaxPoolGradCudaKernel(const T* in_features_ptr, const T* out_features_ptr, const T* out_grad_ptr, const IntT* rulebook_ptr, const int n, const int rulebook_len, const int channels, T* x_grad_ptr) { phi::funcs::MaxPoolGrad<T> grad_functor; CUDA_KERNEL_LOOP_TYPE(i, n * channels, int64_t) { int real_i = i / channels; int c = i - real_i * channels; IntT in_i = rulebook_ptr[real_i]; IntT out_i = rulebook_ptr[real_i + rulebook_len]; grad_functor.compute(in_features_ptr[in_i * channels + c], out_features_ptr[out_i * channels + c], out_grad_ptr[out_i * channels + c], 1, &x_grad_ptr[in_i * channels + c]); } } template <typename T, typename IntT = int> void MaxPoolCooGradGPUKernel(const GPUContext& dev_ctx, const SparseCooTensor& x, const DenseTensor& rulebook, const DenseTensor& counter, const SparseCooTensor& out, const SparseCooTensor& out_grad, const std::vector<int>& kernel_sizes, SparseCooTensor* x_grad) { int kernel_size = kernel_sizes[0] * kernel_sizes[1] * kernel_sizes[2]; const int in_channels = x.dims()[4]; int rulebook_len = rulebook.dims()[1]; const IntT* rulebook_ptr = rulebook.data<IntT>(); std::vector<int> offsets(kernel_size + 1); const int* counter_ptr = counter.data<int>(); phi::funcs::sparse::PrefixSum(counter_ptr, &offsets[0], kernel_size); const T* in_features_ptr = x.non_zero_elements().data<T>(); const T* out_features_ptr = out.non_zero_elements().data<T>(); const T* out_grad_ptr = out_grad.non_zero_elements().data<T>(); // TODO(zhangkaihuo): call phi::sparse::EmptyLike DenseTensor x_grad_indices = phi::EmptyLike<IntT>(dev_ctx, x.non_zero_indices()); DenseTensor x_grad_values = phi::EmptyLike<T>(dev_ctx, x.non_zero_elements()); x_grad->SetMember(x_grad_indices, x_grad_values, x.dims(), true); T* x_grad_ptr = x_grad_values.data<T>(); phi::funcs::SetConstant<GPUContext, T> set_zero; set_zero(dev_ctx, &x_grad_values, static_cast<T>(0.0f)); phi::Copy<GPUContext>(dev_ctx, x.non_zero_indices(), dev_ctx.GetPlace(), false, &x_grad_indices); for (int i = 0; i < kernel_size; i++) { if (counter_ptr[i] <= 0) { continue; } auto config = phi::backends::gpu::GetGpuLaunchConfig1D( dev_ctx, counter_ptr[i] * in_channels, 1); MaxPoolGradCudaKernel<T, IntT> <<<config.block_per_grid.x, config.thread_per_block.x, 0, dev_ctx.stream()>>>(in_features_ptr, out_features_ptr, out_grad_ptr, rulebook_ptr + 
offsets[i], counter_ptr[i], rulebook_len, in_channels, x_grad_ptr); } } template <typename T, typename Context> void MaxPoolCooGradKernel(const Context& dev_ctx, const SparseCooTensor& x, const DenseTensor& rulebook, const DenseTensor& counter, const SparseCooTensor& out, const SparseCooTensor& out_grad, const std::vector<int>& kernel_sizes, SparseCooTensor* x_grad) { PD_VISIT_BASE_INTEGRAL_TYPES( x.non_zero_indices().dtype(), "MaxPoolCooGradGPUKernel", ([&] { MaxPoolCooGradGPUKernel<T, data_t>( dev_ctx, x, rulebook, counter, out, out_grad, kernel_sizes, x_grad); })); } } // namespace sparse } // namespace phi PD_REGISTER_KERNEL(maxpool_coo_grad, GPU, ALL_LAYOUT, phi::sparse::MaxPoolCooGradKernel, float, double) { kernel->InputAt(0).SetDataLayout(phi::DataLayout::SPARSE_COO); }
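Two idioms in the kernel above are worth isolating: CUDA_KERNEL_LOOP_TYPE, which in Paddle expands to a grid-stride loop, and the max-pool backward rule itself, where the output gradient flows only to input positions whose value equals the pooled maximum. The sketch below is standalone and only assumes that phi::funcs::MaxPoolGrad<T>::compute() implements that equality-gated accumulation; it leaves out the rulebook indirection entirely.

#include <cstdio>

__global__ void max_pool_grad_demo(const float *x, const float *y, const float *dy,
                                   int n, float *dx) {
  // grid-stride loop, the shape CUDA_KERNEL_LOOP_TYPE(i, n, ...) expands to
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += gridDim.x * blockDim.x) {
    if (x[i] == y[i]) atomicAdd(&dx[i], dy[i]);  // gradient only where x was the max
  }
}

int main() {
  const int n = 4;
  float *x, *y, *dy, *dx;
  cudaMallocManaged(&x, 4 * n * sizeof(float));
  y = x + n; dy = y + n; dx = dy + n;
  const float xv[n] = {1.f, 3.f, 2.f, 5.f};  // inputs
  const float yv[n] = {3.f, 3.f, 5.f, 5.f};  // pooled max, broadcast back per input
  for (int i = 0; i < n; ++i) { x[i] = xv[i]; y[i] = yv[i]; dy[i] = 1.f; dx[i] = 0.f; }
  max_pool_grad_demo<<<1, 64>>>(x, y, dy, n, dx);
  cudaDeviceSynchronize();
  for (int i = 0; i < n; ++i) std::printf("%g ", dx[i]);  // expect 0 1 0 1
  std::printf("\n");
  cudaFree(x);
  return 0;
}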
21130a71bf67a5557e6d5f7a4daedc81ca560fb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*Copyright(c) 2020, The Regents of the University of California, Davis. */ /* */ /* */ /*Redistribution and use in source and binary forms, with or without modification, */ /*are permitted provided that the following conditions are met : */ /* */ /*1. Redistributions of source code must retain the above copyright notice, this */ /*list of conditions and the following disclaimer. */ /*2. Redistributions in binary form must reproduce the above copyright notice, */ /*this list of conditions and the following disclaimer in the documentation */ /*and / or other materials provided with the distribution. */ /* */ /*THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND */ /*ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED */ /*WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.*/ /*IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, */ /*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT */ /*NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR*/ /*PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, */ /*WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) */ /*ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /*POSSIBILITY OF SUCH DAMAGE. */ /************************************************************************************/ /************************************************************************************/ #pragma once #include <cstdint> namespace GpuBTree { namespace kernels { template<typename KeyT, typename ValueT, typename SizeT, typename AllocatorT> __global__ void insert_keys(uint32_t* __restrict__ d_root, KeyT* __restrict__ d_keys, ValueT* __restrict__ d_values, SizeT num_keys, AllocatorT allocator) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t laneId = lane_id(); KeyT myKey; ValueT myValue; bool to_insert = false; if ((tid - laneId) >= num_keys) return; if (tid < num_keys) { myKey = d_keys[tid] + 2; myValue = d_values[tid] + 2; to_insert = true; } warps::insertion_unit(to_insert, myKey, myValue, d_root, &allocator); } template<typename AllocatorT> __global__ void init_btree(uint32_t* d_root, AllocatorT allocator) { uint32_t laneId = lane_id(); uint32_t root_id; if (laneId == 0) root_id = allocator.allocate(); root_id = __shfl_sync(WARP_MASK, root_id, 0); *d_root = root_id; uint32_t* tree_root = allocator.getAddressPtr(root_id); if (laneId < 2) tree_root[laneId] = 1 - laneId; } template<typename KeyT, typename ValueT, typename SizeT, typename AllocatorT> __global__ void search_b_tree(uint32_t* __restrict__ d_root, KeyT* __restrict__ d_queries, ValueT* __restrict__ d_results, SizeT num_queries, AllocatorT allocator) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t laneId = lane_id(); if ((tid - laneId) >= num_queries) return; uint32_t myQuery = 0; uint32_t myResult = SEARCH_NOT_FOUND; bool to_search = false; if (tid < num_queries) { myQuery = d_queries[tid] + 2; to_search = true; } warps::search_unit(to_search, laneId, myQuery, myResult, d_root, &allocator); if (tid < num_queries) myResult = myResult ? 
myResult - 2 : myResult; d_results[tid] = myResult; } template<typename KeyT, typename SizeT, typename AllocatorT> __global__ void delete_b_tree(uint32_t* __restrict__ d_root, KeyT* __restrict__ d_queries, SizeT num_queries, AllocatorT allocator) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t laneId = lane_id(); if ((tid - laneId) >= num_queries) return; KeyT myQuery = 0xFFFFFFFF; if (tid < uint32_t(num_queries)) { myQuery = d_queries[tid] + 2; } warps::delete_unit_bulk(laneId, myQuery, d_root, &allocator); } template<typename KeyT, typename ValueT, typename SizeT, typename AllocatorT> __global__ void range_b_tree(uint32_t* __restrict__ d_root, KeyT* __restrict__ d_queries_lower, KeyT* __restrict__ d_queries_upper, ValueT* __restrict__ d_range_results, SizeT num_queries, SizeT range_length, AllocatorT allocator) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t laneId = lane_id(); if ((tid - laneId) >= num_queries) return; uint32_t lower_bound = 0; uint32_t upper_bound = 0; bool to_search = false; if (tid < num_queries) { lower_bound = d_queries_lower[tid] + 2; upper_bound = d_queries_upper[tid] + 2; to_search = true; } warps::range_unit(laneId, to_search, lower_bound, upper_bound, d_range_results, d_root, range_length, &allocator); } }; // namespace kernels }; // namespace GpuBTree
21130a71bf67a5557e6d5f7a4daedc81ca560fb5.cu
/*Copyright(c) 2020, The Regents of the University of California, Davis. */ /* */ /* */ /*Redistribution and use in source and binary forms, with or without modification, */ /*are permitted provided that the following conditions are met : */ /* */ /*1. Redistributions of source code must retain the above copyright notice, this */ /*list of conditions and the following disclaimer. */ /*2. Redistributions in binary form must reproduce the above copyright notice, */ /*this list of conditions and the following disclaimer in the documentation */ /*and / or other materials provided with the distribution. */ /* */ /*THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND */ /*ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED */ /*WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.*/ /*IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, */ /*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, BUT */ /*NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR*/ /*PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, */ /*WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) */ /*ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /*POSSIBILITY OF SUCH DAMAGE. */ /************************************************************************************/ /************************************************************************************/ #pragma once #include <cstdint> namespace GpuBTree { namespace kernels { template<typename KeyT, typename ValueT, typename SizeT, typename AllocatorT> __global__ void insert_keys(uint32_t* __restrict__ d_root, KeyT* __restrict__ d_keys, ValueT* __restrict__ d_values, SizeT num_keys, AllocatorT allocator) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t laneId = lane_id(); KeyT myKey; ValueT myValue; bool to_insert = false; if ((tid - laneId) >= num_keys) return; if (tid < num_keys) { myKey = d_keys[tid] + 2; myValue = d_values[tid] + 2; to_insert = true; } warps::insertion_unit(to_insert, myKey, myValue, d_root, &allocator); } template<typename AllocatorT> __global__ void init_btree(uint32_t* d_root, AllocatorT allocator) { uint32_t laneId = lane_id(); uint32_t root_id; if (laneId == 0) root_id = allocator.allocate(); root_id = __shfl_sync(WARP_MASK, root_id, 0); *d_root = root_id; uint32_t* tree_root = allocator.getAddressPtr(root_id); if (laneId < 2) tree_root[laneId] = 1 - laneId; } template<typename KeyT, typename ValueT, typename SizeT, typename AllocatorT> __global__ void search_b_tree(uint32_t* __restrict__ d_root, KeyT* __restrict__ d_queries, ValueT* __restrict__ d_results, SizeT num_queries, AllocatorT allocator) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t laneId = lane_id(); if ((tid - laneId) >= num_queries) return; uint32_t myQuery = 0; uint32_t myResult = SEARCH_NOT_FOUND; bool to_search = false; if (tid < num_queries) { myQuery = d_queries[tid] + 2; to_search = true; } warps::search_unit(to_search, laneId, myQuery, myResult, d_root, &allocator); if (tid < num_queries) myResult = myResult ? 
myResult - 2 : myResult; d_results[tid] = myResult; } template<typename KeyT, typename SizeT, typename AllocatorT> __global__ void delete_b_tree(uint32_t* __restrict__ d_root, KeyT* __restrict__ d_queries, SizeT num_queries, AllocatorT allocator) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t laneId = lane_id(); if ((tid - laneId) >= num_queries) return; KeyT myQuery = 0xFFFFFFFF; if (tid < uint32_t(num_queries)) { myQuery = d_queries[tid] + 2; } warps::delete_unit_bulk(laneId, myQuery, d_root, &allocator); } template<typename KeyT, typename ValueT, typename SizeT, typename AllocatorT> __global__ void range_b_tree(uint32_t* __restrict__ d_root, KeyT* __restrict__ d_queries_lower, KeyT* __restrict__ d_queries_upper, ValueT* __restrict__ d_range_results, SizeT num_queries, SizeT range_length, AllocatorT allocator) { uint32_t tid = threadIdx.x + blockIdx.x * blockDim.x; uint32_t laneId = lane_id(); if ((tid - laneId) >= num_queries) return; uint32_t lower_bound = 0; uint32_t upper_bound = 0; bool to_search = false; if (tid < num_queries) { lower_bound = d_queries_lower[tid] + 2; upper_bound = d_queries_upper[tid] + 2; to_search = true; } warps::range_unit(laneId, to_search, lower_bound, upper_bound, d_range_results, d_root, range_length, &allocator); } }; // namespace kernels }; // namespace GpuBTree
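The init_btree kernel above uses a common warp idiom: only lane 0 performs the allocation, and the result is broadcast to the other 31 lanes with __shfl_sync. A minimal standalone version of that broadcast follows; lane_id() and WARP_MASK are GpuBTree helpers not shown here, so the definitions below are assumptions that stand in for them.

#include <cstdio>

#define FULL_WARP_MASK 0xFFFFFFFFu  // assumed equivalent of GpuBTree's WARP_MASK

__device__ __forceinline__ unsigned lane() { return threadIdx.x & 31u; }  // assumed lane_id()

__global__ void lane0_broadcast_demo(int *out) {
  unsigned laneId = lane();
  int value = 0;
  if (laneId == 0) value = 42;                    // only lane 0 "allocates"
  value = __shfl_sync(FULL_WARP_MASK, value, 0);  // broadcast lane 0's value to the warp
  out[threadIdx.x] = value;
}

int main() {
  int *out;
  cudaMallocManaged(&out, 32 * sizeof(int));
  lane0_broadcast_demo<<<1, 32>>>(out);
  cudaDeviceSynchronize();
  std::printf("lane 31 sees %d\n", out[31]);      // expect 42
  cudaFree(out);
  return 0;
}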
1640be37ad618931f3273f1393dc68f166af8f8d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include <cutf/hiprand.hpp> #include <cutf/cublas.hpp> #include <cutf/memory.hpp> #include <rand_projection_base.hpp> #include <hiprand/hiprand_kernel.h> #include "cuda_common.hpp" namespace { __global__ void rand_kernel( float* const dst_ptr, const std::size_t array_size, const float* const candidates_ptr, const float* const candidates_prob_ptr, const std::size_t candidates_size, const std::uint64_t seed ) { const auto tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid >= array_size) { return; } hiprandState_t curand_state; hiprand_init(seed, tid, 0, &curand_state); for (unsigned i = tid; i < array_size; i += gridDim.x * blockDim.x) { const auto r = hiprand_uniform(&curand_state); std::size_t j = 0; for (; j < candidates_size - 1; j++) { if (r < candidates_prob_ptr[j]) { break; } } __syncwarp(); dst_ptr[i] = candidates_ptr[j]; } } } // unnamed namespace void mtk::rsvd_test::random_projection_discrete::gen_rand(const std::uint64_t seed) { std::vector<float> acc_rands; acc_rands.push_back(random_candidate_probs[0]); for (unsigned i = 1; i < random_candidate_probs.size(); i++) { const auto v = acc_rands[acc_rands.size() - 1] + random_candidate_probs[i]; acc_rands.push_back(v); } for (auto &p : acc_rands) { p /= acc_rands[acc_rands.size() - 1]; } auto dev_rand_candidates = cutf::memory::malloc_async<float>(acc_rands.size(), cuda_stream); auto dev_rand_candidate_probs = cutf::memory::malloc_async<float>(acc_rands.size(), cuda_stream); cutf::memory::copy_async(dev_rand_candidates , random_candidates.data(), acc_rands.size(), cuda_stream); cutf::memory::copy_async(dev_rand_candidate_probs, acc_rands.data() , acc_rands.size(), cuda_stream); hipLaunchKernelGGL(( rand_kernel), dim3(256), dim3(256), 0, cuda_stream, rand_matrix_ptr, get_max_src_n() * get_max_target_rank(), dev_rand_candidates, dev_rand_candidate_probs, acc_rands.size(), seed ); cutf::memory::free_async(dev_rand_candidate_probs, cuda_stream); cutf::memory::free_async(dev_rand_candidates, cuda_stream); } void mtk::rsvd_test::random_projection_discrete::apply( const std::size_t m, const std::size_t n, const std::size_t r, float* const dst_ptr, const std::size_t ldd, float* const src_ptr, const std::size_t lds ) { const float alpha = 1.0f, beta = 0.0f; CUTF_CHECK_ERROR(cutf::cublas::gemm( cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_T, m, r, n, &alpha, src_ptr, lds, rand_matrix_ptr, r, &beta, dst_ptr, ldd )); } void mtk::rsvd_test::random_projection_discrete::allocate_working_memory() { rand_matrix_ptr = cutf::memory::malloc_async<float>(get_max_src_n() * get_max_target_rank(), cuda_stream); } void mtk::rsvd_test::random_projection_discrete::free_working_memory() { cutf::memory::free_async(rand_matrix_ptr, cuda_stream); }
1640be37ad618931f3273f1393dc68f166af8f8d.cu
#include <vector> #include <cutf/curand.hpp> #include <cutf/cublas.hpp> #include <cutf/memory.hpp> #include <rand_projection_base.hpp> #include <curand_kernel.h> #include "cuda_common.hpp" namespace { __global__ void rand_kernel( float* const dst_ptr, const std::size_t array_size, const float* const candidates_ptr, const float* const candidates_prob_ptr, const std::size_t candidates_size, const std::uint64_t seed ) { const auto tid = threadIdx.x + blockDim.x * blockIdx.x; if (tid >= array_size) { return; } curandState curand_state; curand_init(seed, tid, 0, &curand_state); for (unsigned i = tid; i < array_size; i += gridDim.x * blockDim.x) { const auto r = curand_uniform(&curand_state); std::size_t j = 0; for (; j < candidates_size - 1; j++) { if (r < candidates_prob_ptr[j]) { break; } } __syncwarp(); dst_ptr[i] = candidates_ptr[j]; } } } // unnamed namespace void mtk::rsvd_test::random_projection_discrete::gen_rand(const std::uint64_t seed) { std::vector<float> acc_rands; acc_rands.push_back(random_candidate_probs[0]); for (unsigned i = 1; i < random_candidate_probs.size(); i++) { const auto v = acc_rands[acc_rands.size() - 1] + random_candidate_probs[i]; acc_rands.push_back(v); } for (auto &p : acc_rands) { p /= acc_rands[acc_rands.size() - 1]; } auto dev_rand_candidates = cutf::memory::malloc_async<float>(acc_rands.size(), cuda_stream); auto dev_rand_candidate_probs = cutf::memory::malloc_async<float>(acc_rands.size(), cuda_stream); cutf::memory::copy_async(dev_rand_candidates , random_candidates.data(), acc_rands.size(), cuda_stream); cutf::memory::copy_async(dev_rand_candidate_probs, acc_rands.data() , acc_rands.size(), cuda_stream); rand_kernel<<<256, 256, 0, cuda_stream>>>( rand_matrix_ptr, get_max_src_n() * get_max_target_rank(), dev_rand_candidates, dev_rand_candidate_probs, acc_rands.size(), seed ); cutf::memory::free_async(dev_rand_candidate_probs, cuda_stream); cutf::memory::free_async(dev_rand_candidates, cuda_stream); } void mtk::rsvd_test::random_projection_discrete::apply( const std::size_t m, const std::size_t n, const std::size_t r, float* const dst_ptr, const std::size_t ldd, float* const src_ptr, const std::size_t lds ) { const float alpha = 1.0f, beta = 0.0f; CUTF_CHECK_ERROR(cutf::cublas::gemm( cublas_handle, CUBLAS_OP_N, CUBLAS_OP_T, m, r, n, &alpha, src_ptr, lds, rand_matrix_ptr, r, &beta, dst_ptr, ldd )); } void mtk::rsvd_test::random_projection_discrete::allocate_working_memory() { rand_matrix_ptr = cutf::memory::malloc_async<float>(get_max_src_n() * get_max_target_rank(), cuda_stream); } void mtk::rsvd_test::random_projection_discrete::free_working_memory() { cutf::memory::free_async(rand_matrix_ptr, cuda_stream); }
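gen_rand above is an instance of inverse-CDF sampling from a small discrete distribution: the host accumulates and normalises the candidate probabilities, and each thread draws u with cuRAND, then scans the cumulative table for the first entry that covers u. A standalone sketch of the same pattern; the candidate values and probabilities here are made up for illustration and are not taken from rand_projection_base.hpp.

#include <cstdio>
#include <curand_kernel.h>

__global__ void discrete_sample_demo(float *dst, int n, const float *values,
                                     const float *cum_probs, int k,
                                     unsigned long long seed) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid >= n) return;
  curandState state;
  curand_init(seed, tid, 0, &state);           // one independent subsequence per thread
  float u = curand_uniform(&state);            // u in (0, 1]
  int j = 0;
  while (j < k - 1 && u >= cum_probs[j]) ++j;  // first bucket whose cumulative prob covers u
  dst[tid] = values[j];
}

int main() {
  const int k = 3, n = 8;
  float *values, *cum, *dst;
  cudaMallocManaged(&values, k * sizeof(float));
  cudaMallocManaged(&cum, k * sizeof(float));
  cudaMallocManaged(&dst, n * sizeof(float));
  const float v[k] = {-1.f, 0.f, 1.f};         // hypothetical candidates
  const float c[k] = {1.f / 6, 5.f / 6, 1.f};  // cumulative, already normalised
  for (int i = 0; i < k; ++i) { values[i] = v[i]; cum[i] = c[i]; }
  discrete_sample_demo<<<1, n>>>(dst, n, values, cum, k, 1234ULL);
  cudaDeviceSynchronize();
  for (int i = 0; i < n; ++i) std::printf("%g ", dst[i]);
  std::printf("\n");
  cudaFree(values); cudaFree(cum); cudaFree(dst);
  return 0;
}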
d0f0be53feb93dbcf5133e3e40a1eba0ee7806d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ProcessModules.cuh" #include "TrackSeeding.cuh" #include "TrackForwarding.cuh" /** * @brief Processes modules in decreasing order with some stride */ __device__ void process_modules( Velo::Module* module_data, float* shared_best_fits, const uint starting_module, const uint stride, bool* hit_used, const short* h0_candidates, const short* h2_candidates, const uint number_of_modules, const uint* module_hitStarts, const uint* module_hitNums, const float* hit_Xs, const float* hit_Ys, const float* hit_Zs, const float* hit_Phis, uint* weaktracks_insert_pointer, uint* tracklets_insert_pointer, uint* ttf_insert_pointer, uint* tracks_insert_pointer, uint* tracks_to_follow, Velo::TrackletHits* weak_tracks, Velo::TrackletHits* tracklets, Velo::TrackHits* tracks, const uint number_of_hits, unsigned short* h1_rel_indices, uint* local_number_of_hits, const uint hit_offset, const float* dev_velo_module_zs ) { auto first_module = starting_module; // Prepare the first seeding iteration // Load shared module information if (threadIdx.x < 6) { const auto module_number = first_module - threadIdx.x; module_data[threadIdx.x].hitStart = module_hitStarts[module_number] - hit_offset; module_data[threadIdx.x].hitNums = module_hitNums[module_number]; module_data[threadIdx.x].z = dev_velo_module_zs[module_number]; } // Due to shared module data loading __syncthreads(); // Do first track seeding track_seeding( shared_best_fits, hit_Xs, hit_Ys, hit_Zs, module_data, h0_candidates, h2_candidates, hit_used, tracklets_insert_pointer, ttf_insert_pointer, tracklets, tracks_to_follow, h1_rel_indices, local_number_of_hits ); // Prepare forwarding - seeding loop uint last_ttf = 0; first_module -= stride; while (first_module >= 4) { // Due to WAR between trackSeedingFirst and the code below __syncthreads(); // Iterate in modules // Load in shared if (threadIdx.x < 6) { const auto module_number = first_module - threadIdx.x; module_data[threadIdx.x].hitStart = module_hitStarts[module_number] - hit_offset; module_data[threadIdx.x].hitNums = module_hitNums[module_number]; module_data[threadIdx.x].z = dev_velo_module_zs[module_number]; } const auto prev_ttf = last_ttf; last_ttf = ttf_insert_pointer[0]; const auto diff_ttf = last_ttf - prev_ttf; // Reset atomics local_number_of_hits[0] = 0; // Due to module data loading __syncthreads(); // Track Forwarding track_forwarding( hit_Xs, hit_Ys, hit_Zs, hit_Phis, hit_used, tracks_insert_pointer, ttf_insert_pointer, weaktracks_insert_pointer, module_data, diff_ttf, tracks_to_follow, weak_tracks, prev_ttf, tracklets, tracks, number_of_hits ); // Due to ttf_insert_pointer __syncthreads(); // Seeding track_seeding( shared_best_fits, hit_Xs, hit_Ys, hit_Zs, module_data, h0_candidates, h2_candidates, hit_used, tracklets_insert_pointer, ttf_insert_pointer, tracklets, tracks_to_follow, h1_rel_indices, local_number_of_hits ); first_module -= stride; } // Due to last seeding ttf_insert_pointer __syncthreads(); const auto prev_ttf = last_ttf; last_ttf = ttf_insert_pointer[0]; const auto diff_ttf = last_ttf - prev_ttf; // Process the last bunch of track_to_follows for (int i=0; i<(diff_ttf + blockDim.x - 1) / blockDim.x; ++i) { const auto ttf_element = blockDim.x * i + threadIdx.x; if (ttf_element < diff_ttf) { const int fulltrackno = tracks_to_follow[(prev_ttf + ttf_element) % VeloTracking::ttf_modulo]; const bool track_flag = (fulltrackno & 0x80000000) == 0x80000000; const int trackno = fulltrackno & 
0x0FFFFFFF; // Here we are only interested in three-hit tracks, // to mark them as "doubtful" if (track_flag) { const auto weakP = atomicAdd(weaktracks_insert_pointer, 1); assert(weakP < number_of_hits); weak_tracks[weakP] = tracklets[trackno]; } } } }
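The tail loop above decodes entries of tracks_to_follow that pack a "tracklet" flag into the top bit (0x80000000) and the track index into the low bits (mask 0x0FFFFFFF), with output slots reserved earlier through atomicAdd on an insert pointer. A small host-side sketch of just the pack/unpack convention; the constant names are mine, not Allen's.

#include <cassert>
#include <cstdint>

constexpr uint32_t kTrackletFlag = 0x80000000u;  // marks a three-hit ("doubtful") track
constexpr uint32_t kTrackMask    = 0x0FFFFFFFu;  // low bits hold the track number

inline uint32_t pack(uint32_t trackno, bool is_tracklet) {
  return (is_tracklet ? kTrackletFlag : 0u) | (trackno & kTrackMask);
}

int main() {
  const uint32_t fulltrackno = pack(1234u, true);
  const bool track_flag = (fulltrackno & kTrackletFlag) == kTrackletFlag;
  const uint32_t trackno = fulltrackno & kTrackMask;
  assert(track_flag && trackno == 1234u);
  return 0;
}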
d0f0be53feb93dbcf5133e3e40a1eba0ee7806d5.cu
#include "ProcessModules.cuh" #include "TrackSeeding.cuh" #include "TrackForwarding.cuh" /** * @brief Processes modules in decreasing order with some stride */ __device__ void process_modules( Velo::Module* module_data, float* shared_best_fits, const uint starting_module, const uint stride, bool* hit_used, const short* h0_candidates, const short* h2_candidates, const uint number_of_modules, const uint* module_hitStarts, const uint* module_hitNums, const float* hit_Xs, const float* hit_Ys, const float* hit_Zs, const float* hit_Phis, uint* weaktracks_insert_pointer, uint* tracklets_insert_pointer, uint* ttf_insert_pointer, uint* tracks_insert_pointer, uint* tracks_to_follow, Velo::TrackletHits* weak_tracks, Velo::TrackletHits* tracklets, Velo::TrackHits* tracks, const uint number_of_hits, unsigned short* h1_rel_indices, uint* local_number_of_hits, const uint hit_offset, const float* dev_velo_module_zs ) { auto first_module = starting_module; // Prepare the first seeding iteration // Load shared module information if (threadIdx.x < 6) { const auto module_number = first_module - threadIdx.x; module_data[threadIdx.x].hitStart = module_hitStarts[module_number] - hit_offset; module_data[threadIdx.x].hitNums = module_hitNums[module_number]; module_data[threadIdx.x].z = dev_velo_module_zs[module_number]; } // Due to shared module data loading __syncthreads(); // Do first track seeding track_seeding( shared_best_fits, hit_Xs, hit_Ys, hit_Zs, module_data, h0_candidates, h2_candidates, hit_used, tracklets_insert_pointer, ttf_insert_pointer, tracklets, tracks_to_follow, h1_rel_indices, local_number_of_hits ); // Prepare forwarding - seeding loop uint last_ttf = 0; first_module -= stride; while (first_module >= 4) { // Due to WAR between trackSeedingFirst and the code below __syncthreads(); // Iterate in modules // Load in shared if (threadIdx.x < 6) { const auto module_number = first_module - threadIdx.x; module_data[threadIdx.x].hitStart = module_hitStarts[module_number] - hit_offset; module_data[threadIdx.x].hitNums = module_hitNums[module_number]; module_data[threadIdx.x].z = dev_velo_module_zs[module_number]; } const auto prev_ttf = last_ttf; last_ttf = ttf_insert_pointer[0]; const auto diff_ttf = last_ttf - prev_ttf; // Reset atomics local_number_of_hits[0] = 0; // Due to module data loading __syncthreads(); // Track Forwarding track_forwarding( hit_Xs, hit_Ys, hit_Zs, hit_Phis, hit_used, tracks_insert_pointer, ttf_insert_pointer, weaktracks_insert_pointer, module_data, diff_ttf, tracks_to_follow, weak_tracks, prev_ttf, tracklets, tracks, number_of_hits ); // Due to ttf_insert_pointer __syncthreads(); // Seeding track_seeding( shared_best_fits, hit_Xs, hit_Ys, hit_Zs, module_data, h0_candidates, h2_candidates, hit_used, tracklets_insert_pointer, ttf_insert_pointer, tracklets, tracks_to_follow, h1_rel_indices, local_number_of_hits ); first_module -= stride; } // Due to last seeding ttf_insert_pointer __syncthreads(); const auto prev_ttf = last_ttf; last_ttf = ttf_insert_pointer[0]; const auto diff_ttf = last_ttf - prev_ttf; // Process the last bunch of track_to_follows for (int i=0; i<(diff_ttf + blockDim.x - 1) / blockDim.x; ++i) { const auto ttf_element = blockDim.x * i + threadIdx.x; if (ttf_element < diff_ttf) { const int fulltrackno = tracks_to_follow[(prev_ttf + ttf_element) % VeloTracking::ttf_modulo]; const bool track_flag = (fulltrackno & 0x80000000) == 0x80000000; const int trackno = fulltrackno & 0x0FFFFFFF; // Here we are only interested in three-hit tracks, // to mark them as 
"doubtful" if (track_flag) { const auto weakP = atomicAdd(weaktracks_insert_pointer, 1); assert(weakP < number_of_hits); weak_tracks[weakP] = tracklets[trackno]; } } } }
63795968fd676b6a4366e6e6b2ecfcb549ac8dd0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string.h> #include <stdint.h> #include <stdlib.h> #include <time.h> #include <hip/hip_runtime.h> #include <sys/stat.h> #include <math.h> #define CTR 1 #include "aes.h" #define THREADS_PER_BLOCK 1024 void test_xcrypt_ctr(const char* xcrypt) { uint8_t key[16] = { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c }; uint8_t iv[16] = { 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff }; struct AES_ctx ctx; // Encryption part if (strcmp(xcrypt, "encrypt")==0) { AES_init_ctx_iv(&ctx, key, iv); printf("Encryption kernel launching..\n"); static const char filenamer[] = "plaintext.txt"; static const char filenamee[] = "encrypted.txt"; FILE *fr = fopen(filenamer, "rb"); FILE *fe = fopen(filenamee, "wb"); if (fr == NULL || fe == NULL) exit(EXIT_FAILURE); size_t len = 0, BufContSz; ssize_t read; int i; uint8_t *d_file; struct AES_ctx *d_ctx; // Loading key and IV to GPU hipMalloc(&d_ctx, sizeof(AES_ctx)); hipMemcpy(d_ctx, &ctx, sizeof(AES_ctx), hipMemcpyHostToDevice); struct stat buffer; int status = stat(filenamer, &buffer); if (status != 0) printf("File size reading error"); size_t file_size = buffer.st_size; uint8_t *file_data = (uint8_t *)malloc(file_size); uint8_t *file_dat = (uint8_t *)malloc(file_size); // Loading file data to GPU global memory hipMalloc(&d_file, file_size); fread(file_data, 1, file_size, fr); // calculating number of blocks and number of threads int numThreads = (file_size+63)/64; int numBlocks = numThreads/1024; int sqrtBlocks = sqrt(numBlocks)+1; hipMemcpy(d_file, file_data, file_size, hipMemcpyHostToDevice); dim3 dimBlock(32, 32, 1); dim3 dimGrid(sqrtBlocks, sqrtBlocks, 1); hipLaunchKernelGGL(( AES_CTR_xcrypt_buffer), dim3(dimGrid), dim3(dimBlock), 0, 0, d_ctx, d_file, 64, file_size); hipDeviceSynchronize(); // copying back encrypted data to the CPU hipMemcpy(file_dat, d_file, file_size, hipMemcpyDeviceToHost); // writing it to encrypted.txt fwrite(file_dat, 1, file_size, fe); fclose(fr); fclose(fe); hipFree(d_file); hipFree(d_ctx); free(file_data); free(file_dat); } // Decryption part - similar to encryption else { AES_init_ctx_iv(&ctx, key, iv); printf("Decryption kernel launching..\n"); static const char filenamed[] = "decrypted.txt"; static const char filenamee[] = "encrypted.txt"; FILE *fe = fopen(filenamee, "rb"); FILE *fd = fopen(filenamed, "wb"); if (fe == NULL || fd == NULL) exit(EXIT_FAILURE); size_t len = 0, BufContSz; ssize_t read; int i; uint8_t *de_file; struct AES_ctx *de_ctx; hipMalloc(&de_ctx, sizeof(AES_ctx)); hipMemcpy(de_ctx, &ctx, sizeof(AES_ctx), hipMemcpyHostToDevice); struct stat buffer; int status = stat(filenamee, &buffer); if (status != 0) printf("File size reading error"); size_t file_size = buffer.st_size; uint8_t *file_data = (uint8_t *)malloc(file_size); uint8_t *file_dat = (uint8_t *)malloc(file_size); hipMalloc(&de_file, file_size); fread(file_data, 1, file_size, fe); int numThreads = (file_size+63)/64; int numBlocks = numThreads/1024; int sqrtBlocks = sqrt(numBlocks)+1; hipMemcpy(de_file, file_data, file_size, hipMemcpyHostToDevice); dim3 dimBlock(32, 32, 1); dim3 dimGrid(sqrtBlocks, sqrtBlocks, 1); hipLaunchKernelGGL(( AES_CTR_xcrypt_buffer), dim3(dimGrid), dim3(dimBlock), 0, 0, de_ctx, de_file, 64, file_size); hipDeviceSynchronize(); hipMemcpy(file_dat, de_file, file_size, hipMemcpyDeviceToHost); fwrite(file_dat, 1, file_size, fd); fclose(fe); fclose(fd); 
hipFree(de_file); hipFree(de_ctx); free(file_data); free(file_dat); } } int main(void) { clock_t start, end; double cpu_time_used; start = clock(); test_xcrypt_ctr("encrypt"); end = clock(); cpu_time_used = ((double)(end - start))/CLOCKS_PER_SEC; printf("Encryption time: %fs\n", cpu_time_used); start = clock(); test_xcrypt_ctr("decrypt"); end = clock(); cpu_time_used = ((double)(end - start))/CLOCKS_PER_SEC; printf("Decryption time: %fs\n", cpu_time_used); return 0; }
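The host side of this file calls hipMalloc, hipMemcpy, and the kernel launch without ever checking a return code, so a failed allocation or copy only shows up as a corrupt output file. A minimal checking wrapper, written here as a sketch in CUDA terms (it is not part of the original sources; hipify would translate the same pattern to hipError_t/hipGetErrorString):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with a readable message when any runtime call fails.
#define CHECK_CUDA(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                      \
                    cudaGetErrorString(err_), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

// Usage with the buffers above, e.g.:
//   CHECK_CUDA(cudaMalloc(&d_file, file_size));
//   CHECK_CUDA(cudaMemcpy(d_file, file_data, file_size, cudaMemcpyHostToDevice));

The compute file later in this collection (ffe70b43...) wraps its runtime calls in exactly this kind of checkCudaErrors macro.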
63795968fd676b6a4366e6e6b2ecfcb549ac8dd0.cu
#include <stdio.h> #include <string.h> #include <stdint.h> #include <stdlib.h> #include <time.h> #include <cuda.h> #include <sys/stat.h> #include <math.h> #define CTR 1 #include "aes.h" #define THREADS_PER_BLOCK 1024 void test_xcrypt_ctr(const char* xcrypt) { uint8_t key[16] = { 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c }; uint8_t iv[16] = { 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff }; struct AES_ctx ctx; // Encryption part if (strcmp(xcrypt, "encrypt")==0) { AES_init_ctx_iv(&ctx, key, iv); printf("Encryption kernel launching..\n"); static const char filenamer[] = "plaintext.txt"; static const char filenamee[] = "encrypted.txt"; FILE *fr = fopen(filenamer, "rb"); FILE *fe = fopen(filenamee, "wb"); if (fr == NULL || fe == NULL) exit(EXIT_FAILURE); size_t len = 0, BufContSz; ssize_t read; int i; uint8_t *d_file; struct AES_ctx *d_ctx; // Loading key and IV to GPU cudaMalloc(&d_ctx, sizeof(AES_ctx)); cudaMemcpy(d_ctx, &ctx, sizeof(AES_ctx), cudaMemcpyHostToDevice); struct stat buffer; int status = stat(filenamer, &buffer); if (status != 0) printf("File size reading error"); size_t file_size = buffer.st_size; uint8_t *file_data = (uint8_t *)malloc(file_size); uint8_t *file_dat = (uint8_t *)malloc(file_size); // Loading file data to GPU global memory cudaMalloc(&d_file, file_size); fread(file_data, 1, file_size, fr); // calculating number of blocks and number of threads int numThreads = (file_size+63)/64; int numBlocks = numThreads/1024; int sqrtBlocks = sqrt(numBlocks)+1; cudaMemcpy(d_file, file_data, file_size, cudaMemcpyHostToDevice); dim3 dimBlock(32, 32, 1); dim3 dimGrid(sqrtBlocks, sqrtBlocks, 1); AES_CTR_xcrypt_buffer<<<dimGrid, dimBlock>>>(d_ctx, d_file, 64, file_size); cudaDeviceSynchronize(); // copying back encrypted data to the CPU cudaMemcpy(file_dat, d_file, file_size, cudaMemcpyDeviceToHost); // writing it to encrypted.txt fwrite(file_dat, 1, file_size, fe); fclose(fr); fclose(fe); cudaFree(d_file); cudaFree(d_ctx); free(file_data); free(file_dat); } // Decryption part - similar to encryption else { AES_init_ctx_iv(&ctx, key, iv); printf("Decryption kernel launching..\n"); static const char filenamed[] = "decrypted.txt"; static const char filenamee[] = "encrypted.txt"; FILE *fe = fopen(filenamee, "rb"); FILE *fd = fopen(filenamed, "wb"); if (fe == NULL || fd == NULL) exit(EXIT_FAILURE); size_t len = 0, BufContSz; ssize_t read; int i; uint8_t *de_file; struct AES_ctx *de_ctx; cudaMalloc(&de_ctx, sizeof(AES_ctx)); cudaMemcpy(de_ctx, &ctx, sizeof(AES_ctx), cudaMemcpyHostToDevice); struct stat buffer; int status = stat(filenamee, &buffer); if (status != 0) printf("File size reading error"); size_t file_size = buffer.st_size; uint8_t *file_data = (uint8_t *)malloc(file_size); uint8_t *file_dat = (uint8_t *)malloc(file_size); cudaMalloc(&de_file, file_size); fread(file_data, 1, file_size, fe); int numThreads = (file_size+63)/64; int numBlocks = numThreads/1024; int sqrtBlocks = sqrt(numBlocks)+1; cudaMemcpy(de_file, file_data, file_size, cudaMemcpyHostToDevice); dim3 dimBlock(32, 32, 1); dim3 dimGrid(sqrtBlocks, sqrtBlocks, 1); AES_CTR_xcrypt_buffer<<<dimGrid, dimBlock>>>(de_ctx, de_file, 64, file_size); cudaDeviceSynchronize(); cudaMemcpy(file_dat, de_file, file_size, cudaMemcpyDeviceToHost); fwrite(file_dat, 1, file_size, fd); fclose(fe); fclose(fd); cudaFree(de_file); cudaFree(de_ctx); free(file_data); free(file_dat); } } int main(void) { clock_t start, end; double 
cpu_time_used; start = clock(); test_xcrypt_ctr("encrypt"); end = clock(); cpu_time_used = ((double)(end - start))/CLOCKS_PER_SEC; printf("Encryption time: %fs\n", cpu_time_used); start = clock(); test_xcrypt_ctr("decrypt"); end = clock(); cpu_time_used = ((double)(end - start))/CLOCKS_PER_SEC; printf("Decryption time: %fs\n", cpu_time_used); return 0; }
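In both versions each GPU thread processes one 64-byte slice of the input in CTR mode (the kernel is passed the slice length 64 and the total file size), so the launch has to cover at least ceil(file_size / 64) threads. The original sizing rounds numBlocks down with integer division and then compensates by taking sqrt(numBlocks) + 1 for the square grid; the same intent written with explicit ceiling division (a sketch, not taken from either file) looks like:

// One thread per 64-byte CTR chunk; 32x32 = 1024 threads per block;
// square grid just large enough to cover all blocks.
size_t numChunks  = (file_size + 63) / 64;
size_t numBlocks  = (numChunks + 1023) / 1024;
int    sqrtBlocks = (int)ceil(sqrt((double)numBlocks));
if (sqrtBlocks < 1) sqrtBlocks = 1;          // guard against an empty input file

dim3 dimBlock(32, 32, 1);
dim3 dimGrid(sqrtBlocks, sqrtBlocks, 1);
AES_CTR_xcrypt_buffer<<<dimGrid, dimBlock>>>(d_ctx, d_file, 64, file_size);
cudaDeviceSynchronize();

Because CTR mode turns the block cipher into a stream cipher, the same code path serves both directions, which is why the decryption branch is a near-verbatim copy of the encryption branch.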
ffe70b43e932974c151e5abb7e4a4582060dc703.hip
// !!! This is a file automatically generated by hipify!!! #include "compute.h" #include "math.h" #include "hip/hip_runtime.h" #include <thrust/extrema.h> #include <thrust/device_ptr.h> #define MIN_INTENSITY 1e-10f #define MAX_INTENSITY 1e10f #define THREAD_COUNT 1024 inline void on_error(hipError_t errcode, const char *file, int line) { if (errcode != hipSuccess) { fprintf(stderr, "CUDA error: %s (%s:%d)\n", hipGetErrorString(errcode), file, line); exit(EXIT_FAILURE); } } #define checkCudaErrors(ret) on_error((ret), __FILE__, __LINE__) #define getLastCudaError() on_error(hipGetLastError(), __FILE__, __LINE__) extern "C" void run_kernel( const point_charge_t *charges, const int charge_count, const bounds_t *bounds, uint32_t *result); __global__ void calculate_intensity( const point_charge_t *charges, const bounds_t *bounds, float *result) { const float k = 8.99e-9f; // Coulomb's constant point_charge_t charge = charges[threadIdx.x]; float x_scaled = bounds->x_min + blockIdx.x * bounds->x_scale / (double)gridDim.x; float y_scaled = bounds->y_min + blockIdx.y * bounds->y_scale / (double)gridDim.y; float dx = charge.x - x_scaled; float dy = charge.y - y_scaled; float r = sqrt(dx * dx + dy * dy); float intensity = k * charge.charge / r; unsigned long offset = blockDim.x * (gridDim.x * blockIdx.y + blockIdx.x); result[2 * offset + threadIdx.x] = intensity * dx / r; result[2 * offset + blockDim.x + threadIdx.x] = intensity * dy / r; } __global__ void calculate_intensity_slow( const point_charge_t *charges, const bounds_t *bounds, const unsigned charge_count, float *result, int n) { const float k = 8.99e-9f; // Coulomb's constant unsigned int pixel_idx = blockIdx.x * blockDim.x + threadIdx.x; if (pixel_idx >= n) return; unsigned int x = pixel_idx % bounds->width; unsigned int y = pixel_idx / bounds->width; result[2 * pixel_idx] = 0; result[2 * pixel_idx + 1] = 0; for (unsigned i = 0; i < charge_count; ++i) { point_charge_t charge = charges[i]; float x_scaled = bounds->x_min + x * bounds->x_scale / (double)bounds->width; float y_scaled = bounds->y_min + y * bounds->y_scale / (double)bounds->height; float dx = charge.x - x_scaled; float dy = charge.y - y_scaled; float r = sqrt(dx * dx + dy * dy); float intensity = k * charge.charge / r; result[2 * pixel_idx] += intensity * dx / r; result[2 * pixel_idx + 1] += intensity * dy / r; } } __global__ void add_intensities(float *g_idata, float *g_odata) { extern __shared__ float sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = g_idata[i]; __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) { if (tid < s) sdata[tid] += sdata[tid + s]; __syncthreads(); } // manually unrolled last warp unsigned blocksize = blockDim.x; if ((blocksize >= 64) && (tid < 32)) sdata[tid] += sdata[tid + 32]; if ((blocksize >= 32) && (tid < 16)) sdata[tid] += sdata[tid + 16]; if ((blocksize >= 16) && (tid < 8)) sdata[tid] += sdata[tid + 8]; if ((blocksize >= 8) && (tid < 4)) sdata[tid] += sdata[tid + 4]; if ((blocksize >= 4) && (tid < 2)) sdata[tid] += sdata[tid + 2]; if ((blocksize >= 2) && (tid < 1)) sdata[tid] += sdata[tid + 1]; if (tid == 0) g_odata[blockIdx.x] = sdata[tid]; } __global__ void total_intensity(float *g_idata, float *g_odata, unsigned int n) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { float x = g_idata[2 * i]; float y = g_idata[2 * i + 1]; __syncthreads(); g_odata[i] = fmax(fmin(sqrt(x * x + y * y), MAX_INTENSITY), MIN_INTENSITY); } } __global__ void 
intensity_to_color(float *g_idata, uint32_t *g_odata, const float min, const float max, int n) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n) return; float diff = max - min; float intensity = g_idata[i]; intensity = fmax(intensity, MIN_INTENSITY); intensity = fmin(intensity, MAX_INTENSITY); float log = log10(intensity); float scaled = (log - min) / diff; float hue = (1 - scaled) * 300; float h_prim = hue / 60.0; float f_x = 1 - fabs(fmod(h_prim, 2.0f) - 1); uint8_t x = (uint8_t)(f_x * 0xFF); unsigned int rounded_h = (unsigned int) h_prim + 1; g_odata[i] = x << ((rounded_h % 3) * 8) | 0xff << (8 * (2 - ((rounded_h / 2) % 3))); } extern "C" void run_kernel(const point_charge_t *charges, const int charge_count, const bounds_t *bounds, uint32_t *result) { const unsigned long pixel_count = bounds->width * bounds->height; const unsigned int charges_size = charge_count * sizeof(point_charge_t); const unsigned int bounds_size = sizeof(bounds_t); const unsigned long result_size = 2 * sizeof(float) * charge_count * pixel_count; const unsigned long reduced_size = sizeof(uint32_t) * pixel_count; point_charge_t *d_charges; checkCudaErrors(hipMalloc((void **)&d_charges, charges_size)); checkCudaErrors(hipMemcpy(d_charges, charges, charges_size, hipMemcpyHostToDevice)); bounds_t *d_bounds; checkCudaErrors(hipMalloc((void**)&d_bounds, bounds_size)); checkCudaErrors(hipMemcpy(d_bounds, bounds, bounds_size, hipMemcpyHostToDevice)); float *d_result_vec; int block_count = pixel_count / THREAD_COUNT + 1; dim3 max_thread_grid(block_count, 1, 1); size_t free, total; checkCudaErrors(hipMemGetInfo(&free, &total)); if (free >= result_size) { checkCudaErrors(hipMalloc((void**)&d_result_vec, result_size)); dim3 charge_intensity_grid(bounds->width, bounds->height, 1); dim3 threads(charge_count, 1, 1); hipLaunchKernelGGL(( calculate_intensity), dim3(charge_intensity_grid), dim3(threads) , 0, 0, d_charges, d_bounds, d_result_vec); getLastCudaError(); dim3 component_intensity_grid(2 * bounds->width * bounds->height, 1, 1); unsigned int smem = sizeof(float) * charge_count; hipLaunchKernelGGL(( add_intensities), dim3(component_intensity_grid), dim3(threads), smem , 0, d_result_vec, d_result_vec); getLastCudaError(); } else { checkCudaErrors(hipMalloc((void**)&d_result_vec, 2 * reduced_size)); hipLaunchKernelGGL(( calculate_intensity_slow), dim3(max_thread_grid), dim3(THREAD_COUNT) , 0, 0, d_charges, d_bounds, charge_count, d_result_vec, pixel_count); getLastCudaError(); } hipLaunchKernelGGL(( total_intensity), dim3(max_thread_grid), dim3(THREAD_COUNT) , 0, 0, d_result_vec, d_result_vec, pixel_count); getLastCudaError(); thrust::device_ptr<float> intensities_ptr(d_result_vec); float min = *thrust::min_element(intensities_ptr, intensities_ptr + pixel_count); float max = *thrust::max_element(intensities_ptr, intensities_ptr + pixel_count); min = log10(fmax(min, MIN_INTENSITY)); max = log10(fmin(max, MAX_INTENSITY)); hipLaunchKernelGGL(( intensity_to_color), dim3(max_thread_grid), dim3(THREAD_COUNT) , 0, 0, d_result_vec, (uint32_t*)d_result_vec, min, max, pixel_count); getLastCudaError(); checkCudaErrors(hipMemcpy(result, (uint32_t*)d_result_vec, reduced_size, hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(d_charges)); checkCudaErrors(hipFree(d_bounds)); checkCudaErrors(hipFree(d_result_vec)); }
ffe70b43e932974c151e5abb7e4a4582060dc703.cu
#include "compute.h" #include "math.h" #include "cuda_runtime.h" #include <thrust/extrema.h> #include <thrust/device_ptr.h> #define MIN_INTENSITY 1e-10f #define MAX_INTENSITY 1e10f #define THREAD_COUNT 1024 inline void on_error(cudaError_t errcode, const char *file, int line) { if (errcode != cudaSuccess) { fprintf(stderr, "CUDA error: %s (%s:%d)\n", cudaGetErrorString(errcode), file, line); exit(EXIT_FAILURE); } } #define checkCudaErrors(ret) on_error((ret), __FILE__, __LINE__) #define getLastCudaError() on_error(cudaGetLastError(), __FILE__, __LINE__) extern "C" void run_kernel( const point_charge_t *charges, const int charge_count, const bounds_t *bounds, uint32_t *result); __global__ void calculate_intensity( const point_charge_t *charges, const bounds_t *bounds, float *result) { const float k = 8.99e-9f; // Coulomb's constant point_charge_t charge = charges[threadIdx.x]; float x_scaled = bounds->x_min + blockIdx.x * bounds->x_scale / (double)gridDim.x; float y_scaled = bounds->y_min + blockIdx.y * bounds->y_scale / (double)gridDim.y; float dx = charge.x - x_scaled; float dy = charge.y - y_scaled; float r = sqrt(dx * dx + dy * dy); float intensity = k * charge.charge / r; unsigned long offset = blockDim.x * (gridDim.x * blockIdx.y + blockIdx.x); result[2 * offset + threadIdx.x] = intensity * dx / r; result[2 * offset + blockDim.x + threadIdx.x] = intensity * dy / r; } __global__ void calculate_intensity_slow( const point_charge_t *charges, const bounds_t *bounds, const unsigned charge_count, float *result, int n) { const float k = 8.99e-9f; // Coulomb's constant unsigned int pixel_idx = blockIdx.x * blockDim.x + threadIdx.x; if (pixel_idx >= n) return; unsigned int x = pixel_idx % bounds->width; unsigned int y = pixel_idx / bounds->width; result[2 * pixel_idx] = 0; result[2 * pixel_idx + 1] = 0; for (unsigned i = 0; i < charge_count; ++i) { point_charge_t charge = charges[i]; float x_scaled = bounds->x_min + x * bounds->x_scale / (double)bounds->width; float y_scaled = bounds->y_min + y * bounds->y_scale / (double)bounds->height; float dx = charge.x - x_scaled; float dy = charge.y - y_scaled; float r = sqrt(dx * dx + dy * dy); float intensity = k * charge.charge / r; result[2 * pixel_idx] += intensity * dx / r; result[2 * pixel_idx + 1] += intensity * dy / r; } } __global__ void add_intensities(float *g_idata, float *g_odata) { extern __shared__ float sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = g_idata[i]; __syncthreads(); for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) { if (tid < s) sdata[tid] += sdata[tid + s]; __syncthreads(); } // manually unrolled last warp unsigned blocksize = blockDim.x; if ((blocksize >= 64) && (tid < 32)) sdata[tid] += sdata[tid + 32]; if ((blocksize >= 32) && (tid < 16)) sdata[tid] += sdata[tid + 16]; if ((blocksize >= 16) && (tid < 8)) sdata[tid] += sdata[tid + 8]; if ((blocksize >= 8) && (tid < 4)) sdata[tid] += sdata[tid + 4]; if ((blocksize >= 4) && (tid < 2)) sdata[tid] += sdata[tid + 2]; if ((blocksize >= 2) && (tid < 1)) sdata[tid] += sdata[tid + 1]; if (tid == 0) g_odata[blockIdx.x] = sdata[tid]; } __global__ void total_intensity(float *g_idata, float *g_odata, unsigned int n) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { float x = g_idata[2 * i]; float y = g_idata[2 * i + 1]; __syncthreads(); g_odata[i] = fmax(fmin(sqrt(x * x + y * y), MAX_INTENSITY), MIN_INTENSITY); } } __global__ void intensity_to_color(float *g_idata, uint32_t *g_odata, const float 
min, const float max, int n) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n) return; float diff = max - min; float intensity = g_idata[i]; intensity = fmax(intensity, MIN_INTENSITY); intensity = fmin(intensity, MAX_INTENSITY); float log = log10(intensity); float scaled = (log - min) / diff; float hue = (1 - scaled) * 300; float h_prim = hue / 60.0; float f_x = 1 - fabs(fmod(h_prim, 2.0f) - 1); uint8_t x = (uint8_t)(f_x * 0xFF); unsigned int rounded_h = (unsigned int) h_prim + 1; g_odata[i] = x << ((rounded_h % 3) * 8) | 0xff << (8 * (2 - ((rounded_h / 2) % 3))); } extern "C" void run_kernel(const point_charge_t *charges, const int charge_count, const bounds_t *bounds, uint32_t *result) { const unsigned long pixel_count = bounds->width * bounds->height; const unsigned int charges_size = charge_count * sizeof(point_charge_t); const unsigned int bounds_size = sizeof(bounds_t); const unsigned long result_size = 2 * sizeof(float) * charge_count * pixel_count; const unsigned long reduced_size = sizeof(uint32_t) * pixel_count; point_charge_t *d_charges; checkCudaErrors(cudaMalloc((void **)&d_charges, charges_size)); checkCudaErrors(cudaMemcpy(d_charges, charges, charges_size, cudaMemcpyHostToDevice)); bounds_t *d_bounds; checkCudaErrors(cudaMalloc((void**)&d_bounds, bounds_size)); checkCudaErrors(cudaMemcpy(d_bounds, bounds, bounds_size, cudaMemcpyHostToDevice)); float *d_result_vec; int block_count = pixel_count / THREAD_COUNT + 1; dim3 max_thread_grid(block_count, 1, 1); size_t free, total; checkCudaErrors(cudaMemGetInfo(&free, &total)); if (free >= result_size) { checkCudaErrors(cudaMalloc((void**)&d_result_vec, result_size)); dim3 charge_intensity_grid(bounds->width, bounds->height, 1); dim3 threads(charge_count, 1, 1); calculate_intensity<<< charge_intensity_grid, threads >>>(d_charges, d_bounds, d_result_vec); getLastCudaError(); dim3 component_intensity_grid(2 * bounds->width * bounds->height, 1, 1); unsigned int smem = sizeof(float) * charge_count; add_intensities<<< component_intensity_grid, threads, smem >>>(d_result_vec, d_result_vec); getLastCudaError(); } else { checkCudaErrors(cudaMalloc((void**)&d_result_vec, 2 * reduced_size)); calculate_intensity_slow<<< max_thread_grid, THREAD_COUNT >>>(d_charges, d_bounds, charge_count, d_result_vec, pixel_count); getLastCudaError(); } total_intensity<<< max_thread_grid, THREAD_COUNT >>>(d_result_vec, d_result_vec, pixel_count); getLastCudaError(); thrust::device_ptr<float> intensities_ptr(d_result_vec); float min = *thrust::min_element(intensities_ptr, intensities_ptr + pixel_count); float max = *thrust::max_element(intensities_ptr, intensities_ptr + pixel_count); min = log10(fmax(min, MIN_INTENSITY)); max = log10(fmin(max, MAX_INTENSITY)); intensity_to_color<<< max_thread_grid, THREAD_COUNT >>>(d_result_vec, (uint32_t*)d_result_vec, min, max, pixel_count); getLastCudaError(); checkCudaErrors(cudaMemcpy(result, (uint32_t*)d_result_vec, reduced_size, cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(d_charges)); checkCudaErrors(cudaFree(d_bounds)); checkCudaErrors(cudaFree(d_result_vec)); }
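add_intensities finishes its shared-memory tree reduction with a manually unrolled last warp (the chain of sdata[tid] += sdata[tid + 32] ... += sdata[tid + 1] with no __syncthreads() between steps). That pattern relies on the warp executing in lockstep; since Volta's independent thread scheduling it needs volatile shared memory plus __syncwarp(), or register shuffles, to remain correct, and it also assumes a power-of-two block size (blockDim.x is set to charge_count here). A shuffle-based final step, shown as a sketch rather than as part of the original kernel:

// Reduce partial sums held in registers across one warp; lane 0 ends up with
// the warp's total. __shfl_down_sync replaces the volatile-shared-memory chain
// and is explicitly synchronized by its full-warp mask.
__device__ float warp_reduce_sum(float val) {
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffffu, val, offset);
    return val;
}

Inside add_intensities this would be used after the loop has brought the block down to at most 64 partial sums: threads 0-31 fold in sdata[tid + 32] where it exists, call warp_reduce_sum on the result, and thread 0 writes the total to g_odata[blockIdx.x].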
f84098265aca43e7d301760bda139239b600fbe3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "GpuPoissonSolver.h" __global__ void jacobiIter(char* matrix, int* col_idx, int* row_ptr, float* unk_vect, float* rhs_vect, int matrix_dim, int matrix_size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= matrix_dim) return; int n = idx + 1 < matrix_dim ? row_ptr[idx + 1] : matrix_size; float sum = 0.f; for (int i = row_ptr[idx]; i < n; i++) { int j = col_idx[i]; if (idx != j) { sum += matrix[i] * unk_vect[j]; } } unk_vect[idx] = (rhs_vect[idx] - sum) / -4.f; } __global__ void fitRange(float* red, float* green, float* blue, int len) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= len) return; if (red[idx] < 0.f) red[idx] = 0.f; if (red[idx] > 255.f) red[idx] = 255.f; if (green[idx] < 0.f) green[idx] = 0.f; if (green[idx] > 255.f) green[idx] = 255.f; if (blue[idx] < 0.f) blue[idx] = 0.f; if (blue[idx] > 255.f) blue[idx] = 255.f; } void gpuJacobiSolver(char* h_matrix, int* h_col_idx, int* h_row_ptr, float* h_unk_vect_red, float* h_unk_vect_green, float* h_unk_vect_blue, float* h_rhs_vect_red, float* h_rhs_vect_green, float* h_rhs_vect_blue, int matrix_dim, int matrix_size, int iters) { char* d_matrix; int* d_col_idx; int* d_row_ptr; float* d_unk_vect_red; float* d_unk_vect_green; float* d_unk_vect_blue; float* d_rhs_vect_red; float* d_rhs_vect_green; float* d_rhs_vect_blue; hipMalloc((void**) &d_matrix, matrix_size * sizeof(char)); hipMalloc((void**) &d_col_idx, matrix_size * sizeof(int)); hipMalloc((void**) &d_row_ptr, matrix_dim * sizeof(int)); hipMalloc((void**) &d_unk_vect_red, matrix_dim * sizeof(float)); hipMalloc((void**) &d_unk_vect_green, matrix_dim * sizeof(float)); hipMalloc((void**) &d_unk_vect_blue, matrix_dim * sizeof(float)); hipMalloc((void**) &d_rhs_vect_red, matrix_dim * sizeof(float)); hipMalloc((void**) &d_rhs_vect_green, matrix_dim * sizeof(float)); hipMalloc((void**) &d_rhs_vect_blue, matrix_dim * sizeof(float)); hipMemcpy(d_matrix, h_matrix, matrix_size * sizeof(char), hipMemcpyHostToDevice); hipMemcpy(d_col_idx, h_col_idx, matrix_size * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_row_ptr, h_row_ptr, matrix_dim * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_unk_vect_red, h_unk_vect_red, matrix_dim * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_unk_vect_green, h_unk_vect_green, matrix_dim * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_unk_vect_blue, h_unk_vect_blue, matrix_dim * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_rhs_vect_red, h_rhs_vect_red, matrix_dim * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_rhs_vect_green, h_rhs_vect_green, matrix_dim * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_rhs_vect_blue, h_rhs_vect_blue, matrix_dim * sizeof(float), hipMemcpyHostToDevice); int threadsPerBlock = 128; int blocks = (matrix_dim / threadsPerBlock) + !!(matrix_dim % threadsPerBlock); for (int i = 0; i < iters; i++) { hipLaunchKernelGGL(( jacobiIter), dim3(blocks), dim3(threadsPerBlock), 0, 0, d_matrix, d_col_idx, d_row_ptr, d_unk_vect_red, d_rhs_vect_red, matrix_dim, matrix_size); hipLaunchKernelGGL(( jacobiIter), dim3(blocks), dim3(threadsPerBlock), 0, 0, d_matrix, d_col_idx, d_row_ptr, d_unk_vect_green, d_rhs_vect_green, matrix_dim, matrix_size); hipLaunchKernelGGL(( jacobiIter), dim3(blocks), dim3(threadsPerBlock), 0, 0, d_matrix, d_col_idx, d_row_ptr, d_unk_vect_blue, d_rhs_vect_blue, matrix_dim, matrix_size); } hipLaunchKernelGGL(( fitRange), dim3(blocks), 
dim3(threadsPerBlock), 0, 0, d_unk_vect_red, d_unk_vect_green, d_unk_vect_blue, matrix_dim); hipMemcpy(h_unk_vect_red, d_unk_vect_red, matrix_dim * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(h_unk_vect_green, d_unk_vect_green, matrix_dim * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(h_unk_vect_blue, d_unk_vect_blue, matrix_dim * sizeof(float), hipMemcpyDeviceToHost); hipFree(d_matrix); hipFree(d_col_idx); hipFree(d_row_ptr); hipFree(d_unk_vect_red); hipFree(d_unk_vect_green); hipFree(d_unk_vect_blue); hipFree(d_rhs_vect_red); hipFree(d_rhs_vect_green); hipFree(d_rhs_vect_blue); }
f84098265aca43e7d301760bda139239b600fbe3.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "GpuPoissonSolver.h" __global__ void jacobiIter(char* matrix, int* col_idx, int* row_ptr, float* unk_vect, float* rhs_vect, int matrix_dim, int matrix_size) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= matrix_dim) return; int n = idx + 1 < matrix_dim ? row_ptr[idx + 1] : matrix_size; float sum = 0.f; for (int i = row_ptr[idx]; i < n; i++) { int j = col_idx[i]; if (idx != j) { sum += matrix[i] * unk_vect[j]; } } unk_vect[idx] = (rhs_vect[idx] - sum) / -4.f; } __global__ void fitRange(float* red, float* green, float* blue, int len) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx >= len) return; if (red[idx] < 0.f) red[idx] = 0.f; if (red[idx] > 255.f) red[idx] = 255.f; if (green[idx] < 0.f) green[idx] = 0.f; if (green[idx] > 255.f) green[idx] = 255.f; if (blue[idx] < 0.f) blue[idx] = 0.f; if (blue[idx] > 255.f) blue[idx] = 255.f; } void gpuJacobiSolver(char* h_matrix, int* h_col_idx, int* h_row_ptr, float* h_unk_vect_red, float* h_unk_vect_green, float* h_unk_vect_blue, float* h_rhs_vect_red, float* h_rhs_vect_green, float* h_rhs_vect_blue, int matrix_dim, int matrix_size, int iters) { char* d_matrix; int* d_col_idx; int* d_row_ptr; float* d_unk_vect_red; float* d_unk_vect_green; float* d_unk_vect_blue; float* d_rhs_vect_red; float* d_rhs_vect_green; float* d_rhs_vect_blue; cudaMalloc((void**) &d_matrix, matrix_size * sizeof(char)); cudaMalloc((void**) &d_col_idx, matrix_size * sizeof(int)); cudaMalloc((void**) &d_row_ptr, matrix_dim * sizeof(int)); cudaMalloc((void**) &d_unk_vect_red, matrix_dim * sizeof(float)); cudaMalloc((void**) &d_unk_vect_green, matrix_dim * sizeof(float)); cudaMalloc((void**) &d_unk_vect_blue, matrix_dim * sizeof(float)); cudaMalloc((void**) &d_rhs_vect_red, matrix_dim * sizeof(float)); cudaMalloc((void**) &d_rhs_vect_green, matrix_dim * sizeof(float)); cudaMalloc((void**) &d_rhs_vect_blue, matrix_dim * sizeof(float)); cudaMemcpy(d_matrix, h_matrix, matrix_size * sizeof(char), cudaMemcpyHostToDevice); cudaMemcpy(d_col_idx, h_col_idx, matrix_size * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_row_ptr, h_row_ptr, matrix_dim * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_unk_vect_red, h_unk_vect_red, matrix_dim * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_unk_vect_green, h_unk_vect_green, matrix_dim * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_unk_vect_blue, h_unk_vect_blue, matrix_dim * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_rhs_vect_red, h_rhs_vect_red, matrix_dim * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_rhs_vect_green, h_rhs_vect_green, matrix_dim * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_rhs_vect_blue, h_rhs_vect_blue, matrix_dim * sizeof(float), cudaMemcpyHostToDevice); int threadsPerBlock = 128; int blocks = (matrix_dim / threadsPerBlock) + !!(matrix_dim % threadsPerBlock); for (int i = 0; i < iters; i++) { jacobiIter<<<blocks, threadsPerBlock>>>(d_matrix, d_col_idx, d_row_ptr, d_unk_vect_red, d_rhs_vect_red, matrix_dim, matrix_size); jacobiIter<<<blocks, threadsPerBlock>>>(d_matrix, d_col_idx, d_row_ptr, d_unk_vect_green, d_rhs_vect_green, matrix_dim, matrix_size); jacobiIter<<<blocks, threadsPerBlock>>>(d_matrix, d_col_idx, d_row_ptr, d_unk_vect_blue, d_rhs_vect_blue, matrix_dim, matrix_size); } fitRange<<<blocks, threadsPerBlock>>>(d_unk_vect_red, d_unk_vect_green, d_unk_vect_blue, matrix_dim); cudaMemcpy(h_unk_vect_red, d_unk_vect_red, matrix_dim * sizeof(float), cudaMemcpyDeviceToHost); 
cudaMemcpy(h_unk_vect_green, d_unk_vect_green, matrix_dim * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(h_unk_vect_blue, d_unk_vect_blue, matrix_dim * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(d_matrix); cudaFree(d_col_idx); cudaFree(d_row_ptr); cudaFree(d_unk_vect_red); cudaFree(d_unk_vect_green); cudaFree(d_unk_vect_blue); cudaFree(d_rhs_vect_red); cudaFree(d_rhs_vect_green); cudaFree(d_rhs_vect_blue); }
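jacobiIter reads unk_vect and writes the updated component back into the same array within a single launch, so threads that run later in a sweep may already see updated neighbours: the method behaves like an asynchronous, Gauss-Seidel-style sweep rather than textbook Jacobi, and the exact result can vary between runs. A double-buffered variant keeps each sweep deterministic; the sketch below assumes the same CSR layout, char-valued matrix entries, and the fixed -4 diagonal of the 5-point Poisson stencil used above.

// Double-buffered Jacobi sweep: every thread reads only the previous iterate
// x_old and writes x_new, so a sweep is independent of thread scheduling.
__global__ void jacobiIterBuffered(const char* matrix, const int* col_idx,
                                   const int* row_ptr, const float* x_old,
                                   float* x_new, const float* rhs,
                                   int matrix_dim, int matrix_size) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= matrix_dim) return;

    int end = (row + 1 < matrix_dim) ? row_ptr[row + 1] : matrix_size;
    float sum = 0.f;
    for (int i = row_ptr[row]; i < end; i++) {
        int col = col_idx[i];
        if (col != row)
            sum += matrix[i] * x_old[col];    // off-diagonal contributions only
    }
    x_new[row] = (rhs[row] - sum) / -4.f;     // stencil diagonal is -4
}

On the host, each colour channel would get two device buffers whose pointers are swapped between launches (e.g. with std::swap), with the final result copied back from whichever buffer holds the last iterate.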
0eee95c625dabc5fc285c740cd6b21844d27dd41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <string> #include "paddle/fluid/operators/interpolate_op.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using framework::Tensor; using DataLayout = framework::DataLayout; template <typename T> __global__ void KeNearestNeighborInterpFw(const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = (align_corners) ? static_cast<int>(ratio_h * out_img_idy + 0.5) : static_cast<int>(ratio_h * out_img_idy); int in_img_idx = (align_corners) ? 
static_cast<int>(ratio_w * out_img_idx + 0.5) : static_cast<int>(ratio_w * out_img_idx); if (data_layout == DataLayout::kNCHW) { out[tid] = in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { out[tid] = in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } } } template <typename T> __global__ void KeNearestNeighborInterpBw(T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = (align_corners) ? static_cast<int>(ratio_h * out_img_idy + 0.5) : static_cast<int>(ratio_h * out_img_idy); int in_img_idx = (align_corners) ? static_cast<int>(ratio_w * out_img_idx + 0.5) : static_cast<int>(ratio_w * out_img_idx); T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } const T out_pos = out[out_id_h * output_w + out_id_w]; platform::CudaAtomicAdd(in_pos, out_pos); } } template <typename T> __global__ void KeLinearInterpFw(const T* in, const size_t in_img_w, const size_t input_w, T* out, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idx = tid % out_img_w; } else { out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; // w int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; // w_id T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { const T* in_pos = &in[out_id_h * out_id_w + channel_id * in_img_size + in_img_idx]; // linear interpolation out[out_id_h * output_w + out_id_w] = w2lambda * in_pos[0] + w1lambda * in_pos[w_id]; } else { const T* in_pos = &in[out_id_h * input_w + in_img_idx * num_channels + channel_id]; // linear interpolation out[out_id_h * output_w + out_id_w] = w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]; } } } template <typename T> __global__ void KeLinearInterpBw(T* in, const size_t in_img_w, const size_t input_w, const T* out, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idx = tid % out_img_w; } else { out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5 : ratio_w * out_img_idx; in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; // w int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; // w_id T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idx * num_channels + channel_id]; } const T* out_pos = &out[out_id_w]; if (data_layout == DataLayout::kNCHW) { platform::CudaAtomicAdd(&in_pos[0], w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id], w1lambda * out_pos[0]); } else { platform::CudaAtomicAdd(&in_pos[0], w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id * num_channels], w1lambda * out_pos[0]); } } } template <typename T> __global__ void KeBilinearInterpFw(const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { const T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; // bilinear interpolation out[out_id_h * output_w + out_id_w] = h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) + h1lambda * (w2lambda * in_pos[h_id * in_img_w] + w1lambda * in_pos[h_id * in_img_w + w_id]); } else { const T* in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; // bilinear interpolation out[out_id_h * output_w + out_id_w] = h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]) + h1lambda * (w2lambda * in_pos[h_id * in_img_w * num_channels] + w1lambda * in_pos[h_id * in_img_w * num_channels + w_id * num_channels]); } } } template <typename T> __global__ void KeBilinearInterpBw(T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_h, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = align_flag ? ratio_h * (out_img_idy + 0.5) - 0.5 : ratio_h * out_img_idy; in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5 : ratio_w * out_img_idx; in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } const T* out_pos = &out[out_id_h * output_w + out_id_w]; if (data_layout == DataLayout::kNCHW) { platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id], h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w], h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w + w_id], h1lambda * w1lambda * out_pos[0]); } else { platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id * num_channels], h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w * num_channels], h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos[h_id * in_img_w * num_channels + w_id * num_channels], h1lambda * w1lambda * out_pos[0]); } } } template <typename T> __global__ void KeTrilinearInterpFw(const T* in, const size_t in_img_d, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_d, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_d, const float ratio_h, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idt, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w; out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h; out_img_idx = tid % out_img_w; } else { out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels); out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idt = align_flag ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5) : static_cast<int>(ratio_d * out_img_idt); in_img_idt = (in_img_idt > 0) ? in_img_idt : 0; int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0; T src_d = ratio_d * (out_img_idt + 0.5) - 0.5; src_d = (src_d > 0) ? src_d : 0; T d1lambda = align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt; T d2lambda = 1.f - d1lambda; int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? 
static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size + (in_img_idt * in_img_h + in_img_idy) * in_img_w + in_img_idx; const T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w; const T* in_pos2 = &in[in_pos2_idx]; // trilinear interpolation out[out_id_h * output_w + out_id_w] = d2lambda * (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id]) + h1lambda * (w2lambda * in_pos1[h_id * in_img_w] + w1lambda * in_pos1[h_id * in_img_w + w_id])) + d1lambda * (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id]) + h1lambda * (w2lambda * in_pos2[h_id * in_img_w] + w1lambda * in_pos2[h_id * in_img_w + w_id])); } else { int in_pos1_idx = out_id_h * input_w + in_img_idt * in_img_h * in_img_w * num_channels + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id; const T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels; const T* in_pos2 = &in[in_pos2_idx]; // trilinear interpolation out[out_id_h * output_w + out_id_w] = d2lambda * (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id * num_channels]) + h1lambda * (w2lambda * in_pos1[h_id * in_img_w * num_channels] + w1lambda * in_pos1[h_id * in_img_w * num_channels + w_id * num_channels])) + d1lambda * (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id * num_channels]) + h1lambda * (w2lambda * in_pos2[h_id * in_img_w * num_channels] + w1lambda * in_pos2[h_id * in_img_w * num_channels + w_id * num_channels])); } } } template <typename T> __global__ void KeTrilinearInterpBw(T* in, const size_t in_img_d, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_d, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_d, const T ratio_h, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idt, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w; out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h; out_img_idx = tid % out_img_w; } else { out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels); out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idt = align_flag ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5) : static_cast<int>(ratio_d * out_img_idt); in_img_idt = (in_img_idt > 0) ? 
in_img_idt : 0; int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0; T src_d = ratio_d * (out_img_idt + 0.5) - 0.5; src_d = (src_d > 0) ? src_d : 0; T d1lambda = align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt; T d2lambda = 1.f - d1lambda; int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size + (in_img_idt * in_img_h + in_img_idy) * in_img_w + in_img_idx; T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w; T* in_pos2 = &in[in_pos2_idx]; const T* out_pos = &out[out_id_h * output_w + out_id_w]; // trilinear interpolation grad platform::CudaAtomicAdd(&in_pos1[0], d2lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[w_id], d2lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w], d2lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w + w_id], d2lambda * h1lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[0], d1lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[w_id], d1lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w], d1lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w + w_id], d1lambda * h1lambda * w1lambda * out_pos[0]); } else { int in_pos1_idx = out_id_h * input_w + in_img_idt * in_img_h * in_img_w * num_channels + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id; T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels; T* in_pos2 = &in[in_pos2_idx]; const T* out_pos = &out[out_id_h * output_w + out_id_w]; // trilinear interpolation grad platform::CudaAtomicAdd(&in_pos1[0], d2lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[w_id * num_channels], d2lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w * num_channels], d2lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos1[h_id * in_img_w * num_channels + w_id * num_channels], d2lambda * h1lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[0], d1lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[w_id * num_channels], d1lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w * num_channels], d1lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos2[h_id * in_img_w * num_channels + w_id * num_channels], d1lambda * h1lambda * w1lambda * out_pos[0]); } } } template <typename T> __device__ __forceinline__ static T Kecubic_interp( 
const T x0, const T x1, const T x2, const T x3, T t) { T coeffs[4]; T a = -0.75; T x_1 = t; T x_2 = 1.0 - t; coeffs[0] = cubic_convolution2<T>(x_1 + 1.0, a); coeffs[1] = cubic_convolution1<T>(x_1, a); coeffs[2] = cubic_convolution1<T>(x_2, a); coeffs[3] = cubic_convolution2<T>(x_2 + 1.0, a); return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3]; } template <typename T> __global__ void KeBicubicInterpFw(const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } T in_img_idy = align_corners ? static_cast<T>(ratio_h * out_img_idy) : static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5); int input_y = floorf(in_img_idy); const T y_t = in_img_idy - input_y; T in_img_idx = align_corners ? static_cast<T>(ratio_w * out_img_idx) : static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5); int input_x = floorf(in_img_idx); const T x_t = in_img_idx - input_x; T coefficients[4]; const T* in_pos_0; const T* in_pos_1; const T* in_pos_2; const T* in_pos_3; int access_x_0; if (data_layout == DataLayout::kNCHW) { for (int k = 0; k < 4; k++) { int access_y = max(min(input_y - 1 + k, static_cast<int>(in_img_h - 1)), 0); access_x_0 = max(min(input_x - 1, static_cast<int>(in_img_w - 1)), 0); int access_x_1 = max(min(input_x + 0, static_cast<int>(in_img_w - 1)), 0); int access_x_2 = max(min(input_x + 1, static_cast<int>(in_img_w - 1)), 0); int access_x_3 = max(min(input_x + 2, static_cast<int>(in_img_w - 1)), 0); in_pos_0 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_0]; in_pos_1 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_1]; in_pos_2 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_2]; in_pos_3 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_3]; coefficients[k] = Kecubic_interp<T>( in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t); } out[out_id_h * output_w + out_id_w] = Kecubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } else { for (int k = 0; k < 4; k++) { int access_y = max(min(input_y - 1 + k, static_cast<int>((in_img_h - 1))), 0); int access_x_0 = max(min(input_x - 1, static_cast<int>((in_img_w - 1))), 0); int access_x_1 = max(min(input_x + 0, static_cast<int>((in_img_w - 1))), 0); int access_x_2 = max(min(input_x + 1, static_cast<int>((in_img_w - 1))), 0); int access_x_3 = max(min(input_x + 2, static_cast<int>((in_img_w - 1))), 0); const T* in_pos_0 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_0 * num_channels + channel_id]; const T* 
in_pos_1 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_1 * num_channels + channel_id]; const T* in_pos_2 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_2 * num_channels + channel_id]; const T* in_pos_3 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_3 * num_channels + channel_id]; coefficients[k] = Kecubic_interp( in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t); } out[out_id_h * output_w + out_id_w] = static_cast<T>(Kecubic_interp(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t)); } } } template <typename T> __global__ void KeBicubicInterpBw(T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } T in_img_idy = align_corners ? static_cast<T>(ratio_h * out_img_idy) : static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5); int input_y = floorf(in_img_idy); const T y_t = in_img_idy - input_y; T in_img_idx = align_corners ? 
static_cast<T>(ratio_w * out_img_idx) : static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5); int input_x = floorf(in_img_idx); const T x_t = in_img_idx - input_x; T x_coeffs[4]; T y_coeffs[4]; get_cubic_upsample_coefficients(x_coeffs, x_t); get_cubic_upsample_coefficients(y_coeffs, y_t); const T* out_pos = &out[out_id_h * output_w + out_id_w]; T* in_pos; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { int access_y = max(min(static_cast<int>(input_y - 1 + j), static_cast<int>(in_img_h - 1)), 0); int access_x = max(min(static_cast<int>(input_x - 1 + i), static_cast<int>(in_img_w - 1)), 0); if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x]; } else { in_pos = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x * num_channels + channel_id]; } platform::CudaAtomicAdd(&in_pos[0], (out_pos[0] * y_coeffs[j] * x_coeffs[i])); } } } } template <typename T> static void Interpolate1DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_shape_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_shape_tensor); out_w = new_size[0]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_w = size_data[0]; } } PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_w}; } else { dim_out = {n, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_w = 0.f; if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1.0) / (out_w - 1.0) : static_cast<float>(in_w) / out_w; } int in_cw = c * in_w; int out_cw = c * out_w; int pixelNum = n * out_cw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("linear" == interp_method) { hipLaunchKernelGGL(( KeLinearInterpFw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_data, in_w, in_cw, output_data, out_w, n, out_cw, c, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_shape_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_shape_tensor); out_h = new_size[0]; out_w = new_size[1]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_h = size_data[0]; out_w = size_data[1]; } } PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_h, out_w}; } else { dim_out = {n, out_h, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_hw = in_h * in_w; int out_hw = out_h * out_w; int in_chw = c * in_hw; int out_chw = c * out_hw; int pixelNum = n * out_chw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("nearest" == interp_method) { hipLaunchKernelGGL(( KeNearestNeighborInterpFw<T>) , dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } else if ("bilinear" == interp_method) { hipLaunchKernelGGL(( KeBilinearInterpFw<T>), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, align_mode, data_layout); } else if ("bicubic" == interp_method) { hipLaunchKernelGGL(( KeBicubicInterpFw<T>) , dim3(config.block_per_grid), dim3(512), 0, ctx.cuda_device_context().stream(), input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_shape_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_shape_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_d = static_cast<int>(in_d * scale); out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_d = size_data[0]; out_h = size_data[1]; out_w = size_data[2]; } } PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument( "out_d in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_d, out_h, out_w}; } else { dim_out = {n, out_d, out_h, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(input, 
ctx.GetPlace(), output); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(in_d) / out_d; } if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_dhw = in_d * in_h * in_w; int out_dhw = out_d * out_h * out_w; int in_cdhw = c * in_dhw; int out_cdhw = c * out_dhw; int pixelNum = n * out_cdhw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("trilinear" == interp_method) { hipLaunchKernelGGL(( KeTrilinearInterpFw<T>) , dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_data, in_d, in_h, in_w, n, in_cdhw, output_data, out_d, out_h, out_w, n, out_cdhw, c, ratio_d, ratio_h, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate1DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_w = size_data[0]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_w}; } else { dim_grad = {n, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<phi::GPUContext>(); phi::funcs::SetConstant<phi::GPUContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_w = 0.f; if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_cw = c * in_w; int out_cw = c * out_w; int pixelNum = n * out_cw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("linear" == interp_method) { hipLaunchKernelGGL(( KeLinearInterpBw<T>) , dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_grad_data, in_w, in_cw, output_grad_data, out_w, n, out_cw, c, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_h = size_data[0]; out_w = size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<phi::GPUContext>(); phi::funcs::SetConstant<phi::GPUContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_hw = in_h * in_w; int out_hw = out_h * out_w; int in_chw = c * in_hw; int out_chw = c * out_hw; int pixelNum = n * out_chw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("nearest" == interp_method) { hipLaunchKernelGGL(( KeNearestNeighborInterpBw<T>) , dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } else if ("bilinear" == interp_method) { hipLaunchKernelGGL(( KeBilinearInterpBw<T>) , dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, align_mode, data_layout); } else if ("bicubic" == interp_method) { hipLaunchKernelGGL(( KeBicubicInterpBw<T>) , dim3(config.block_per_grid), dim3(512), 0, ctx.cuda_device_context().stream(), input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_d = static_cast<int>(in_d * scale); out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_d = size_data[0]; out_h = size_data[1]; out_w = size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<phi::GPUContext>(); phi::funcs::SetConstant<phi::GPUContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { ratio_d = (align_corners) ? 
static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(in_d) / out_d; } if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_dhw = in_d * in_h * in_w; int out_dhw = out_d * out_h * out_w; int in_cdhw = c * in_dhw; int out_cdhw = c * out_dhw; int pixelNum = n * out_cdhw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("trilinear" == interp_method) { hipLaunchKernelGGL(( KeTrilinearInterpBw<T>) , dim3(config.block_per_grid), dim3(config.thread_per_block), 0, ctx.cuda_device_context().stream(), input_grad_data, in_d, in_h, in_w, n, in_cdhw, output_grad_data, out_d, out_h, out_w, n, out_cdhw, c, ratio_d, ratio_h, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> class InterpolateOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::NotFound("This kernel only runs on GPU device.")); auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto input_dims = input->dims(); if (input_dims.size() == 3) { // 1D interpolation Interpolate1DCUDAFwd<T>(ctx, *input, output); } else if (input_dims.size() == 4) { // 2D interpolation Interpolate2DCUDAFwd<T>(ctx, *input, output); } else if (input_dims.size() == 5) { // 3D interpolation Interpolate3DCUDAFwd<T>(ctx, *input, output); } } }; template <typename T> class InterpolateGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::NotFound("This kernel only runs on GPU device.")); auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto output_grad_dims = output_grad->dims(); if (output_grad_dims.size() == 3) { // 1D interpolation Interpolate1DCUDABwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 4) { // 2D interpolation Interpolate2DCUDABwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 5) { // 3D interpolation Interpolate3DCUDABwd<T>(ctx, input_grad, *output_grad); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(bilinear_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(bilinear_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(nearest_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(nearest_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(trilinear_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(trilinear_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(linear_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, 
ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(linear_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(bicubic_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(bicubic_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>);
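Both the .hip file above and the .cu file that follows map every output pixel back to input coordinates in the same two-step way: the host launchers compute ratio = (in - 1) / (out - 1) when align_corners is set and in / out otherwise, and the kernels then resolve an output index either directly (ratio * dst) or through the half-pixel shift ratio * (dst + 0.5) - 0.5 clamped at zero when align_mode == 0 and align_corners is false. The standalone host-side sketch below only illustrates that mapping and the resulting linear weights; ComputeRatio, MapToSource and SrcIndex are illustrative names of my own, not symbols from the Paddle sources.

// Illustrative sketch only -- not part of the interpolate_op sources above/below.
// Mirrors the output->input index mapping and the w1lambda/w2lambda weights
// used by KeLinearInterpFw/Bw, under the assumptions stated in the lead-in.
#include <algorithm>
#include <cstdio>

struct SrcIndex {
  int idx;        // left input index (in_img_idx in the kernels)
  int offset;     // 0 or 1, mirrors w_id: whether a right neighbour exists
  float lambda0;  // weight of the left neighbour  (w2lambda)
  float lambda1;  // weight of the right neighbour (w1lambda)
};

// Ratio as computed in the Interpolate*CUDAFwd/Bwd launchers.
static float ComputeRatio(int in_w, int out_w, bool align_corners) {
  if (out_w <= 1) return 0.f;
  return align_corners ? static_cast<float>(in_w - 1) / (out_w - 1)
                       : static_cast<float>(in_w) / out_w;
}

// Mapping as done inside the linear kernels.
static SrcIndex MapToSource(int out_x, float ratio, int in_w,
                            bool align_corners, int align_mode) {
  const bool align_flag = (align_mode == 0 && !align_corners);
  // Half-pixel shift only when align_flag is set; plain scaling otherwise.
  float src = align_flag ? ratio * (out_x + 0.5f) - 0.5f : ratio * out_x;
  int idx = static_cast<int>(src);  // truncate, then clamp as the kernels do
  idx = std::max(idx, 0);
  src = std::max(src, 0.f);
  SrcIndex r;
  r.idx = idx;
  r.offset = (idx < in_w - 1) ? 1 : 0;
  r.lambda1 = src - idx;        // fractional part -> weight of right neighbour
  r.lambda0 = 1.f - r.lambda1;  // weight of left neighbour
  return r;
}

int main() {
  const int in_w = 4, out_w = 8;
  const float ratio = ComputeRatio(in_w, out_w, /*align_corners=*/false);
  for (int x = 0; x < out_w; ++x) {
    SrcIndex s = MapToSource(x, ratio, in_w, false, /*align_mode=*/0);
    std::printf("out %d -> in %d (+%d), weights %.3f / %.3f\n",
                x, s.idx, s.offset, s.lambda0, s.lambda1);
  }
  return 0;
}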
0eee95c625dabc5fc285c740cd6b21844d27dd41.cu
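The bicubic kernels (KeBicubicInterpFw/Bw) in both files call get_cubic_upsample_coefficients, cubic_convolution1 and cubic_convolution2 from interpolate_op.h, which is not included here. Judging from the a = -0.75 constant in Kecubic_interp, they are assumed to implement the standard Keys cubic-convolution weights; the standalone sketch below reconstructs those weights under that assumption. CubicConvolution1/2 and CubicUpsampleCoefficients are illustrative names of my own, not the real helpers.

// Illustrative sketch only -- not part of the .cu file that follows.
// Reconstructs the Keys cubic-convolution weights (a = -0.75) that the
// bicubic kernels are assumed to use via interpolate_op.h.
#include <cstdio>

// |x| <= 1 branch of the Keys kernel: (a+2)|x|^3 - (a+3)|x|^2 + 1
static float CubicConvolution1(float x, float a) {
  return ((a + 2.f) * x - (a + 3.f)) * x * x + 1.f;
}

// 1 < |x| < 2 branch of the Keys kernel: a|x|^3 - 5a|x|^2 + 8a|x| - 4a
static float CubicConvolution2(float x, float a) {
  return ((a * x - 5.f * a) * x + 8.f * a) * x - 4.f * a;
}

// Weights for the four taps around fractional position t in [0, 1),
// laid out exactly as the coefficients in Kecubic_interp.
static void CubicUpsampleCoefficients(float coeffs[4], float t) {
  const float a = -0.75f;
  const float x1 = t;
  const float x2 = 1.f - t;
  coeffs[0] = CubicConvolution2(x1 + 1.f, a);
  coeffs[1] = CubicConvolution1(x1, a);
  coeffs[2] = CubicConvolution1(x2, a);
  coeffs[3] = CubicConvolution2(x2 + 1.f, a);
}

// 1-D cubic interpolation of four neighbouring samples at offset t.
static float CubicInterp1D(float p0, float p1, float p2, float p3, float t) {
  float c[4];
  CubicUpsampleCoefficients(c, t);
  return p0 * c[0] + p1 * c[1] + p2 * c[2] + p3 * c[3];
}

int main() {
  // The four weights always sum to 1; check at t = 0.25 and interpolate a ramp.
  float c[4];
  CubicUpsampleCoefficients(c, 0.25f);
  std::printf("weights: %.4f %.4f %.4f %.4f (sum %.4f)\n",
              c[0], c[1], c[2], c[3], c[0] + c[1] + c[2] + c[3]);
  std::printf("interp of 0,1,2,3 at t=0.25: %.4f\n",
              CubicInterp1D(0.f, 1.f, 2.f, 3.f, 0.25f));
  return 0;
}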
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include <string> #include "paddle/fluid/operators/interpolate_op.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using framework::Tensor; using DataLayout = framework::DataLayout; template <typename T> __global__ void KeNearestNeighborInterpFw(const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = (align_corners) ? static_cast<int>(ratio_h * out_img_idy + 0.5) : static_cast<int>(ratio_h * out_img_idy); int in_img_idx = (align_corners) ? 
static_cast<int>(ratio_w * out_img_idx + 0.5) : static_cast<int>(ratio_w * out_img_idx); if (data_layout == DataLayout::kNCHW) { out[tid] = in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { out[tid] = in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } } } template <typename T> __global__ void KeNearestNeighborInterpBw(T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = (align_corners) ? static_cast<int>(ratio_h * out_img_idy + 0.5) : static_cast<int>(ratio_h * out_img_idy); int in_img_idx = (align_corners) ? static_cast<int>(ratio_w * out_img_idx + 0.5) : static_cast<int>(ratio_w * out_img_idx); T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } const T out_pos = out[out_id_h * output_w + out_id_w]; platform::CudaAtomicAdd(in_pos, out_pos); } } template <typename T> __global__ void KeLinearInterpFw(const T* in, const size_t in_img_w, const size_t input_w, T* out, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idx = tid % out_img_w; } else { out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; // w int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; // w_id T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { const T* in_pos = &in[out_id_h * out_id_w + channel_id * in_img_size + in_img_idx]; // linear interpolation out[out_id_h * output_w + out_id_w] = w2lambda * in_pos[0] + w1lambda * in_pos[w_id]; } else { const T* in_pos = &in[out_id_h * input_w + in_img_idx * num_channels + channel_id]; // linear interpolation out[out_id_h * output_w + out_id_w] = w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]; } } } template <typename T> __global__ void KeLinearInterpBw(T* in, const size_t in_img_w, const size_t input_w, const T* out, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idx = tid % out_img_w; } else { out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5 : ratio_w * out_img_idx; in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; // w int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; // w_id T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idx * num_channels + channel_id]; } const T* out_pos = &out[out_id_w]; if (data_layout == DataLayout::kNCHW) { platform::CudaAtomicAdd(&in_pos[0], w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id], w1lambda * out_pos[0]); } else { platform::CudaAtomicAdd(&in_pos[0], w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id * num_channels], w1lambda * out_pos[0]); } } } template <typename T> __global__ void KeBilinearInterpFw(const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { const T* in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; // bilinear interpolation out[out_id_h * output_w + out_id_w] = h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id]) + h1lambda * (w2lambda * in_pos[h_id * in_img_w] + w1lambda * in_pos[h_id * in_img_w + w_id]); } else { const T* in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; // bilinear interpolation out[out_id_h * output_w + out_id_w] = h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[w_id * num_channels]) + h1lambda * (w2lambda * in_pos[h_id * in_img_w * num_channels] + w1lambda * in_pos[h_id * in_img_w * num_channels + w_id * num_channels]); } } } template <typename T> __global__ void KeBilinearInterpBw(T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_h, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idy = align_flag ? ratio_h * (out_img_idy + 0.5) - 0.5 : ratio_h * out_img_idy; in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? ratio_w * (out_img_idx + 0.5) - 0.5 : ratio_w * out_img_idx; in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? 
src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; T* in_pos; if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + in_img_idy * in_img_w + in_img_idx]; } else { in_pos = &in[out_id_h * input_w + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id]; } const T* out_pos = &out[out_id_h * output_w + out_id_w]; if (data_layout == DataLayout::kNCHW) { platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id], h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w], h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w + w_id], h1lambda * w1lambda * out_pos[0]); } else { platform::CudaAtomicAdd(&in_pos[0], h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[w_id * num_channels], h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos[h_id * in_img_w * num_channels], h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos[h_id * in_img_w * num_channels + w_id * num_channels], h1lambda * w1lambda * out_pos[0]); } } } template <typename T> __global__ void KeTrilinearInterpFw(const T* in, const size_t in_img_d, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_d, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_d, const float ratio_h, const float ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idt, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w; out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h; out_img_idx = tid % out_img_w; } else { out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels); out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idt = align_flag ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5) : static_cast<int>(ratio_d * out_img_idt); in_img_idt = (in_img_idt > 0) ? in_img_idt : 0; int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0; T src_d = ratio_d * (out_img_idt + 0.5) - 0.5; src_d = (src_d > 0) ? src_d : 0; T d1lambda = align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt; T d2lambda = 1.f - d1lambda; int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? 
static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size + (in_img_idt * in_img_h + in_img_idy) * in_img_w + in_img_idx; const T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w; const T* in_pos2 = &in[in_pos2_idx]; // trilinear interpolation out[out_id_h * output_w + out_id_w] = d2lambda * (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id]) + h1lambda * (w2lambda * in_pos1[h_id * in_img_w] + w1lambda * in_pos1[h_id * in_img_w + w_id])) + d1lambda * (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id]) + h1lambda * (w2lambda * in_pos2[h_id * in_img_w] + w1lambda * in_pos2[h_id * in_img_w + w_id])); } else { int in_pos1_idx = out_id_h * input_w + in_img_idt * in_img_h * in_img_w * num_channels + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id; const T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels; const T* in_pos2 = &in[in_pos2_idx]; // trilinear interpolation out[out_id_h * output_w + out_id_w] = d2lambda * (h2lambda * (w2lambda * in_pos1[0] + w1lambda * in_pos1[w_id * num_channels]) + h1lambda * (w2lambda * in_pos1[h_id * in_img_w * num_channels] + w1lambda * in_pos1[h_id * in_img_w * num_channels + w_id * num_channels])) + d1lambda * (h2lambda * (w2lambda * in_pos2[0] + w1lambda * in_pos2[w_id * num_channels]) + h1lambda * (w2lambda * in_pos2[h_id * in_img_w * num_channels] + w1lambda * in_pos2[h_id * in_img_w * num_channels + w_id * num_channels])); } } } template <typename T> __global__ void KeTrilinearInterpBw(T* in, const size_t in_img_d, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_d, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const T ratio_d, const T ratio_h, const T ratio_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; bool align_flag = (align_mode == 0 && !align_corners); for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idt, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idt = (out_id_w % out_img_size) / out_img_h / out_img_w; out_img_idy = ((out_id_w % out_img_size) / out_img_w) % out_img_h; out_img_idx = tid % out_img_w; } else { out_img_idt = out_id_w / (out_img_h * out_img_w * num_channels); out_img_idy = out_id_w % (out_img_h * out_img_w * num_channels) / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } int in_img_idt = align_flag ? static_cast<int>(ratio_d * (out_img_idt + 0.5) - 0.5) : static_cast<int>(ratio_d * out_img_idt); in_img_idt = (in_img_idt > 0) ? 
in_img_idt : 0; int d_id = (in_img_idt < in_img_d - 1) ? 1 : 0; T src_d = ratio_d * (out_img_idt + 0.5) - 0.5; src_d = (src_d > 0) ? src_d : 0; T d1lambda = align_flag ? src_d - in_img_idt : ratio_d * out_img_idt - in_img_idt; T d2lambda = 1.f - d1lambda; int in_img_idy = align_flag ? static_cast<int>(ratio_h * (out_img_idy + 0.5) - 0.5) : static_cast<int>(ratio_h * out_img_idy); in_img_idy = (in_img_idy > 0) ? in_img_idy : 0; int h_id = (in_img_idy < in_img_h - 1) ? 1 : 0; T src_h = ratio_h * (out_img_idy + 0.5) - 0.5; src_h = (src_h > 0) ? src_h : 0; T h1lambda = align_flag ? src_h - in_img_idy : ratio_h * out_img_idy - in_img_idy; T h2lambda = 1.f - h1lambda; int in_img_idx = align_flag ? static_cast<int>(ratio_w * (out_img_idx + 0.5) - 0.5) : static_cast<int>(ratio_w * out_img_idx); in_img_idx = (in_img_idx > 0) ? in_img_idx : 0; int w_id = (in_img_idx < in_img_w - 1) ? 1 : 0; T src_w = ratio_w * (out_img_idx + 0.5) - 0.5; src_w = (src_w > 0) ? src_w : 0; T w1lambda = align_flag ? src_w - in_img_idx : ratio_w * out_img_idx - in_img_idx; T w2lambda = 1.f - w1lambda; if (data_layout == DataLayout::kNCHW) { int in_pos1_idx = out_id_h * input_w + channel_id * in_img_size + (in_img_idt * in_img_h + in_img_idy) * in_img_w + in_img_idx; T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w; T* in_pos2 = &in[in_pos2_idx]; const T* out_pos = &out[out_id_h * output_w + out_id_w]; // trilinear interpolation grad platform::CudaAtomicAdd(&in_pos1[0], d2lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[w_id], d2lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w], d2lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w + w_id], d2lambda * h1lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[0], d1lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[w_id], d1lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w], d1lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w + w_id], d1lambda * h1lambda * w1lambda * out_pos[0]); } else { int in_pos1_idx = out_id_h * input_w + in_img_idt * in_img_h * in_img_w * num_channels + in_img_idy * in_img_w * num_channels + in_img_idx * num_channels + channel_id; T* in_pos1 = &in[in_pos1_idx]; int in_pos2_idx = in_pos1_idx + d_id * in_img_h * in_img_w * num_channels; T* in_pos2 = &in[in_pos2_idx]; const T* out_pos = &out[out_id_h * output_w + out_id_w]; // trilinear interpolation grad platform::CudaAtomicAdd(&in_pos1[0], d2lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[w_id * num_channels], d2lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos1[h_id * in_img_w * num_channels], d2lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos1[h_id * in_img_w * num_channels + w_id * num_channels], d2lambda * h1lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[0], d1lambda * h2lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[w_id * num_channels], d1lambda * h2lambda * w1lambda * out_pos[0]); platform::CudaAtomicAdd(&in_pos2[h_id * in_img_w * num_channels], d1lambda * h1lambda * w2lambda * out_pos[0]); platform::CudaAtomicAdd( &in_pos2[h_id * in_img_w * num_channels + w_id * num_channels], d1lambda * h1lambda * w1lambda * out_pos[0]); } } } template <typename T> __device__ __forceinline__ static T Kecubic_interp( 
const T x0, const T x1, const T x2, const T x3, T t) { T coeffs[4]; T a = -0.75; T x_1 = t; T x_2 = 1.0 - t; coeffs[0] = cubic_convolution2<T>(x_1 + 1.0, a); coeffs[1] = cubic_convolution1<T>(x_1, a); coeffs[2] = cubic_convolution1<T>(x_2, a); coeffs[3] = cubic_convolution2<T>(x_2 + 1.0, a); return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3]; } template <typename T> __global__ void KeBicubicInterpFw(const T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } T in_img_idy = align_corners ? static_cast<T>(ratio_h * out_img_idy) : static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5); int input_y = floorf(in_img_idy); const T y_t = in_img_idy - input_y; T in_img_idx = align_corners ? static_cast<T>(ratio_w * out_img_idx) : static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5); int input_x = floorf(in_img_idx); const T x_t = in_img_idx - input_x; T coefficients[4]; const T* in_pos_0; const T* in_pos_1; const T* in_pos_2; const T* in_pos_3; int access_x_0; if (data_layout == DataLayout::kNCHW) { for (int k = 0; k < 4; k++) { int access_y = max(min(input_y - 1 + k, static_cast<int>(in_img_h - 1)), 0); access_x_0 = max(min(input_x - 1, static_cast<int>(in_img_w - 1)), 0); int access_x_1 = max(min(input_x + 0, static_cast<int>(in_img_w - 1)), 0); int access_x_2 = max(min(input_x + 1, static_cast<int>(in_img_w - 1)), 0); int access_x_3 = max(min(input_x + 2, static_cast<int>(in_img_w - 1)), 0); in_pos_0 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_0]; in_pos_1 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_1]; in_pos_2 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_2]; in_pos_3 = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x_3]; coefficients[k] = Kecubic_interp<T>( in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t); } out[out_id_h * output_w + out_id_w] = Kecubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } else { for (int k = 0; k < 4; k++) { int access_y = max(min(input_y - 1 + k, static_cast<int>((in_img_h - 1))), 0); int access_x_0 = max(min(input_x - 1, static_cast<int>((in_img_w - 1))), 0); int access_x_1 = max(min(input_x + 0, static_cast<int>((in_img_w - 1))), 0); int access_x_2 = max(min(input_x + 1, static_cast<int>((in_img_w - 1))), 0); int access_x_3 = max(min(input_x + 2, static_cast<int>((in_img_w - 1))), 0); const T* in_pos_0 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_0 * num_channels + channel_id]; const T* 
in_pos_1 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_1 * num_channels + channel_id]; const T* in_pos_2 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_2 * num_channels + channel_id]; const T* in_pos_3 = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x_3 * num_channels + channel_id]; coefficients[k] = Kecubic_interp( in_pos_0[0], in_pos_1[0], in_pos_2[0], in_pos_3[0], x_t); } out[out_id_h * output_w + out_id_w] = static_cast<T>(Kecubic_interp(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t)); } } } template <typename T> __global__ void KeBicubicInterpBw(T* in, const size_t in_img_h, const size_t in_img_w, const size_t input_h, const size_t input_w, const T* out, const size_t out_img_h, const size_t out_img_w, const size_t output_h, const size_t output_w, const size_t num_channels, const float ratio_h, const float ratio_w, const bool align_corners, const DataLayout data_layout) { int nthreads = output_h * output_w; int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (; tid < nthreads; tid += stride) { int out_id_h = tid / output_w; int out_id_w = tid % output_w; int in_img_size = input_w / num_channels; int out_img_size = output_w / num_channels; int channel_id, out_img_idy, out_img_idx; if (data_layout == DataLayout::kNCHW) { channel_id = out_id_w / out_img_size; out_img_idy = (out_id_w % out_img_size) / out_img_w; out_img_idx = tid % out_img_w; } else { out_img_idy = out_id_w / (out_img_w * num_channels); out_img_idx = out_id_w % (out_img_w * num_channels) / num_channels; channel_id = tid % num_channels; } T in_img_idy = align_corners ? static_cast<T>(ratio_h * out_img_idy) : static_cast<T>(ratio_h * (out_img_idy + 0.5) - 0.5); int input_y = floorf(in_img_idy); const T y_t = in_img_idy - input_y; T in_img_idx = align_corners ? 
static_cast<T>(ratio_w * out_img_idx) : static_cast<T>(ratio_w * (out_img_idx + 0.5) - 0.5); int input_x = floorf(in_img_idx); const T x_t = in_img_idx - input_x; T x_coeffs[4]; T y_coeffs[4]; get_cubic_upsample_coefficients(x_coeffs, x_t); get_cubic_upsample_coefficients(y_coeffs, y_t); const T* out_pos = &out[out_id_h * output_w + out_id_w]; T* in_pos; for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { int access_y = max(min(static_cast<int>(input_y - 1 + j), static_cast<int>(in_img_h - 1)), 0); int access_x = max(min(static_cast<int>(input_x - 1 + i), static_cast<int>(in_img_w - 1)), 0); if (data_layout == DataLayout::kNCHW) { in_pos = &in[out_id_h * input_w + channel_id * in_img_size + access_y * in_img_w + access_x]; } else { in_pos = &in[out_id_h * input_w + access_y * in_img_w * num_channels + access_x * num_channels + channel_id]; } platform::CudaAtomicAdd(&in_pos[0], (out_pos[0] * y_coeffs[j] * x_coeffs[i])); } } } } template <typename T> static void Interpolate1DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_shape_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_shape_tensor); out_w = new_size[0]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_w = size_data[0]; } } PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_w}; } else { dim_out = {n, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_w = 0.f; if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1.0) / (out_w - 1.0) : static_cast<float>(in_w) / out_w; } int in_cw = c * in_w; int out_cw = c * out_w; int pixelNum = n * out_cw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("linear" == interp_method) { KeLinearInterpFw<T><<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>(input_data, in_w, in_cw, output_data, out_w, n, out_cw, c, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_shape_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_shape_tensor); out_h = new_size[0]; out_w = new_size[1]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_h = size_data[0]; out_w = size_data[1]; } } PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_h, out_w}; } else { dim_out = {n, out_h, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_hw = in_h * in_w; int out_hw = out_h * out_w; int in_chw = c * in_hw; int out_chw = c * out_hw; int pixelNum = n * out_chw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("nearest" == interp_method) { KeNearestNeighborInterpFw<T> <<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>(input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } else if ("bilinear" == interp_method) { KeBilinearInterpFw<T><<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>(input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, align_mode, data_layout); } else if ("bicubic" == interp_method) { KeBicubicInterpFw<T> <<<config.block_per_grid, 512, 0, ctx.cuda_device_context().stream()>>>( input_data, in_h, in_w, n, in_chw, output_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCUDAFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { auto* input_data = input.data<T>(); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_shape_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_shape_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_shape_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_d = static_cast<int>(in_d * scale); out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_d = size_data[0]; out_h = size_data[1]; out_w = size_data[2]; } } PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument( "out_d in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_d, out_h, out_w}; } else { dim_out = {n, out_d, out_h, out_w, c}; } auto output_data = output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 
0.f; if (out_d > 1) { ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(in_d) / out_d; } if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_dhw = in_d * in_h * in_w; int out_dhw = out_d * out_h * out_w; int in_cdhw = c * in_dhw; int out_cdhw = c * out_dhw; int pixelNum = n * out_cdhw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("trilinear" == interp_method) { KeTrilinearInterpFw<T> <<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>(input_data, in_d, in_h, in_w, n, in_cdhw, output_data, out_d, out_h, out_w, n, out_cdhw, c, ratio_d, ratio_h, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate1DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_w = size_data[0]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_w}; } else { dim_grad = {n, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<phi::GPUContext>(); phi::funcs::SetConstant<phi::GPUContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_w = 0.f; if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_cw = c * in_w; int out_cw = c * out_w; int pixelNum = n * out_cw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("linear" == interp_method) { KeLinearInterpBw<T> <<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>(input_grad_data, in_w, in_cw, output_grad_data, out_w, n, out_cw, c, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_h = size_data[0]; out_w = size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<phi::GPUContext>(); phi::funcs::SetConstant<phi::GPUContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_hw = in_h * in_w; int out_hw = out_h * out_w; int in_chw = c * in_hw; int out_chw = c * out_hw; int pixelNum = n * out_chw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("nearest" == interp_method) { KeNearestNeighborInterpBw<T> <<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>(input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } else if ("bilinear" == interp_method) { KeBilinearInterpBw<T> <<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>(input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, align_mode, data_layout); } else if ("bicubic" == interp_method) { KeBicubicInterpBw<T> <<<config.block_per_grid, 512, 0, ctx.cuda_device_context().stream()>>>( input_grad_data, in_h, in_w, n, in_chw, output_grad_data, out_h, out_w, n, out_chw, c, ratio_h, ratio_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCUDABwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_d = static_cast<int>(in_d * scale); out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { Tensor sizes; framework::TensorCopySync(*out_size, platform::CPUPlace(), &sizes); auto size_data = sizes.data<int>(); out_d = size_data[0]; out_h = size_data[1]; out_w = size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } auto* output_grad_data = output_grad.data<T>(); framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } auto* input_grad_data = input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<phi::GPUContext>(); phi::funcs::SetConstant<phi::GPUContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { ratio_d = (align_corners) ? 
static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(in_d) / out_d; } if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } int in_dhw = in_d * in_h * in_w; int out_dhw = out_d * out_h * out_w; int in_cdhw = c * in_dhw; int out_cdhw = c * out_dhw; int pixelNum = n * out_cdhw; platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(ctx.cuda_device_context(), pixelNum); if ("trilinear" == interp_method) { KeTrilinearInterpBw<T> <<<config.block_per_grid, config.thread_per_block, 0, ctx.cuda_device_context().stream()>>>(input_grad_data, in_d, in_h, in_w, n, in_cdhw, output_grad_data, out_d, out_h, out_w, n, out_cdhw, c, ratio_d, ratio_h, ratio_w, align_corners, align_mode, data_layout); } } template <typename T> class InterpolateOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::NotFound("This kernel only runs on GPU device.")); auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto input_dims = input->dims(); if (input_dims.size() == 3) { // 1D interpolation Interpolate1DCUDAFwd<T>(ctx, *input, output); } else if (input_dims.size() == 4) { // 2D interpolation Interpolate2DCUDAFwd<T>(ctx, *input, output); } else if (input_dims.size() == 5) { // 3D interpolation Interpolate3DCUDAFwd<T>(ctx, *input, output); } } }; template <typename T> class InterpolateGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::NotFound("This kernel only runs on GPU device.")); auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto output_grad_dims = output_grad->dims(); if (output_grad_dims.size() == 3) { // 1D interpolation Interpolate1DCUDABwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 4) { // 2D interpolation Interpolate2DCUDABwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 5) { // 3D interpolation Interpolate3DCUDABwd<T>(ctx, input_grad, *output_grad); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(bilinear_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(bilinear_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(nearest_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(nearest_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(trilinear_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(trilinear_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(linear_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); 
REGISTER_OP_CUDA_KERNEL(linear_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(bicubic_interp, ops::InterpolateOpCUDAKernel<float>, ops::InterpolateOpCUDAKernel<double>, ops::InterpolateOpCUDAKernel<int>); REGISTER_OP_CUDA_KERNEL(bicubic_interp_grad, ops::InterpolateGradOpCUDAKernel<float>, ops::InterpolateGradOpCUDAKernel<double>);
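/*
 * A minimal standalone sketch (the helper and names below are illustrative and not part of the
 * operator code above) showing what the ratio_h / ratio_w values computed in the launchers
 * correspond to: with align_corners the scale is (in - 1) / (out - 1), so output index j samples
 * input coordinate ratio * j and the two endpoints line up exactly; otherwise the scale is
 * in / out and the half-pixel handling is left to the Ke*Interp kernels, which also consult
 * align_mode.
 */
#include <cstdio>

static float scale_ratio(int in_size, int out_size, bool align_corners) {
  if (out_size <= 1) return 0.f;  // same guard as the launchers above
  return align_corners ? static_cast<float>(in_size - 1) / (out_size - 1)
                       : static_cast<float>(in_size) / out_size;
}

int main() {
  const int in_w = 4, out_w = 8;
  float ratio = scale_ratio(in_w, out_w, /*align_corners=*/true);  // (4-1)/(8-1)
  for (int j = 0; j < out_w; ++j) {
    // Output column j reads input coordinate ratio * j: column 0 maps to 0.0 and
    // column out_w-1 maps exactly to in_w-1 = 3.0.
    printf("out %d -> in %.3f\n", j, ratio * j);
  }
  return 0;
}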
56ad20ef98b0b8504c46ce76e453a04f06f7ac59.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include "nv_nms.h" #include "nv_utils.h" #include <algorithm> #include <iostream> #include <stdexcept> #include <cstdint> #include <vector> #include <cmath> #include <hip/hip_runtime.h> #include <thrust/device_ptr.h> #include <thrust/sequence.h> #include <thrust/execution_policy.h> #include <thrust/gather.h> #include <thrust/system/hip/detail/hipcub/hipcub.hpp> #include <thrust/system/hip/detail/cub/iterator/counting_input_iterator.cuh> namespace retinanet { namespace cuda { __global__ void nms_kernel( const int num_per_thread, const float threshold, const int num_detections, const int *indices, float *scores, const float *classes, const float4 *boxes) { // Go through detections by descending score for (int m = 0; m < num_detections; m++) { for (int n = 0; n < num_per_thread; n++) { int i = threadIdx.x * num_per_thread + n; if (i < num_detections && m < i && scores[m] > 0.0f) { int idx = indices[i]; int max_idx = indices[m]; int icls = classes[idx]; int mcls = classes[max_idx]; if (mcls == icls) { float4 ibox = boxes[idx]; float4 mbox = boxes[max_idx]; float x1 = max(ibox.x, mbox.x); float y1 = max(ibox.y, mbox.y); float x2 = min(ibox.z, mbox.z); float y2 = min(ibox.w, mbox.w); float w = max(0.0f, x2 - x1 + 1); float h = max(0.0f, y2 - y1 + 1); float iarea = (ibox.z - ibox.x + 1) * (ibox.w - ibox.y + 1); float marea = (mbox.z - mbox.x + 1) * (mbox.w - mbox.y + 1); float inter = w * h; float overlap = inter / (iarea + marea - inter); if (overlap > threshold) { scores[i] = 0.0f; } } } } // Sync discarded detections __syncthreads(); } } int nms(int batch_size, const void *const *inputs, void **outputs, size_t count, int detections_per_im, float nms_thresh, void *workspace, size_t workspace_size, hipStream_t stream) { if (!workspace || !workspace_size) { // Return required scratch space size cub style workspace_size = get_size_aligned<bool>(count); // flags workspace_size += get_size_aligned<int>(count); // indices workspace_size += get_size_aligned<int>(count); // indices_sorted workspace_size += get_size_aligned<float>(count); // scores workspace_size += get_size_aligned<float>(count); // scores_sorted size_t temp_size_flag = 0; thrust::cuda_cub::hipcub::DeviceSelect::Flagged((void *)nullptr, temp_size_flag, thrust::cuda_cub::hipcub::CountingInputIterator<int>(count), (bool *)nullptr, (int *)nullptr, (int *)nullptr, count); 
size_t temp_size_sort = 0; thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending((void *)nullptr, temp_size_sort, (float *)nullptr, (float *)nullptr, (int *)nullptr, (int *)nullptr, count); workspace_size += ::max(temp_size_flag, temp_size_sort); return workspace_size; } auto on_stream = thrust::hip::par.on(stream); auto flags = get_next_ptr<bool>(count, workspace, workspace_size); auto indices = get_next_ptr<int>(count, workspace, workspace_size); auto indices_sorted = get_next_ptr<int>(count, workspace, workspace_size); auto scores = get_next_ptr<float>(count, workspace, workspace_size); auto scores_sorted = get_next_ptr<float>(count, workspace, workspace_size); for (int batch = 0; batch < batch_size; batch++) { auto in_scores = static_cast<const float *>(inputs[0]) + batch * count; auto in_boxes = static_cast<const float4 *>(inputs[1]) + batch * count; auto in_classes = static_cast<const float *>(inputs[2]) + batch * count; auto out_scores = static_cast<float *>(outputs[0]) + batch * detections_per_im; auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * detections_per_im; auto out_classes = static_cast<float *>(outputs[2]) + batch * detections_per_im; // Discard null scores thrust::transform(on_stream, in_scores, in_scores + count, flags, thrust::placeholders::_1 > 0.0f); int *num_selected = reinterpret_cast<int *>(indices_sorted); thrust::cuda_cub::hipcub::DeviceSelect::Flagged(workspace, workspace_size, thrust::cuda_cub::hipcub::CountingInputIterator<int>(0), flags, indices, num_selected, count, stream); hipStreamSynchronize(stream); int num_detections = *thrust::device_pointer_cast(num_selected); // Sort scores and corresponding indices thrust::gather(on_stream, indices, indices + num_detections, in_scores, scores); thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size, scores, scores_sorted, indices, indices_sorted, num_detections, 0, sizeof(*scores)*8, stream); // Launch actual NMS kernel - 1 block with each thread handling n detections const int max_threads = 1024; int num_per_thread = ceil((float)num_detections / max_threads); hipLaunchKernelGGL(( nms_kernel), dim3(1), dim3(max_threads), 0, stream, num_per_thread, nms_thresh, num_detections, indices_sorted, scores_sorted, in_classes, in_boxes); // Re-sort with updated scores thrust::cuda_cub::hipcub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size, scores_sorted, scores, indices_sorted, indices, num_detections, 0, sizeof(*scores)*8, stream); // Gather filtered scores, boxes, classes num_detections = min(detections_per_im, num_detections); hipMemcpyAsync(out_scores, scores, num_detections * sizeof *scores, hipMemcpyDeviceToDevice, stream); if (num_detections < detections_per_im) { thrust::fill_n(on_stream, out_scores + num_detections, detections_per_im - num_detections, 0); } thrust::gather(on_stream, indices, indices + num_detections, in_boxes, out_boxes); thrust::gather(on_stream, indices, indices + num_detections, in_classes, out_classes); } return 0; } } }
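/*
 * A small host-side sketch (assumed, not part of the plugin source above) that mirrors the
 * per-pair overlap test inside nms_kernel, including the same "+ 1" inclusive-pixel convention
 * for widths and heights. A candidate is suppressed when this IoU exceeds nms_thresh and it
 * shares a class with a higher-scoring kept detection.
 */
#include <algorithm>
#include <cstdio>

struct Box { float x1, y1, x2, y2; };

static float iou(const Box &a, const Box &b) {
  float x1 = std::max(a.x1, b.x1), y1 = std::max(a.y1, b.y1);
  float x2 = std::min(a.x2, b.x2), y2 = std::min(a.y2, b.y2);
  float w = std::max(0.0f, x2 - x1 + 1), h = std::max(0.0f, y2 - y1 + 1);
  float inter = w * h;
  float area_a = (a.x2 - a.x1 + 1) * (a.y2 - a.y1 + 1);
  float area_b = (b.x2 - b.x1 + 1) * (b.y2 - b.y1 + 1);
  return inter / (area_a + area_b - inter);  // same formula as "overlap" in the kernel
}

int main() {
  Box kept      = {0, 0, 9, 9};   // 10x10 box in inclusive coordinates, area 100
  Box candidate = {5, 0, 14, 9};  // shifted copy overlapping half of it
  // intersection = 5 * 10 = 50, union = 100 + 100 - 50 = 150, IoU = 0.333
  printf("IoU = %.3f\n", iou(kept, candidate));
  return 0;
}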
56ad20ef98b0b8504c46ce76e453a04f06f7ac59.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include "nv_nms.h" #include "nv_utils.h" #include <algorithm> #include <iostream> #include <stdexcept> #include <cstdint> #include <vector> #include <cmath> #include <cuda.h> #include <thrust/device_ptr.h> #include <thrust/sequence.h> #include <thrust/execution_policy.h> #include <thrust/gather.h> #include <thrust/system/cuda/detail/cub/device/device_radix_sort.cuh> #include <thrust/system/cuda/detail/cub/iterator/counting_input_iterator.cuh> namespace retinanet { namespace cuda { __global__ void nms_kernel( const int num_per_thread, const float threshold, const int num_detections, const int *indices, float *scores, const float *classes, const float4 *boxes) { // Go through detections by descending score for (int m = 0; m < num_detections; m++) { for (int n = 0; n < num_per_thread; n++) { int i = threadIdx.x * num_per_thread + n; if (i < num_detections && m < i && scores[m] > 0.0f) { int idx = indices[i]; int max_idx = indices[m]; int icls = classes[idx]; int mcls = classes[max_idx]; if (mcls == icls) { float4 ibox = boxes[idx]; float4 mbox = boxes[max_idx]; float x1 = max(ibox.x, mbox.x); float y1 = max(ibox.y, mbox.y); float x2 = min(ibox.z, mbox.z); float y2 = min(ibox.w, mbox.w); float w = max(0.0f, x2 - x1 + 1); float h = max(0.0f, y2 - y1 + 1); float iarea = (ibox.z - ibox.x + 1) * (ibox.w - ibox.y + 1); float marea = (mbox.z - mbox.x + 1) * (mbox.w - mbox.y + 1); float inter = w * h; float overlap = inter / (iarea + marea - inter); if (overlap > threshold) { scores[i] = 0.0f; } } } } // Sync discarded detections __syncthreads(); } } int nms(int batch_size, const void *const *inputs, void **outputs, size_t count, int detections_per_im, float nms_thresh, void *workspace, size_t workspace_size, cudaStream_t stream) { if (!workspace || !workspace_size) { // Return required scratch space size cub style workspace_size = get_size_aligned<bool>(count); // flags workspace_size += get_size_aligned<int>(count); // indices workspace_size += get_size_aligned<int>(count); // indices_sorted workspace_size += get_size_aligned<float>(count); // scores workspace_size += get_size_aligned<float>(count); // scores_sorted size_t temp_size_flag = 0; thrust::cuda_cub::cub::DeviceSelect::Flagged((void *)nullptr, temp_size_flag, thrust::cuda_cub::cub::CountingInputIterator<int>(count), (bool *)nullptr, (int *)nullptr, (int *)nullptr, count); size_t temp_size_sort = 0; 
thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending((void *)nullptr, temp_size_sort, (float *)nullptr, (float *)nullptr, (int *)nullptr, (int *)nullptr, count); workspace_size += std::max(temp_size_flag, temp_size_sort); return workspace_size; } auto on_stream = thrust::cuda::par.on(stream); auto flags = get_next_ptr<bool>(count, workspace, workspace_size); auto indices = get_next_ptr<int>(count, workspace, workspace_size); auto indices_sorted = get_next_ptr<int>(count, workspace, workspace_size); auto scores = get_next_ptr<float>(count, workspace, workspace_size); auto scores_sorted = get_next_ptr<float>(count, workspace, workspace_size); for (int batch = 0; batch < batch_size; batch++) { auto in_scores = static_cast<const float *>(inputs[0]) + batch * count; auto in_boxes = static_cast<const float4 *>(inputs[1]) + batch * count; auto in_classes = static_cast<const float *>(inputs[2]) + batch * count; auto out_scores = static_cast<float *>(outputs[0]) + batch * detections_per_im; auto out_boxes = static_cast<float4 *>(outputs[1]) + batch * detections_per_im; auto out_classes = static_cast<float *>(outputs[2]) + batch * detections_per_im; // Discard null scores thrust::transform(on_stream, in_scores, in_scores + count, flags, thrust::placeholders::_1 > 0.0f); int *num_selected = reinterpret_cast<int *>(indices_sorted); thrust::cuda_cub::cub::DeviceSelect::Flagged(workspace, workspace_size, thrust::cuda_cub::cub::CountingInputIterator<int>(0), flags, indices, num_selected, count, stream); cudaStreamSynchronize(stream); int num_detections = *thrust::device_pointer_cast(num_selected); // Sort scores and corresponding indices thrust::gather(on_stream, indices, indices + num_detections, in_scores, scores); thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size, scores, scores_sorted, indices, indices_sorted, num_detections, 0, sizeof(*scores)*8, stream); // Launch actual NMS kernel - 1 block with each thread handling n detections const int max_threads = 1024; int num_per_thread = ceil((float)num_detections / max_threads); nms_kernel<<<1, max_threads, 0, stream>>>(num_per_thread, nms_thresh, num_detections, indices_sorted, scores_sorted, in_classes, in_boxes); // Re-sort with updated scores thrust::cuda_cub::cub::DeviceRadixSort::SortPairsDescending(workspace, workspace_size, scores_sorted, scores, indices_sorted, indices, num_detections, 0, sizeof(*scores)*8, stream); // Gather filtered scores, boxes, classes num_detections = min(detections_per_im, num_detections); cudaMemcpyAsync(out_scores, scores, num_detections * sizeof *scores, cudaMemcpyDeviceToDevice, stream); if (num_detections < detections_per_im) { thrust::fill_n(on_stream, out_scores + num_detections, detections_per_im - num_detections, 0); } thrust::gather(on_stream, indices, indices + num_detections, in_boxes, out_boxes); thrust::gather(on_stream, indices, indices + num_detections, in_classes, out_classes); } return 0; } } }
0cbfb085395c908e7671cbf0f54ec5ea4c5e57a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdlib.h> #include<stdio.h> #include <math.h> #define REP_TIME 100 #define TILE 32 #define SIDE 8 float *initial_matrix(int rows, int cols); void print_matrix(float *matrix, int rows,int cols); void check(float *src, float *dst, int rows,int cols); __global__ void kernel(float *d_src, float *d_dst, int cols, int rows) { __shared__ float mat[TILE][TILE+1]; //block start element int bx=blockIdx.x*TILE; //block start x int by=blockIdx.y*TILE; //block start y //thread element int i = by+ threadIdx.y; int j = bx+ threadIdx.x; #pragma unroll for(int k=0;k<TILE;k+=SIDE){ if(i+k<rows&&j<cols) mat[threadIdx.y+k][threadIdx.x]=d_src[((i+k)*cols)+j]; } __syncthreads(); int ti=bx+threadIdx.y; int tj=by+threadIdx.x; #pragma unroll for(int k=0;k<TILE;k+=SIDE){ if((ti+k)<cols&&tj<rows) d_dst[(ti+k)*rows+tj]=mat[threadIdx.x][threadIdx.y+k]; } } int main(int argc, char *argv[]) { int rows, cols; if (argc >= 3) { rows = atoi(argv[1]); cols = atoi(argv[2]); } else { rows = 4096; cols = 4096; } //initialization float *src, *dst; float *d_src, *d_dst; src = initial_matrix(rows, cols); dst = (float *) malloc(rows * cols * sizeof(float)); //size_t pitch; //hipMallocPitch(&d_src, &pitch, cols * sizeof(int), rows); //Upload Data int size = rows * cols * sizeof(float); hipMalloc((void **) &d_src, size); hipMalloc((void **) &d_dst, size); hipMemcpy(d_src, src, size, hipMemcpyHostToDevice); //Kernel dim3 GridDim(cols/TILE, rows/TILE); dim3 BlockDim(TILE, SIDE); //we don't need so TILE*TILE threads in fact //count time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); for(int i=0;i<REP_TIME;++i) hipLaunchKernelGGL(( kernel) , dim3(GridDim), dim3(BlockDim) , 0, 0, d_src, d_dst, cols, rows); hipEventRecord(stop, 0); hipEventSynchronize(stop); float kernelTime; hipEventElapsedTime(&kernelTime, start, stop); printf("Bandwidth:%.4fGB/s\nTotal Time:%.4f(ms)\nIter:%d\nSize:(%d,%d)\n", size*2.0*1000.0/1024/1024/1024/(kernelTime/REP_TIME), kernelTime, REP_TIME, rows,cols); //Download Data hipMemcpy(dst, d_dst, size, hipMemcpyDeviceToHost); check(src,dst,rows,cols); // print_matrix(src,rows,cols); // printf("\n\n\n"); // print_matrix(dst,cols,rows); hipFree(d_src); hipFree(d_dst); free(src); free(dst); return 0; } float *initial_matrix(int rows, int cols) { float *pointer = (float *) malloc(rows * cols * sizeof(float)); for (int i = 0; i < rows * cols; ++i) pointer[i] = rand() % 100; return pointer; } void check(float *src, float *dst, int rows,int cols) { for (int i = 0; i < rows; ++i) { for (int j = 0; j < cols; ++j) { if (abs(src[i * cols + j] - dst[j * rows + i]) > 0.01) { printf("Result dismatch\n"); return; } } } printf("\nResult match!\n"); return; } void print_matrix(float *matrix, int rows,int cols) { for (int i = 0; i < rows; ++i) { for (int j = 0; j < cols; ++j) { printf("%.0f ", matrix[i * cols + j]); } printf("\n"); } }
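/*
 * Two notes on the transpose above, written as a standalone sketch (the helper below is
 * illustrative and not part of the file). First, the shared tile is declared TILE x (TILE+1):
 * the extra padding column is the usual trick so that reading the tile back column-wise does
 * not land in a single shared-memory bank. Second, the bandwidth printed at the end counts the
 * matrix once for the read and once for the write (size * 2), divides by the average time of
 * one launch (kernelTime / REP_TIME, in ms), and reports GiB/s.
 */
#include <cstdio>

static double effective_bandwidth_gib_s(double bytes_moved, double total_ms, int iterations) {
  double ms_per_launch = total_ms / iterations;
  return bytes_moved * 1000.0 / 1024 / 1024 / 1024 / ms_per_launch;  // bytes per ms -> GiB/s
}

int main() {
  // A 4096 x 4096 float matrix is 64 MiB, so one transpose moves 128 MiB of data.
  double bytes = 4096.0 * 4096.0 * sizeof(float) * 2.0;
  printf("%.2f GiB/s\n", effective_bandwidth_gib_s(bytes, /*total_ms=*/250.0, /*iterations=*/100));
  return 0;
}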
0cbfb085395c908e7671cbf0f54ec5ea4c5e57a6.cu
#include<stdlib.h> #include<stdio.h> #include <math.h> #define REP_TIME 100 #define TILE 32 #define SIDE 8 float *initial_matrix(int rows, int cols); void print_matrix(float *matrix, int rows,int cols); void check(float *src, float *dst, int rows,int cols); __global__ void kernel(float *d_src, float *d_dst, int cols, int rows) { __shared__ float mat[TILE][TILE+1]; //block start element int bx=blockIdx.x*TILE; //block start x int by=blockIdx.y*TILE; //block start y //thread element int i = by+ threadIdx.y; int j = bx+ threadIdx.x; #pragma unroll for(int k=0;k<TILE;k+=SIDE){ if(i+k<rows&&j<cols) mat[threadIdx.y+k][threadIdx.x]=d_src[((i+k)*cols)+j]; } __syncthreads(); int ti=bx+threadIdx.y; int tj=by+threadIdx.x; #pragma unroll for(int k=0;k<TILE;k+=SIDE){ if((ti+k)<cols&&tj<rows) d_dst[(ti+k)*rows+tj]=mat[threadIdx.x][threadIdx.y+k]; } } int main(int argc, char *argv[]) { int rows, cols; if (argc >= 3) { rows = atoi(argv[1]); cols = atoi(argv[2]); } else { rows = 4096; cols = 4096; } //initialization float *src, *dst; float *d_src, *d_dst; src = initial_matrix(rows, cols); dst = (float *) malloc(rows * cols * sizeof(float)); //size_t pitch; //cudaMallocPitch(&d_src, &pitch, cols * sizeof(int), rows); //Upload Data int size = rows * cols * sizeof(float); cudaMalloc((void **) &d_src, size); cudaMalloc((void **) &d_dst, size); cudaMemcpy(d_src, src, size, cudaMemcpyHostToDevice); //Kernel dim3 GridDim(cols/TILE, rows/TILE); dim3 BlockDim(TILE, SIDE); //we don't need so TILE*TILE threads in fact //count time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); for(int i=0;i<REP_TIME;++i) kernel <<< GridDim, BlockDim >>> (d_src, d_dst, cols, rows); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float kernelTime; cudaEventElapsedTime(&kernelTime, start, stop); printf("Bandwidth:%.4fGB/s\nTotal Time:%.4f(ms)\nIter:%d\nSize:(%d,%d)\n", size*2.0*1000.0/1024/1024/1024/(kernelTime/REP_TIME), kernelTime, REP_TIME, rows,cols); //Download Data cudaMemcpy(dst, d_dst, size, cudaMemcpyDeviceToHost); check(src,dst,rows,cols); // print_matrix(src,rows,cols); // printf("\n\n\n"); // print_matrix(dst,cols,rows); cudaFree(d_src); cudaFree(d_dst); free(src); free(dst); return 0; } float *initial_matrix(int rows, int cols) { float *pointer = (float *) malloc(rows * cols * sizeof(float)); for (int i = 0; i < rows * cols; ++i) pointer[i] = rand() % 100; return pointer; } void check(float *src, float *dst, int rows,int cols) { for (int i = 0; i < rows; ++i) { for (int j = 0; j < cols; ++j) { if (abs(src[i * cols + j] - dst[j * rows + i]) > 0.01) { printf("Result dismatch\n"); return; } } } printf("\nResult match!\n"); return; } void print_matrix(float *matrix, int rows,int cols) { for (int i = 0; i < rows; ++i) { for (int j = 0; j < cols; ++j) { printf("%.0f ", matrix[i * cols + j]); } printf("\n"); } }
fba8a7d5f9606f9ada45226ae6bdee1bdd6701e7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @author Tingxing Dong @author Azzam Haidar @precisions normal z -> s d c */ #include "magma_internal.h" #include "magma_templates.h" #define PRECISION_z #define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2ed level, NB=256, NUM_THREADS=64 is optimal for batched #define NUM_THREADS 128 //64 //128 #define BLOCK_SIZE_N 128 #define DIM_X_N 128 #define DIM_Y_N 1 #define BLOCK_SIZE_T 32 #define DIM_X_T 16 #define DIM_Y_T 8 #include "ztrsv_template_device.cuh" #define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column extern __shared__ magmaDoubleComplex shared_data[]; /******************************************************************************/ template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag > __global__ void ztrsv_notrans_kernel_outplace( int n, const magmaDoubleComplex * __restrict__ A, int lda, magmaDoubleComplex *b, int incb, magmaDoubleComplex *x) { ztrsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x); } /******************************************************************************/ template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag > __global__ void ztrsv_trans_kernel_outplace( int n, const magmaDoubleComplex * __restrict__ A, int lda, magmaDoubleComplex *b, int incb, magmaDoubleComplex *x) { ztrsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x); } /******************************************************************************/ extern "C" void magmablas_ztrsv_outofplace( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaDoubleComplex_const_ptr A, magma_int_t lda, magmaDoubleComplex_ptr b, magma_int_t incb, magmaDoubleComplex_ptr x, magma_queue_t queue, magma_int_t flag=0) { /* Check arguments */ magma_int_t info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -1; } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) { info = -2; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -3; } else if (n < 0) { info = -5; } else if (lda < max(1,n)) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } // quick return if possible. 
if (n == 0) return; dim3 threads( NUM_THREADS ); dim3 blocks( 1, 1, 1 ); size_t shmem = n * sizeof(magmaDoubleComplex); if (trans == MagmaNoTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } else //Lower { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit>) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit>) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } } else if (trans == MagmaTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } else { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit >) , 
dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } } else if (trans == MagmaConjTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } else { if (diag == MagmaNonUnit) { if (flag == 0) { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } else { hipLaunchKernelGGL(( ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit >) , dim3(blocks), dim3(threads), shmem, queue->cuda_stream() , n, A, lda, b, incb, x); } } } } } /******************************************************************************/ /* README: flag decides if the ztrsv_outplace see an updated x or not. 
0: No; other: Yes In recursive, flag must be nonzero except the 1st call */ extern "C" void magmablas_ztrsv_recursive_outofplace( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaDoubleComplex_const_ptr A, magma_int_t lda, magmaDoubleComplex_ptr b, magma_int_t incb, magmaDoubleComplex_ptr x, magma_queue_t queue) { /* Check arguments */ magma_int_t info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -1; } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) { info = -2; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -3; } else if (n < 0) { info = -5; } else if (lda < max(1,n)) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } // quick return if possible. if (n == 0) return; //Init x with zero //magmablas_zlaset( MagmaFull, n, incb, MAGMA_Z_ZERO, MAGMA_Z_ZERO, x, n, queue ); magma_int_t col = n; if (trans == MagmaNoTrans) { for (magma_int_t i=0; i < n; i+= NB) { magma_int_t jb = min(NB, n-i); if (uplo == MagmaUpper) { col -= jb; //assume x_array contains zero elements, magmablas_zgemv will cause slow down magma_zgemv( MagmaNoTrans, jb, i, MAGMA_Z_ONE, A(col, col+jb), lda, x+col+jb, 1, MAGMA_Z_ONE, x+col, 1, queue ); } else { col = i; magma_zgemv( MagmaNoTrans, jb, i, MAGMA_Z_ONE, A(col, 0), lda, x, 1, MAGMA_Z_ONE, x+col, 1, queue ); } magmablas_ztrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i ); } } else { for (magma_int_t i=0; i < n; i += NB) { magma_int_t jb = min(NB, n-i); if (uplo == MagmaLower) { col -= jb; magma_zgemv( MagmaConjTrans, i, jb, MAGMA_Z_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_Z_ONE, x+col, 1, queue ); } else { col = i; magma_zgemv( MagmaConjTrans, i, jb, MAGMA_Z_ONE, A(0, col), lda, x, 1, MAGMA_Z_ONE, x+col, 1, queue ); } magmablas_ztrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i ); } } } /***************************************************************************//** Purpose ------- ztrsv solves one of the matrix equations on gpu op(A)*x = B, or x*op(A) = B, where alpha is a scalar, X and B are vectors, A is a unit, or non-unit, upper or lower triangular matrix and op(A) is one of op(A) = A, or op(A) = A^T, or op(A) = A^H. The vector x is overwritten on b. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: - = MagmaUpper: A is an upper triangular matrix. - = MagmaLower: A is a lower triangular matrix. @param[in] trans magma_trans_t. On entry, trans specifies the form of op(A) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op(A) = A. - = MagmaTrans: op(A) = A^T. - = MagmaConjTrans: op(A) = A^H. @param[in] diag magma_diag_t. On entry, diag specifies whether or not A is unit triangular as follows: - = MagmaUnit: A is assumed to be unit triangular. - = MagmaNonUnit: A is not assumed to be unit triangular. @param[in] n INTEGER. On entry, n N specifies the order of the matrix A. n >= 0. @param[in] dA COMPLEX_16 array of dimension ( lda, n ) Before entry with uplo = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with uplo = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. 
Note that when diag = MagmaUnit, the diagonal elements of A are not referenced either, but are assumed to be unity. @param[in] ldda INTEGER. On entry, lda specifies the first dimension of A. lda >= max( 1, n ). @param[in] db COMPLEX_16 array of dimension n On exit, b is overwritten with the solution vector X. @param[in] incb INTEGER. On entry, incb specifies the increment for the elements of b. incb must not be zero. Unchanged on exit. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_trsv *******************************************************************************/ extern "C" void magmablas_ztrsv( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaDoubleComplex_const_ptr dA, magma_int_t ldda, magmaDoubleComplex_ptr db, magma_int_t incb, magma_queue_t queue) { magma_int_t size_x = n * incb; magmaDoubleComplex_ptr dx=NULL; magma_zmalloc( &dx, size_x ); magmablas_zlaset( MagmaFull, n, 1, MAGMA_Z_ZERO, MAGMA_Z_ZERO, dx, n, queue ); magmablas_ztrsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue ); magmablas_zlacpy( MagmaFull, n, 1, dx, n, db, n, queue ); magma_free( dx ); }
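/*
 * A plain CPU sketch of the blocked pattern used by magmablas_ztrsv_recursive_outofplace above
 * for the Lower / NoTrans case, written in double precision instead of double-complex to keep it
 * short (the function below is illustrative, not MAGMA code). For each NB-wide block the
 * already-solved prefix of x is folded into the right-hand side (the magma_zgemv call), and then
 * the small diagonal triangle is solved (the magmablas_ztrsv_outofplace call). A is column-major
 * with leading dimension lda; x holds b on entry and the solution on exit.
 */
#include <cstdio>
#include <vector>

void blocked_lower_trsv(int n, int nb, const std::vector<double> &A, int lda,
                        std::vector<double> &x) {
  for (int col = 0; col < n; col += nb) {
    int jb = (n - col < nb) ? (n - col) : nb;
    // Update: x[col .. col+jb) -= A[col .. col+jb, 0 .. col) * x[0 .. col)
    for (int i = col; i < col + jb; ++i)
      for (int j = 0; j < col; ++j)
        x[i] -= A[i + j * lda] * x[j];
    // Solve the jb x jb non-unit diagonal block by forward substitution
    for (int i = col; i < col + jb; ++i) {
      for (int j = col; j < i; ++j)
        x[i] -= A[i + j * lda] * x[j];
      x[i] /= A[i + i * lda];
    }
  }
}

int main() {
  // Lower-triangular 3x3 system, column-major: rows are (2,0,0), (1,3,0), (4,5,6).
  std::vector<double> A = {2, 1, 4, 0, 3, 5, 0, 0, 6};
  std::vector<double> x = {2, 7, 32};  // right-hand side b
  blocked_lower_trsv(3, /*nb=*/2, A, /*lda=*/3, x);
  printf("x = %.1f %.1f %.1f\n", x[0], x[1], x[2]);  // expected 1.0 2.0 3.0
  return 0;
}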
fba8a7d5f9606f9ada45226ae6bdee1bdd6701e7.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @author Tingxing Dong @author Azzam Haidar @precisions normal z -> s d c */ #include "magma_internal.h" #include "magma_templates.h" #define PRECISION_z #define NB 256 //NB is the 1st level blocking in recursive blocking, NUM_THREADS is the 2ed level, NB=256, NUM_THREADS=64 is optimal for batched #define NUM_THREADS 128 //64 //128 #define BLOCK_SIZE_N 128 #define DIM_X_N 128 #define DIM_Y_N 1 #define BLOCK_SIZE_T 32 #define DIM_X_T 16 #define DIM_Y_T 8 #include "ztrsv_template_device.cuh" #define A(i, j) (A + (i) + (j)*lda) // A(i, j) means at i row, j column extern __shared__ magmaDoubleComplex shared_data[]; /******************************************************************************/ template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag > __global__ void ztrsv_notrans_kernel_outplace( int n, const magmaDoubleComplex * __restrict__ A, int lda, magmaDoubleComplex *b, int incb, magmaDoubleComplex *x) { ztrsv_notrans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x); } /******************************************************************************/ template< const int BLOCK_SIZE, const int DIM_X, const int DIM_Y, const int TILE_SIZE, const int flag, const magma_uplo_t uplo, const magma_trans_t trans, const magma_diag_t diag > __global__ void ztrsv_trans_kernel_outplace( int n, const magmaDoubleComplex * __restrict__ A, int lda, magmaDoubleComplex *b, int incb, magmaDoubleComplex *x) { ztrsv_trans_device< BLOCK_SIZE, DIM_X, DIM_Y, TILE_SIZE, flag, uplo, trans, diag >( n, A, lda, b, incb, x); } /******************************************************************************/ extern "C" void magmablas_ztrsv_outofplace( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaDoubleComplex_const_ptr A, magma_int_t lda, magmaDoubleComplex_ptr b, magma_int_t incb, magmaDoubleComplex_ptr x, magma_queue_t queue, magma_int_t flag=0) { /* Check arguments */ magma_int_t info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -1; } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) { info = -2; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -3; } else if (n < 0) { info = -5; } else if (lda < max(1,n)) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } // quick return if possible. 
if (n == 0) return; dim3 threads( NUM_THREADS ); dim3 blocks( 1, 1, 1 ); size_t shmem = n * sizeof(magmaDoubleComplex); if (trans == MagmaNoTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaUpper, MagmaNoTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaUpper, MagmaNoTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } else //Lower { if (diag == MagmaNonUnit) { if (flag == 0) { ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 0, MagmaLower, MagmaNoTrans, MagmaUnit> <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ztrsv_notrans_kernel_outplace< BLOCK_SIZE_N, DIM_X_N, DIM_Y_N, MagmaBigTileSize, 1, MagmaLower, MagmaNoTrans, MagmaUnit> <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } } else if (trans == MagmaTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } else { if (diag == MagmaNonUnit) { if (flag == 0) { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { ztrsv_trans_kernel_outplace< 
BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } } else if (trans == MagmaConjTrans) { if (uplo == MagmaUpper) { if (diag == MagmaNonUnit) { if (flag == 0) { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 0, MagmaUpper, MagmaConjTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaUpper, MagmaConjTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } else { if (diag == MagmaNonUnit) { if (flag == 0) { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaNonUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } else if (diag == MagmaUnit) { if (flag == 0) { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T,MagmaBigTileSize, 0, MagmaLower, MagmaConjTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } else { ztrsv_trans_kernel_outplace< BLOCK_SIZE_T, DIM_X_T, DIM_Y_T, MagmaBigTileSize, 1, MagmaLower, MagmaConjTrans, MagmaUnit > <<< blocks, threads, shmem, queue->cuda_stream() >>> (n, A, lda, b, incb, x); } } } } } /******************************************************************************/ /* README: flag decides if the ztrsv_outplace see an updated x or not. 0: No; other: Yes In recursive, flag must be nonzero except the 1st call */ extern "C" void magmablas_ztrsv_recursive_outofplace( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaDoubleComplex_const_ptr A, magma_int_t lda, magmaDoubleComplex_ptr b, magma_int_t incb, magmaDoubleComplex_ptr x, magma_queue_t queue) { /* Check arguments */ magma_int_t info = 0; if ( uplo != MagmaUpper && uplo != MagmaLower ) { info = -1; } else if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) { info = -2; } else if ( diag != MagmaUnit && diag != MagmaNonUnit ) { info = -3; } else if (n < 0) { info = -5; } else if (lda < max(1,n)) { info = -8; } if (info != 0) { magma_xerbla( __func__, -(info) ); return; } // quick return if possible. 
if (n == 0) return; //Init x with zero //magmablas_zlaset( MagmaFull, n, incb, MAGMA_Z_ZERO, MAGMA_Z_ZERO, x, n, queue ); magma_int_t col = n; if (trans == MagmaNoTrans) { for (magma_int_t i=0; i < n; i+= NB) { magma_int_t jb = min(NB, n-i); if (uplo == MagmaUpper) { col -= jb; //assume x_array contains zero elements, magmablas_zgemv will cause slow down magma_zgemv( MagmaNoTrans, jb, i, MAGMA_Z_ONE, A(col, col+jb), lda, x+col+jb, 1, MAGMA_Z_ONE, x+col, 1, queue ); } else { col = i; magma_zgemv( MagmaNoTrans, jb, i, MAGMA_Z_ONE, A(col, 0), lda, x, 1, MAGMA_Z_ONE, x+col, 1, queue ); } magmablas_ztrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i ); } } else { for (magma_int_t i=0; i < n; i += NB) { magma_int_t jb = min(NB, n-i); if (uplo == MagmaLower) { col -= jb; magma_zgemv( MagmaConjTrans, i, jb, MAGMA_Z_ONE, A(col+jb, col), lda, x+col+jb, 1, MAGMA_Z_ONE, x+col, 1, queue ); } else { col = i; magma_zgemv( MagmaConjTrans, i, jb, MAGMA_Z_ONE, A(0, col), lda, x, 1, MAGMA_Z_ONE, x+col, 1, queue ); } magmablas_ztrsv_outofplace( uplo, trans, diag, jb, A(col, col), lda, b+col, incb, x+col, queue, i ); } } } /***************************************************************************//** Purpose ------- ztrsv solves one of the matrix equations on gpu op(A)*x = B, or x*op(A) = B, where alpha is a scalar, X and B are vectors, A is a unit, or non-unit, upper or lower triangular matrix and op(A) is one of op(A) = A, or op(A) = A^T, or op(A) = A^H. The vector x is overwritten on b. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: - = MagmaUpper: A is an upper triangular matrix. - = MagmaLower: A is a lower triangular matrix. @param[in] trans magma_trans_t. On entry, trans specifies the form of op(A) to be used in the matrix multiplication as follows: - = MagmaNoTrans: op(A) = A. - = MagmaTrans: op(A) = A^T. - = MagmaConjTrans: op(A) = A^H. @param[in] diag magma_diag_t. On entry, diag specifies whether or not A is unit triangular as follows: - = MagmaUnit: A is assumed to be unit triangular. - = MagmaNonUnit: A is not assumed to be unit triangular. @param[in] n INTEGER. On entry, n N specifies the order of the matrix A. n >= 0. @param[in] dA COMPLEX_16 array of dimension ( lda, n ) Before entry with uplo = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with uplo = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when diag = MagmaUnit, the diagonal elements of A are not referenced either, but are assumed to be unity. @param[in] ldda INTEGER. On entry, lda specifies the first dimension of A. lda >= max( 1, n ). @param[in] db COMPLEX_16 array of dimension n On exit, b is overwritten with the solution vector X. @param[in] incb INTEGER. On entry, incb specifies the increment for the elements of b. incb must not be zero. Unchanged on exit. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_trsv *******************************************************************************/ extern "C" void magmablas_ztrsv( magma_uplo_t uplo, magma_trans_t trans, magma_diag_t diag, magma_int_t n, magmaDoubleComplex_const_ptr dA, magma_int_t ldda, magmaDoubleComplex_ptr db, magma_int_t incb, magma_queue_t queue) { magma_int_t size_x = n * incb; magmaDoubleComplex_ptr dx=NULL; magma_zmalloc( &dx, size_x ); magmablas_zlaset( MagmaFull, n, 1, MAGMA_Z_ZERO, MAGMA_Z_ZERO, dx, n, queue ); magmablas_ztrsv_recursive_outofplace( uplo, trans, diag, n, dA, ldda, db, incb, dx, queue ); magmablas_zlacpy( MagmaFull, n, 1, dx, n, db, n, queue ); magma_free( dx ); }
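For context on magmablas_ztrsv above: it allocates a scratch vector x, runs the recursive out-of-place solver (an NB-blocked sweep that updates the right-hand side with magma_zgemv and solves each diagonal block with magmablas_ztrsv_outofplace), then copies x back over b. Below is a minimal host-side sketch of driving the routine; the surrounding setup calls are the standard MAGMA 2.x API, but the problem size and matrix values are illustrative assumptions, not taken from this file.

// Hypothetical driver: solve A * x = b for an upper-triangular A; x overwrites db.
#include "magma_v2.h"

int main() {
    magma_init();
    magma_queue_t queue;
    magma_queue_create(0 /*device*/, &queue);

    magma_int_t n = 1024, lda = n;
    magmaDoubleComplex *hA, *hb;
    magma_zmalloc_cpu(&hA, lda * n);
    magma_zmalloc_cpu(&hb, n);
    // Fill a well-conditioned upper-triangular A and a right-hand side b (illustrative values).
    for (magma_int_t j = 0; j < n; ++j) {
        for (magma_int_t i = 0; i < n; ++i)
            hA[i + j*lda] = (i == j) ? MAGMA_Z_MAKE(2.0, 0.0)
                                     : (i < j ? MAGMA_Z_MAKE(0.01, 0.0) : MAGMA_Z_ZERO);
        hb[j] = MAGMA_Z_ONE;
    }

    magmaDoubleComplex_ptr dA, db;
    magma_zmalloc(&dA, lda * n);
    magma_zmalloc(&db, n);
    magma_zsetmatrix(n, n, hA, lda, dA, lda, queue);
    magma_zsetvector(n, hb, 1, db, 1, queue);

    // Same argument order as documented above: uplo, trans, diag, n, dA, ldda, db, incb, queue.
    magmablas_ztrsv(MagmaUpper, MagmaNoTrans, MagmaNonUnit, n, dA, lda, db, 1, queue);

    magma_zgetvector(n, db, 1, hb, 1, queue);   // bring the solution back to the host

    magma_free(dA);      magma_free(db);
    magma_free_cpu(hA);  magma_free_cpu(hb);
    magma_queue_destroy(queue);
    magma_finalize();
    return 0;
}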
e145dbdb5e1371c9c505d8f01b3af3367862329d.hip
// !!! This is a file automatically generated by hipify!!! #ifndef __GDD_LOG_CU__ #define __GDD_LOG_CU__ #include "common.hip" /* Logarithm. Computes log(x) in double-double precision. This is a natural logarithm (i.e., base e). */ __device__ gdd_real log(const gdd_real &a) { if (is_one(a)) { return make_dd(0.0); } //!!!!!!!!! //TODO: return an error if (a.x <= 0.0) { //return _nan; return make_dd(0.0); } gdd_real x = make_dd(log(a.x)); // Initial approximation x = x + a * exp(negative(x)) - 1.0; return x; } #endif /* __GDD_LOG_CU__ */
e145dbdb5e1371c9c505d8f01b3af3367862329d.cu
#ifndef __GDD_LOG_CU__ #define __GDD_LOG_CU__ #include "common.cu" /* Logarithm. Computes log(x) in double-double precision. This is a natural logarithm (i.e., base e). */ __device__ gdd_real log(const gdd_real &a) { if (is_one(a)) { return make_dd(0.0); } //!!!!!!!!! //TODO: return an error if (a.x <= 0.0) { //return _nan; return make_dd(0.0); } gdd_real x = make_dd(log(a.x)); // Initial approximation x = x + a * exp(negative(x)) - 1.0; return x; } #endif /* __GDD_LOG_CU__ */
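A note on the correction step shared by both versions of log() above: starting from the double-precision seed x0 = log(a.x), the line x = x + a * exp(negative(x)) - 1.0 is one Newton iteration for the root of f(x) = exp(x) - a:

    f(x) = exp(x) - a,   f'(x) = exp(x)
    x_{k+1} = x_k - f(x_k)/f'(x_k) = x_k + a * exp(-x_k) - 1

Since Newton's method roughly doubles the number of correct digits per step, a single correction is enough to refine a double-precision seed to double-double accuracy.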
9d85d5639f5b3e6df4a167c866b6f45307be8812.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* CUDA good to knows: Basics: Per thread: registers (fast) local memory (off-chip [still on the GPU though], slow) Per block: multiple threads shared memory (semi-fast) Per GPU: Multiple kernels that each run multiple blocks Global memory (off-chip [still on the GPU though], slow) Threads are executed by thread processors Threads reside in thread blocks Thread blocks are executed by multiprocessors Several concurrent thread blocks can reside on one multiprocessor Limited by multiprocessor resources (shared memory and registers) A kernel is launched as a grid of thread blocks. Only one kernel can execute on a device at a time. Advanced: hipMemcpy(dst, src, size, direction) blocks CPU thread. Compiler tips: nvcc <filename>.cu [-o <executable>] Builds release mode nvcc -g <filename>.cu Builds debug mode Can debug host code but not device code nvcc -deviceemu <filename>.cu Builds device emulation mode All code runs on CPU, no debug symbols nvcc -deviceemu -g <filename>.cu Builds debug device emulation mode All code runs on CPU, with debug symbols Tips and tricks: If our arrays A,B,C are shorter than 1024 elements, N < 1024, then one thread block is enough N threads in the thread block If our arrays are longer than 1024, then Choose the number of threads in the thread blocks to be integer*32 Calculate how many thread blocks you need There will be some threads that should do nothing Why multiples of 32? Threads are executed synchronously in bunches of 32 = warp All threads must have their data ready before the warp runs Cache lines are 4 B x warp size = 128 B GPU resources can be fully utilized when these parameters are used # of blocks = ceil(N/threadsInBlock) = (N+threadsInBlock-1)/threadsInBlock Compile: nvcc -o galaxy galaxy_program.cu -res-usage Run: time ./galaxy */ #include <stdio.h> #include <iostream> #include <fstream> using namespace std; // Declare functions and classes that are below main. class GalaxyFile{ public: int number_of_galaxies; float *alphas, *deltas; GalaxyFile(){} GalaxyFile(int num, float *as, float *ds) { number_of_galaxies = num; alphas = as; deltas = ds; } }; void print_omegas(float*, int); void write_omegas_to_file(string, float*); void write_histogram_to_file(string, int*); void print_histogram(string, int*, int); GalaxyFile readFile(string); // Define some useful macros #define BIN_WIDTH 0.25f #define BIN_MIN 0.0f #define BIN_MAX 180.0f #define NUMBER_OF_BINS (int)(BIN_MAX*(1.0f/BIN_WIDTH)) // Google is your friend. 
#define ARCMINS_TO_RADIANS 0.000290888209f #define RADIANS_TO_DEGREES 57.295779513f __global__ void angle_between_galaxies(float *alphas1, float *deltas1, float *alphas2, float *deltas2, int *gpu_hist){ int idx = blockDim.x*blockIdx.x + threadIdx.x; if(idx < 100000){ for(int i=0; i<100000; i++){ float angle = 0.0f; // Don't do duplicates if( alphas1[i] != alphas2[idx] && deltas1[i] != deltas2[idx] ) { float x = sin(deltas1[i]) * sin(deltas2[idx]) + cos(deltas1[i]) * cos(deltas2[idx]) * cos(alphas1[i] - alphas2[idx]); angle = acosf(fmaxf(-1.0f, fminf(x, 1.0f))) * RADIANS_TO_DEGREES; } int ix = (int)(floor(angle * (1.0f/BIN_WIDTH))) % NUMBER_OF_BINS; __syncthreads(); atomicAdd(&gpu_hist[ix], 1); } } } int* calculate_histogram(GalaxyFile galaxies1, GalaxyFile galaxies2){ // Declare and allocate memory for histogram arrays that will be accessible on CPU float galaxy_array_size = galaxies1.number_of_galaxies * sizeof(float); float histogram_size = NUMBER_OF_BINS * sizeof(int); int *histogram; int *total_histogram; histogram = (int *) malloc(NUMBER_OF_BINS*sizeof(int)); total_histogram = (int *) malloc(NUMBER_OF_BINS*sizeof(int)); memset(total_histogram, 0, NUMBER_OF_BINS*sizeof(int)); // Declare angle arrays that will be accessible on GPU float *gpu_alphas1; float *gpu_deltas1; float *gpu_alphas2; float *gpu_deltas2; int *gpu_histogram; // Allocate memory on GPU for angle arrays hipMalloc((void**) &gpu_alphas1, galaxy_array_size); hipMalloc((void**) &gpu_deltas1, galaxy_array_size); hipMalloc((void**) &gpu_alphas2, galaxy_array_size); hipMalloc((void**) &gpu_deltas2, galaxy_array_size); hipMalloc((void**) &gpu_histogram, NUMBER_OF_BINS*sizeof(int)); // Copy angles from CPU onto GPU hipMemcpy(gpu_alphas1, galaxies1.alphas, galaxy_array_size, hipMemcpyHostToDevice); hipMemcpy(gpu_deltas1, galaxies1.deltas, galaxy_array_size, hipMemcpyHostToDevice); hipMemcpy(gpu_alphas2, galaxies2.alphas, galaxy_array_size, hipMemcpyHostToDevice); hipMemcpy(gpu_deltas2, galaxies2.deltas, galaxy_array_size, hipMemcpyHostToDevice); hipMemcpy(gpu_histogram, histogram, galaxy_array_size, hipMemcpyHostToDevice); int warp_size = 32; int threadsInBlock = 11 * warp_size; int blocksInGrid = ceil((galaxies1.number_of_galaxies + threadsInBlock) / threadsInBlock); // Define the grid size (blocks per grid) dim3 dimGrid(blocksInGrid); // Define block size (threads per block) dim3 dimBlock(threadsInBlock); // Write histogram full of zeros hipMemset(gpu_histogram, 0, histogram_size); // Calculate angles between galaxies1[i] and every galaxy in galaxies2 hipLaunchKernelGGL(( angle_between_galaxies), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_alphas1, gpu_deltas1, gpu_alphas2, gpu_deltas2, gpu_histogram); // Copy result histogram into CPU histogram hipMemcpy(histogram, gpu_histogram, histogram_size, hipMemcpyDeviceToHost); // Free all the memory we allocated on the GPU hipFree( gpu_alphas1 ); hipFree( gpu_deltas1 ); hipFree( gpu_alphas2 ); hipFree( gpu_deltas2 ); hipFree( gpu_histogram ); return histogram; } float* calculate_omegas(int* DD, int* DR, int* RR){ float* omegas; omegas = (float *) malloc(NUMBER_OF_BINS*sizeof(float)); for(int i=0; i<NUMBER_OF_BINS; i++){ if(RR[i] != 0.0f){ omegas[i] = (DD[i] - 2.0f*DR[i] + RR[i]) / RR[i]; }else{ omegas[i] = 0.0f; } } return omegas; } // CUDA program that calculates distribution of galaxies int main() { // Read files and store data in GalaxyFile classes. 
GalaxyFile galaxies1; GalaxyFile galaxies2; galaxies1 = readFile("test_data/flat_100k_arcmin.txt"); galaxies2 = readFile("test_data/data_100k_arcmin.txt"); int* DD_hist = calculate_histogram(galaxies1, galaxies1); int* DR_hist = calculate_histogram(galaxies1, galaxies2); int* RR_hist = calculate_histogram(galaxies2, galaxies2); print_histogram("DD", DD_hist, 20); print_histogram("DR", DR_hist, 20); print_histogram("RR", RR_hist, 20); write_histogram_to_file("dd_histogram.txt", DD_hist); write_histogram_to_file("dr_histogram.txt", DR_hist); write_histogram_to_file("rr_histogram.txt", RR_hist); float* omegas = calculate_omegas(DD_hist, DR_hist, RR_hist); print_omegas(omegas, 15); write_omegas_to_file("omegas.txt", omegas); return EXIT_SUCCESS; } /* UTILITY FUNCTIONS/CLASSES BELOW */ GalaxyFile readFile(string filename) { ifstream infile(filename); int number_of_galaxies; // Read first line which is the number of galaxies that's stored in the file. infile >> number_of_galaxies; float galaxy_array_size = number_of_galaxies * sizeof(float); float *alphas, *deltas; alphas = (float*) malloc(galaxy_array_size); deltas = (float*) malloc(galaxy_array_size); float alpha; float delta; // Read arc minute angles for each galaxy // Then convert those angles to radians and store those in alphas and deltas arrays for(int i=0; i<number_of_galaxies; i++) { infile >> alpha >> delta; alphas[i] = alpha * ARCMINS_TO_RADIANS; deltas[i] = delta * ARCMINS_TO_RADIANS; } infile.close(); GalaxyFile galaxyFile(number_of_galaxies, alphas, deltas); return galaxyFile; } void print_omegas(float* omegas, int bins_to_print){ for (int i=0; i<NUMBER_OF_BINS; i++){ if(omegas[i] != 0.0f && i < bins_to_print){ printf("omegas[%d]: %f\n", i, omegas[i]); } } } void print_histogram(string label, int *histogram, int bins_to_print){ long long galaxies_counted = 0; // Print each bucket bin that has 1 or more galaxy-pair-angle in it. for (int i=0; i<NUMBER_OF_BINS; i++) { float bucket_min = (float)i / (1.0f/BIN_WIDTH); float bucket_max = (float)i / (1.0f/BIN_WIDTH) + BIN_WIDTH; int bucket_value = histogram[i]; galaxies_counted += histogram[i]; if(bucket_value > 0 && i < bins_to_print){ printf("[%f, %f]: %d\n", bucket_min, bucket_max, bucket_value); } } cout << "Galaxy pairs counted in " << label << ": " << galaxies_counted << endl; } void write_omegas_to_file(string filename, float* omegas){ ofstream file; file.open("output/"+filename); for (int i=0; i<NUMBER_OF_BINS; i++){ file << omegas[i]; if(i<NUMBER_OF_BINS-1) file << "\n"; } file.close(); } void write_histogram_to_file(string filename, int* histogram){ ofstream file; file.open("output/"+filename); for (int i=0; i<NUMBER_OF_BINS; i++){ file << histogram[i]; if(i<NUMBER_OF_BINS-1) file << "\n"; } file.close(); }
9d85d5639f5b3e6df4a167c866b6f45307be8812.cu
/* CUDA good to knows: Basics: Per thread: registers (fast) local memory (off-chip [still on the GPU though], slow) Per block: multiple threads shared memory (semi-fast) Per GPU: Multiple kernels that each run multiple blocks Global memory (off-chip [still on the GPU though], slow) Threads are executed by thread processors Threads reside in thread blocks Thread blocks are executed by multiprocessors Several concurrent thread blocks can reside on one multiprocessor Limited by multiprocessor resources (shared memory and registers) A kernel is launched as a grid of thread blocks. Only one kernel can execute on a device at a time. Advanced: cudaMemcpy(dst, src, size, direction) blocks CPU thread. Compiler tips: nvcc <filename>.cu [-o <executable>] Builds release mode nvcc -g <filename>.cu Builds debug mode Can debug host code but not device code nvcc -deviceemu <filename>.cu Builds device emulation mode All code runs on CPU, no debug symbols nvcc -deviceemu -g <filename>.cu Builds debug device emulation mode All code runs on CPU, with debug symbols Tips and tricks: If our arrays A,B,C are shorter than 1024 elements, N < 1024, then – one thread block is enough – N threads in the thread block If our arrays are longer than 1024, then – Choose the number of threads in the thread blocks to be integer*32 – Calculate how many thread blocks you need – There will be some threads that should do nothing Why multiples of 32? – Threads are executed synchronously in bunches of 32 = warp – All threads must have their data ready before the warp runs – Cache lines are 4 B x warp size = 128 B – GPU resources can be fully utilized when these parameters are used # of blocks = ceil(N/threadsInBlock) = (N+threadsInBlock-1)/threadsInBlock Compile: nvcc -o galaxy galaxy_program.cu -res-usage Run: time ./galaxy */ #include <stdio.h> #include <iostream> #include <fstream> using namespace std; // Declare functions and classes that are below main. class GalaxyFile{ public: int number_of_galaxies; float *alphas, *deltas; GalaxyFile(){} GalaxyFile(int num, float *as, float *ds) { number_of_galaxies = num; alphas = as; deltas = ds; } }; void print_omegas(float*, int); void write_omegas_to_file(string, float*); void write_histogram_to_file(string, int*); void print_histogram(string, int*, int); GalaxyFile readFile(string); // Define some useful macros #define BIN_WIDTH 0.25f #define BIN_MIN 0.0f #define BIN_MAX 180.0f #define NUMBER_OF_BINS (int)(BIN_MAX*(1.0f/BIN_WIDTH)) // Google is your friend. 
#define ARCMINS_TO_RADIANS 0.000290888209f #define RADIANS_TO_DEGREES 57.295779513f __global__ void angle_between_galaxies(float *alphas1, float *deltas1, float *alphas2, float *deltas2, int *gpu_hist){ int idx = blockDim.x*blockIdx.x + threadIdx.x; if(idx < 100000){ for(int i=0; i<100000; i++){ float angle = 0.0f; // Don't do duplicates if( alphas1[i] != alphas2[idx] && deltas1[i] != deltas2[idx] ) { float x = sin(deltas1[i]) * sin(deltas2[idx]) + cos(deltas1[i]) * cos(deltas2[idx]) * cos(alphas1[i] - alphas2[idx]); angle = acosf(fmaxf(-1.0f, fminf(x, 1.0f))) * RADIANS_TO_DEGREES; } int ix = (int)(floor(angle * (1.0f/BIN_WIDTH))) % NUMBER_OF_BINS; __syncthreads(); atomicAdd(&gpu_hist[ix], 1); } } } int* calculate_histogram(GalaxyFile galaxies1, GalaxyFile galaxies2){ // Declare and allocate memory for histogram arrays that will be accessible on CPU float galaxy_array_size = galaxies1.number_of_galaxies * sizeof(float); float histogram_size = NUMBER_OF_BINS * sizeof(int); int *histogram; int *total_histogram; histogram = (int *) malloc(NUMBER_OF_BINS*sizeof(int)); total_histogram = (int *) malloc(NUMBER_OF_BINS*sizeof(int)); memset(total_histogram, 0, NUMBER_OF_BINS*sizeof(int)); // Declare angle arrays that will be accessible on GPU float *gpu_alphas1; float *gpu_deltas1; float *gpu_alphas2; float *gpu_deltas2; int *gpu_histogram; // Allocate memory on GPU for angle arrays cudaMalloc((void**) &gpu_alphas1, galaxy_array_size); cudaMalloc((void**) &gpu_deltas1, galaxy_array_size); cudaMalloc((void**) &gpu_alphas2, galaxy_array_size); cudaMalloc((void**) &gpu_deltas2, galaxy_array_size); cudaMalloc((void**) &gpu_histogram, NUMBER_OF_BINS*sizeof(int)); // Copy angles from CPU onto GPU cudaMemcpy(gpu_alphas1, galaxies1.alphas, galaxy_array_size, cudaMemcpyHostToDevice); cudaMemcpy(gpu_deltas1, galaxies1.deltas, galaxy_array_size, cudaMemcpyHostToDevice); cudaMemcpy(gpu_alphas2, galaxies2.alphas, galaxy_array_size, cudaMemcpyHostToDevice); cudaMemcpy(gpu_deltas2, galaxies2.deltas, galaxy_array_size, cudaMemcpyHostToDevice); cudaMemcpy(gpu_histogram, histogram, galaxy_array_size, cudaMemcpyHostToDevice); int warp_size = 32; int threadsInBlock = 11 * warp_size; int blocksInGrid = ceil((galaxies1.number_of_galaxies + threadsInBlock) / threadsInBlock); // Define the grid size (blocks per grid) dim3 dimGrid(blocksInGrid); // Define block size (threads per block) dim3 dimBlock(threadsInBlock); // Write histogram full of zeros cudaMemset(gpu_histogram, 0, histogram_size); // Calculate angles between galaxies1[i] and every galaxy in galaxies2 angle_between_galaxies<<<dimGrid, dimBlock>>>(gpu_alphas1, gpu_deltas1, gpu_alphas2, gpu_deltas2, gpu_histogram); // Copy result histogram into CPU histogram cudaMemcpy(histogram, gpu_histogram, histogram_size, cudaMemcpyDeviceToHost); // Free all the memory we allocated on the GPU cudaFree( gpu_alphas1 ); cudaFree( gpu_deltas1 ); cudaFree( gpu_alphas2 ); cudaFree( gpu_deltas2 ); cudaFree( gpu_histogram ); return histogram; } float* calculate_omegas(int* DD, int* DR, int* RR){ float* omegas; omegas = (float *) malloc(NUMBER_OF_BINS*sizeof(float)); for(int i=0; i<NUMBER_OF_BINS; i++){ if(RR[i] != 0.0f){ omegas[i] = (DD[i] - 2.0f*DR[i] + RR[i]) / RR[i]; }else{ omegas[i] = 0.0f; } } return omegas; } // CUDA program that calculates distribution of galaxies int main() { // Read files and store data in GalaxyFile classes. 
GalaxyFile galaxies1; GalaxyFile galaxies2; galaxies1 = readFile("test_data/flat_100k_arcmin.txt"); galaxies2 = readFile("test_data/data_100k_arcmin.txt"); int* DD_hist = calculate_histogram(galaxies1, galaxies1); int* DR_hist = calculate_histogram(galaxies1, galaxies2); int* RR_hist = calculate_histogram(galaxies2, galaxies2); print_histogram("DD", DD_hist, 20); print_histogram("DR", DR_hist, 20); print_histogram("RR", RR_hist, 20); write_histogram_to_file("dd_histogram.txt", DD_hist); write_histogram_to_file("dr_histogram.txt", DR_hist); write_histogram_to_file("rr_histogram.txt", RR_hist); float* omegas = calculate_omegas(DD_hist, DR_hist, RR_hist); print_omegas(omegas, 15); write_omegas_to_file("omegas.txt", omegas); return EXIT_SUCCESS; } /* UTILITY FUNCTIONS/CLASSES BELOW */ GalaxyFile readFile(string filename) { ifstream infile(filename); int number_of_galaxies; // Read first line which is the number of galaxies that's stored in the file. infile >> number_of_galaxies; float galaxy_array_size = number_of_galaxies * sizeof(float); float *alphas, *deltas; alphas = (float*) malloc(galaxy_array_size); deltas = (float*) malloc(galaxy_array_size); float alpha; float delta; // Read arc minute angles for each galaxy // Then convert those angles to radians and store those in alphas and deltas arrays for(int i=0; i<number_of_galaxies; i++) { infile >> alpha >> delta; alphas[i] = alpha * ARCMINS_TO_RADIANS; deltas[i] = delta * ARCMINS_TO_RADIANS; } infile.close(); GalaxyFile galaxyFile(number_of_galaxies, alphas, deltas); return galaxyFile; } void print_omegas(float* omegas, int bins_to_print){ for (int i=0; i<NUMBER_OF_BINS; i++){ if(omegas[i] != 0.0f && i < bins_to_print){ printf("omegas[%d]: %f\n", i, omegas[i]); } } } void print_histogram(string label, int *histogram, int bins_to_print){ long long galaxies_counted = 0; // Print each bucket bin that has 1 or more galaxy-pair-angle in it. for (int i=0; i<NUMBER_OF_BINS; i++) { float bucket_min = (float)i / (1.0f/BIN_WIDTH); float bucket_max = (float)i / (1.0f/BIN_WIDTH) + BIN_WIDTH; int bucket_value = histogram[i]; galaxies_counted += histogram[i]; if(bucket_value > 0 && i < bins_to_print){ printf("[%f, %f]: %d\n", bucket_min, bucket_max, bucket_value); } } cout << "Galaxy pairs counted in " << label << ": " << galaxies_counted << endl; } void write_omegas_to_file(string filename, float* omegas){ ofstream file; file.open("output/"+filename); for (int i=0; i<NUMBER_OF_BINS; i++){ file << omegas[i]; if(i<NUMBER_OF_BINS-1) file << "\n"; } file.close(); } void write_histogram_to_file(string filename, int* histogram){ ofstream file; file.open("output/"+filename); for (int i=0; i<NUMBER_OF_BINS; i++){ file << histogram[i]; if(i<NUMBER_OF_BINS-1) file << "\n"; } file.close(); }
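Two notes on the galaxy pair above. First, the upload hipMemcpy/cudaMemcpy(gpu_histogram, histogram, galaxy_array_size, ...) passes the galaxy-array byte count (number_of_galaxies * sizeof(float)) for a buffer that was allocated with only NUMBER_OF_BINS * sizeof(int) bytes, so it over-reads the host histogram and over-writes the device one; since gpu_histogram is memset to zero immediately afterwards, the copy can simply be dropped (or given histogram_size). Second, the block count is computed as ceil((N + threadsInBlock) / threadsInBlock) with integer division inside ceil, which launches one extra, idle block whenever threadsInBlock divides N; the round-up formula quoted in the file's own comment block avoids that. A minimal sketch of that formula (the helper name is illustrative, not part of the file):

// Integer ceil-divide: blocks = ceil(N / threadsPerBlock) = (N + threadsPerBlock - 1) / threadsPerBlock
inline int blocks_for(int n, int threads_per_block) {
    return (n + threads_per_block - 1) / threads_per_block;
}
// e.g. blocks_for(100000, 11 * 32) == 285, enough threads to cover all 100000 galaxies.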
75d1f7a991a6bcf94a63af3de761fb8ed195b749.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <time.h> #include <stdlib.h> enum { grid_count=16 }; __global__ void vectorAdditionKernel(float * A , float * B , float * C ,int dataCount){ int index = blockIdx.x *blockDim.x + threadIdx.x; if(index < dataCount) C[index] = A[index] + B[index]; } int main(){ int dataCount = 2100; float h_A[dataCount]; float h_B[dataCount]; float h_C[dataCount]; // initialize the values for(int i = 0 ; i < dataCount ; i++){ h_A[i] = (float) i ; h_B[i] = (float) i ; } float * d_A; hipMalloc(&d_A , dataCount * sizeof(float)); hipMemcpy(d_A ,h_A ,sizeof(float) * dataCount , hipMemcpyHostToDevice ); float * d_B; hipMalloc(&d_B , dataCount * sizeof(float)); hipMemcpy(d_B , h_B , sizeof(float) * dataCount , hipMemcpyHostToDevice); float * d_C; hipMalloc(&d_C , dataCount * sizeof(float)); hipMemcpy(d_C , h_C , dataCount * sizeof(float) , hipMemcpyHostToDevice); // call the kernel int threadPerBlock = dataCount/grid_count; hipLaunchKernelGGL(( vectorAdditionKernel), dim3(grid_count),dim3(threadPerBlock), 0, 0, d_A, d_B , d_C, dataCount); // get the data hipMemcpy(h_C , d_C , dataCount * sizeof(float) , hipMemcpyDeviceToHost); for(int i = 0 ; i < dataCount ;i++){ printf("%f \n" , h_C[i]); } return 0 ; }
75d1f7a991a6bcf94a63af3de761fb8ed195b749.cu
#include <stdio.h> #include <time.h> #include <stdlib.h> enum { grid_count=16 }; __global__ void vectorAdditionKernel(float * A , float * B , float * C ,int dataCount){ int index = blockIdx.x *blockDim.x + threadIdx.x; if(index < dataCount) C[index] = A[index] + B[index]; } int main(){ int dataCount = 2100; float h_A[dataCount]; float h_B[dataCount]; float h_C[dataCount]; // initialize the values for(int i = 0 ; i < dataCount ; i++){ h_A[i] = (float) i ; h_B[i] = (float) i ; } float * d_A; cudaMalloc(&d_A , dataCount * sizeof(float)); cudaMemcpy(d_A ,h_A ,sizeof(float) * dataCount , cudaMemcpyHostToDevice ); float * d_B; cudaMalloc(&d_B , dataCount * sizeof(float)); cudaMemcpy(d_B , h_B , sizeof(float) * dataCount , cudaMemcpyHostToDevice); float * d_C; cudaMalloc(&d_C , dataCount * sizeof(float)); cudaMemcpy(d_C , h_C , dataCount * sizeof(float) , cudaMemcpyHostToDevice); // call the kernel int threadPerBlock = dataCount/grid_count; vectorAdditionKernel<<<grid_count,threadPerBlock>>> (d_A, d_B , d_C, dataCount); // get the data cudaMemcpy(h_C , d_C , dataCount * sizeof(float) , cudaMemcpyDeviceToHost); for(int i = 0 ; i < dataCount ;i++){ printf("%f \n" , h_C[i]); } return 0 ; }
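A sizing note on the pair above: with dataCount = 2100 and grid_count = 16, threadPerBlock = dataCount / grid_count truncates to 131, so the launch runs 16 * 131 = 2096 threads and elements 2096..2099 of C are never computed (the idx < dataCount guard prevents out-of-bounds writes, but not under-coverage). The usual pattern is to fix the threads per block and round the block count up; a minimal sketch under that assumption (numbers illustrative, not part of the file):

// 256 threads per block (a multiple of the 32-thread warp); round the block count up so
// every element gets a thread. For dataCount = 2100 this gives 9 blocks = 2304 threads.
int threadsPerBlock = 256;
int blocksPerGrid = (dataCount + threadsPerBlock - 1) / threadsPerBlock;
vectorAdditionKernel<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, dataCount);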
4b57666338b14a8d0b69e6e9d3919b870a7346cf.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <float.h> #include <stdint.h> #include <limits> #include "BufferCompaction.h" #include "ExtensionFunctions.hpp" #include "GpuRtConstants.h" #include "HyperLogLogRank.h" extern "C" __device__ int32_t pos_start_impl(const int32_t* row_index_resume) { return blockIdx.x * blockDim.x + threadIdx.x; } extern "C" __device__ int32_t group_buff_idx_impl() { return pos_start_impl(NULL); } extern "C" __device__ int32_t pos_step_impl() { return blockDim.x * gridDim.x; } extern "C" __device__ int8_t thread_warp_idx(const int8_t warp_sz) { return threadIdx.x % warp_sz; } extern "C" __device__ const int64_t* init_shared_mem_nop( const int64_t* groups_buffer, const int32_t groups_buffer_size) { return groups_buffer; } extern "C" __device__ void write_back_nop(int64_t* dest, int64_t* src, const int32_t sz) { } extern "C" __device__ const int64_t* init_shared_mem(const int64_t* groups_buffer, const int32_t groups_buffer_size) { extern __shared__ int64_t fast_bins[]; if (threadIdx.x == 0) { memcpy(fast_bins, groups_buffer, groups_buffer_size); } __syncthreads(); return fast_bins; } /** * Dynamically allocates shared memory per block. * The amount of shared memory allocated is defined at kernel launch time. * Returns a pointer to the beginning of allocated shared memory */ extern "C" __device__ int64_t* alloc_shared_mem_dynamic() { extern __shared__ int64_t groups_buffer_smem[]; return groups_buffer_smem; } /** * Set the allocated shared memory elements to be equal to the 'identity_element'. * groups_buffer_size: number of 64-bit elements in shared memory per thread-block * NOTE: groups_buffer_size is in units of 64-bit elements. */ extern "C" __device__ void set_shared_mem_to_identity( int64_t* groups_buffer_smem, const int32_t groups_buffer_size, const int64_t identity_element = 0) { #pragma unroll for (int i = threadIdx.x; i < groups_buffer_size; i += blockDim.x) { groups_buffer_smem[i] = identity_element; } __syncthreads(); } /** * Initialize dynamic shared memory: * 1. Allocates dynamic shared memory * 2. Set every allocated element to be equal to the 'identity element', by default zero. */ extern "C" __device__ const int64_t* init_shared_mem_dynamic( const int64_t* groups_buffer, const int32_t groups_buffer_size) { int64_t* groups_buffer_smem = alloc_shared_mem_dynamic(); set_shared_mem_to_identity(groups_buffer_smem, groups_buffer_size); return groups_buffer_smem; } extern "C" __device__ void write_back(int64_t* dest, int64_t* src, const int32_t sz) { __syncthreads(); if (threadIdx.x == 0) { memcpy(dest, src, sz); } } extern "C" __device__ void write_back_smem_nop(int64_t* dest, int64_t* src, const int32_t sz) {} extern "C" __device__ void agg_from_smem_to_gmem_nop(int64_t* gmem_dest, int64_t* smem_src, const int32_t num_elements) {} /** * Aggregate the result stored into shared memory back into global memory. * It also writes back the stored binId, if any, back into global memory. 
* Memory layout assumption: each 64-bit shared memory unit of data is as follows: * [0..31: the stored bin ID, to be written back][32..63: the count result, to be * aggregated] */ extern "C" __device__ void agg_from_smem_to_gmem_binId_count(int64_t* gmem_dest, int64_t* smem_src, const int32_t num_elements) { __syncthreads(); #pragma unroll for (int i = threadIdx.x; i < num_elements; i += blockDim.x) { int32_t bin_id = *reinterpret_cast<int32_t*>(smem_src + i); int32_t count_result = *(reinterpret_cast<int32_t*>(smem_src + i) + 1); if (count_result) { // non-zero count atomicAdd(reinterpret_cast<unsigned int*>(gmem_dest + i) + 1, static_cast<int32_t>(count_result)); // writing back the binId, only if count_result is non-zero *reinterpret_cast<unsigned int*>(gmem_dest + i) = static_cast<int32_t>(bin_id); } } } /** * Aggregate the result stored into shared memory back into global memory. * It also writes back the stored binId, if any, back into global memory. * Memory layout assumption: each 64-bit shared memory unit of data is as follows: * [0..31: the count result, to be aggregated][32..63: the stored bin ID, to be written * back] */ extern "C" __device__ void agg_from_smem_to_gmem_count_binId(int64_t* gmem_dest, int64_t* smem_src, const int32_t num_elements) { __syncthreads(); #pragma unroll for (int i = threadIdx.x; i < num_elements; i += blockDim.x) { int32_t count_result = *reinterpret_cast<int32_t*>(smem_src + i); int32_t bin_id = *(reinterpret_cast<int32_t*>(smem_src + i) + 1); if (count_result) { // non-zero count atomicAdd(reinterpret_cast<unsigned int*>(gmem_dest + i), static_cast<int32_t>(count_result)); // writing back the binId, only if count_result is non-zero *(reinterpret_cast<unsigned int*>(gmem_dest + i) + 1) = static_cast<int32_t>(bin_id); } } } #define init_group_by_buffer_gpu_impl init_group_by_buffer_gpu #include "GpuInitGroups.cu" #undef init_group_by_buffer_gpu_impl // Dynamic watchdog: monitoring up to 64 SMs. E.g. GP100 config may have 60: // 6 Graphics Processing Clusters (GPCs) * 10 Streaming Multiprocessors // TODO(Saman): move these into a kernel parameter, allocated and initialized through CUDA __device__ int64_t dw_sm_cycle_start[128]; // Set from host before launching the kernel // TODO(Saman): make this cycle budget something constant in codegen level __device__ int64_t dw_cycle_budget = 0; // Set from host before launching the kernel __device__ int32_t dw_abort = 0; // TBD: set from host (async) __inline__ __device__ uint32_t get_smid(void) { uint32_t ret; asm("mov.u32 %0, %%smid;" : "=r"(ret)); return ret; } /* * The main objective of this funciton is to return true, if any of the following two * scnearios happen: * 1. receives a host request for aborting the kernel execution * 2. kernel execution takes longer clock cycles than it was initially allowed * The assumption is that all (or none) threads within a block return true for the * watchdog, and the first thread within each block compares the recorded clock cycles for * its occupying SM with the allowed budget. It also assumess that all threads entering * this function are active (no critical edge exposure) * NOTE: dw_cycle_budget, dw_abort, and dw_sm_cycle_start[] are all variables in global * memory scope. 
*/ extern "C" __device__ bool dynamic_watchdog() { // check for dynamic watchdog, if triggered all threads return true if (dw_cycle_budget == 0LL) { return false; // Uninitialized watchdog can't check time } if (dw_abort == 1) { return true; // Received host request to abort } uint32_t smid = get_smid(); if (smid >= 128) { return false; } __shared__ volatile int64_t dw_block_cycle_start; // Thread block shared cycle start __shared__ volatile bool dw_should_terminate; // all threads within a block should return together if // watchdog criteria is met // thread 0 either initializes or read the initial clock cycle, the result is stored // into shared memory. Since all threads wihtin a block shares the same SM, there's no // point in using more threads here. if (threadIdx.x == 0) { dw_block_cycle_start = 0LL; int64_t cycle_count = static_cast<int64_t>(clock64()); // Make sure the block hasn't switched SMs if (smid == get_smid()) { dw_block_cycle_start = static_cast<int64_t>( atomicCAS(reinterpret_cast<unsigned long long*>(&dw_sm_cycle_start[smid]), 0ULL, static_cast<unsigned long long>(cycle_count))); } int64_t cycles = cycle_count - dw_block_cycle_start; if ((smid == get_smid()) && (dw_block_cycle_start > 0LL) && (cycles > dw_cycle_budget)) { // Check if we're out of time on this particular SM dw_should_terminate = true; } else { dw_should_terminate = false; } } __syncthreads(); return dw_should_terminate; } template <typename T = unsigned long long> inline __device__ T get_empty_key() { return EMPTY_KEY_64; } template <> inline __device__ unsigned int get_empty_key() { return EMPTY_KEY_32; } template <typename T> inline __device__ int64_t* get_matching_group_value(int64_t* groups_buffer, const uint32_t h, const T* key, const uint32_t key_count, const uint32_t row_size_quad) { const T empty_key = get_empty_key<T>(); uint32_t off = h * row_size_quad; auto row_ptr = reinterpret_cast<T*>(groups_buffer + off); { const T old = atomicCAS(row_ptr, empty_key, *key); if (empty_key == old && key_count > 1) { for (size_t i = 1; i <= key_count - 1; ++i) { atomicExch(row_ptr + i, key[i]); } } } if (key_count > 1) { while (atomicAdd(row_ptr + key_count - 1, 0) == empty_key) { // spin until the winning thread has finished writing the entire key and the init // value } } bool match = true; for (uint32_t i = 0; i < key_count; ++i) { if (row_ptr[i] != key[i]) { match = false; break; } } if (match) { auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count); return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8)); } return NULL; } extern "C" __device__ int64_t* get_matching_group_value(int64_t* groups_buffer, const uint32_t h, const int64_t* key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t* init_vals) { switch (key_width) { case 4: return get_matching_group_value(groups_buffer, h, reinterpret_cast<const unsigned int*>(key), key_count, row_size_quad); case 8: return get_matching_group_value(groups_buffer, h, reinterpret_cast<const unsigned long long*>(key), key_count, row_size_quad); default: return NULL; } } template <typename T> __device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer, const uint32_t entry_count, const uint32_t h, const T* key, const uint32_t key_count) { uint32_t off = h; { const uint64_t old = atomicCAS(reinterpret_cast<T*>(groups_buffer + off), get_empty_key<T>(), *key); if (old == get_empty_key<T>()) { for (size_t i = 0; i < key_count; ++i) { groups_buffer[off] = key[i]; off += entry_count; } return h; 
} } __syncthreads(); off = h; for (size_t i = 0; i < key_count; ++i) { if (groups_buffer[off] != key[i]) { return -1; } off += entry_count; } return h; } extern "C" __device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer, const uint32_t entry_count, const uint32_t h, const int64_t* key, const uint32_t key_count, const uint32_t key_width) { switch (key_width) { case 4: return get_matching_group_value_columnar_slot( groups_buffer, entry_count, h, reinterpret_cast<const unsigned int*>(key), key_count); case 8: return get_matching_group_value_columnar_slot( groups_buffer, entry_count, h, reinterpret_cast<const unsigned long long*>(key), key_count); default: return -1; } } extern "C" __device__ int64_t* get_matching_group_value_columnar( int64_t* groups_buffer, const uint32_t h, const int64_t* key, const uint32_t key_qw_count, const size_t entry_count) { uint32_t off = h; { const uint64_t old = atomicCAS( reinterpret_cast<unsigned long long*>(groups_buffer + off), EMPTY_KEY_64, *key); if (EMPTY_KEY_64 == old) { for (size_t i = 0; i < key_qw_count; ++i) { groups_buffer[off] = key[i]; off += entry_count; } return &groups_buffer[off]; } } __syncthreads(); off = h; for (size_t i = 0; i < key_qw_count; ++i) { if (groups_buffer[off] != key[i]) { return NULL; } off += entry_count; } return &groups_buffer[off]; } #include "GroupByRuntime.cpp" #include "JoinHashTableQueryRuntime.cpp" #include "MurmurHash.cpp" #include "TopKRuntime.cpp" __device__ int64_t atomicMax64(int64_t* address, int64_t val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, max((long long)val, (long long)assumed)); } while (assumed != old); return old; } __device__ int64_t atomicMin64(int64_t* address, int64_t val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, min((long long)val, (long long)assumed)); } while (assumed != old); return old; } // As of 20160418, CUDA 8.0EA only defines `atomicAdd(double*, double)` for compute // capability >= 6.0. 
#if TORCH_HIP_VERSION < 8000 || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600) __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif __device__ double atomicMax(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(max(val, __longlong_as_double(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } __device__ float atomicMax(float* address, float val) { int* address_as_int = (int*)address; int old = *address_as_int, assumed; do { assumed = old; old = atomicCAS( address_as_int, assumed, __float_as_int(max(val, __int_as_float(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __int_as_float(old); } __device__ double atomicMin(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(min(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } __device__ double atomicMin(float* address, float val) { int* address_as_ull = (int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS( address_as_ull, assumed, __float_as_int(min(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } extern "C" __device__ uint64_t agg_count_shared(uint64_t* agg, const int64_t val) { return static_cast<uint64_t>(atomicAdd(reinterpret_cast<uint32_t*>(agg), 1UL)); } extern "C" __device__ uint32_t agg_count_int32_shared(uint32_t* agg, const int32_t val) { return atomicAdd(agg, 1UL); } extern "C" __device__ uint64_t agg_count_double_shared(uint64_t* agg, const double val) { return agg_count_shared(agg, val); } extern "C" __device__ uint32_t agg_count_float_shared(uint32_t* agg, const float val) { return agg_count_int32_shared(agg, val); } extern "C" __device__ int64_t agg_sum_shared(int64_t* agg, const int64_t val) { return atomicAdd(reinterpret_cast<unsigned long long*>(agg), val); } extern "C" __device__ int32_t agg_sum_int32_shared(int32_t* agg, const int32_t val) { return atomicAdd(agg, val); } extern "C" __device__ void agg_sum_float_shared(int32_t* agg, const float val) { atomicAdd(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_sum_double_shared(int64_t* agg, const double val) { atomicAdd(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_max_shared(int64_t* agg, const int64_t val) { atomicMax64(agg, val); } extern "C" __device__ void agg_max_int32_shared(int32_t* agg, const int32_t val) { atomicMax(agg, val); } extern "C" __device__ void agg_max_double_shared(int64_t* agg, const double val) { atomicMax(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_max_float_shared(int32_t* agg, const float val) { 
atomicMax(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_min_shared(int64_t* agg, const int64_t val) { atomicMin64(agg, val); } extern "C" __device__ void agg_min_int32_shared(int32_t* agg, const int32_t val) { atomicMin(agg, val); } // TODO(Saman): use 16-bit atomicCAS for Turing extern "C" __device__ void atomicMax16(int16_t* agg, const int16_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; swap_value = (reinterpret_cast<size_t>(agg) & 0x2) ? static_cast<unsigned int>(max(static_cast<int16_t>(old_value >> 16), val)) << 16 | (old_value & 0xFFFF) : (old_value & 0xFFFF0000) | static_cast<unsigned int>( max(static_cast<int16_t>(old_value & 0xFFFF), val)); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (old_value != compare_value); } extern "C" __device__ void atomicMax8(int8_t* agg, const int8_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); // __byte_perm(unsigned int A, unsigned int B, unsigned int s): // if s == 0x3214 returns {A[31..24], A[23..16], A[15..8], B[7..0]} // if s == 0x3240 returns {A[31..24], A[23..16], B[7...0], A[7..0]} // if s == 0x3410 returns {A[31..24], B[7....0], A[15..8], A[7..0]} // if s == 0x4210 returns {B[7....0], A[23..16], A[15..8], A[7..0]} constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; auto max_value = static_cast<unsigned int>( // compare val with its corresponding bits in the compare_value max(val, static_cast<int8_t>(__byte_perm( compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440)))); swap_value = __byte_perm( compare_value, max_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (compare_value != old_value); } extern "C" __device__ void atomicMin16(int16_t* agg, const int16_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; swap_value = (reinterpret_cast<size_t>(agg) & 0x2) ? static_cast<unsigned int>(min(static_cast<int16_t>(old_value >> 16), val)) << 16 | (old_value & 0xFFFF) : (old_value & 0xFFFF0000) | static_cast<unsigned int>( min(static_cast<int16_t>(old_value & 0xFFFF), val)); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (old_value != compare_value); } extern "C" __device__ void atomicMin16SkipVal(int16_t* agg, const int16_t val, const int16_t skip_val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; int16_t selected_old_val = (reinterpret_cast<size_t>(agg) & 0x2) ? static_cast<int16_t>(old_value >> 16) : static_cast<int16_t>(old_value & 0xFFFF); swap_value = (reinterpret_cast<size_t>(agg) & 0x2) ? static_cast<unsigned int>( selected_old_val == skip_val ? 
val : min(selected_old_val, val)) << 16 | (old_value & 0xFFFF) : (old_value & 0xFFFF0000) | static_cast<unsigned int>( selected_old_val == skip_val ? val : min(selected_old_val, val)); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (old_value != compare_value); } extern "C" __device__ void atomicMin8(int8_t* agg, const int8_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; auto min_value = static_cast<unsigned int>( min(val, static_cast<int8_t>(__byte_perm( compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440)))); swap_value = __byte_perm( compare_value, min_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (compare_value != old_value); } extern "C" __device__ void atomicMin8SkipVal(int8_t* agg, const int8_t val, const int8_t skip_val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; int8_t selected_old_val = static_cast<int8_t>( __byte_perm(compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440)); auto min_value = static_cast<unsigned int>( selected_old_val == skip_val ? val : min(val, selected_old_val)); swap_value = __byte_perm( compare_value, min_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (compare_value != old_value); } extern "C" __device__ void agg_max_int16_shared(int16_t* agg, const int16_t val) { return atomicMax16(agg, val); } extern "C" __device__ void agg_max_int8_shared(int8_t* agg, const int8_t val) { return atomicMax8(agg, val); } extern "C" __device__ void agg_min_int16_shared(int16_t* agg, const int16_t val) { return atomicMin16(agg, val); } extern "C" __device__ void agg_min_int8_shared(int8_t* agg, const int8_t val) { return atomicMin8(agg, val); } extern "C" __device__ void agg_min_double_shared(int64_t* agg, const double val) { atomicMin(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_min_float_shared(int32_t* agg, const float val) { atomicMin(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_id_shared(int64_t* agg, const int64_t val) { *agg = val; } #define DEF_AGG_ID_INT_SHARED(n) \ extern "C" __device__ void agg_id_int##n##_shared(int##n##_t* agg, \ const int##n##_t val) { \ *agg = val; \ } DEF_AGG_ID_INT_SHARED(32) DEF_AGG_ID_INT_SHARED(16) DEF_AGG_ID_INT_SHARED(8) #undef DEF_AGG_ID_INT_SHARED extern "C" __device__ void agg_id_double_shared(int64_t* agg, const double val) { *agg = *(reinterpret_cast<const int64_t*>(&val)); } extern "C" __device__ void agg_id_double_shared_slow(int64_t* agg, const double* val) { *agg = *(reinterpret_cast<const int64_t*>(val)); } extern "C" __device__ void agg_id_float_shared(int32_t* agg, const float val) { *agg = __float_as_int(val); } #define DEF_SKIP_AGG(base_agg_func) \ extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \ ADDR_T* agg, const DATA_T val, const DATA_T 
skip_val) { \ if (val != skip_val) { \ return base_agg_func##_shared(agg, val); \ } \ return 0; \ } #define DATA_T int64_t #define ADDR_T uint64_t DEF_SKIP_AGG(agg_count) #undef DATA_T #undef ADDR_T #define DATA_T int32_t #define ADDR_T uint32_t DEF_SKIP_AGG(agg_count_int32) #undef DATA_T #undef ADDR_T // Initial value for nullable column is INT32_MIN extern "C" __device__ void agg_max_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { agg_max_int32_shared(agg, val); } } extern "C" __device__ void agg_max_int16_skip_val_shared(int16_t* agg, const int16_t val, const int16_t skip_val) { if (val != skip_val) { agg_max_int16_shared(agg, val); } } extern "C" __device__ void agg_min_int16_skip_val_shared(int16_t* agg, const int16_t val, const int16_t skip_val) { if (val != skip_val) { atomicMin16SkipVal(agg, val, skip_val); } } extern "C" __device__ void agg_max_int8_skip_val_shared(int8_t* agg, const int8_t val, const int8_t skip_val) { if (val != skip_val) { agg_max_int8_shared(agg, val); } } extern "C" __device__ void agg_min_int8_skip_val_shared(int8_t* agg, const int8_t val, const int8_t skip_val) { if (val != skip_val) { atomicMin8SkipVal(agg, val, skip_val); } } __device__ int32_t atomicMin32SkipVal(int32_t* address, int32_t val, const int32_t skip_val) { int32_t old = atomicExch(address, INT_MAX); return atomicMin(address, old == skip_val ? val : min(old, val)); } extern "C" __device__ void agg_min_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { atomicMin32SkipVal(agg, val, skip_val); } } __device__ int32_t atomicSum32SkipVal(int32_t* address, const int32_t val, const int32_t skip_val) { unsigned int* address_as_int = (unsigned int*)address; int32_t old = atomicExch(address_as_int, 0); int32_t old2 = atomicAdd(address_as_int, old == skip_val ? val : (val + old)); return old == skip_val ? old2 : (old2 + old); } extern "C" __device__ int32_t agg_sum_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { const int32_t old = atomicSum32SkipVal(agg, val, skip_val); return old; } return 0; } __device__ int64_t atomicSum64SkipVal(int64_t* address, const int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; int64_t old = atomicExch(address_as_ull, 0); int64_t old2 = atomicAdd(address_as_ull, old == skip_val ? val : (val + old)); return old == skip_val ? old2 : (old2 + old); } extern "C" __device__ int64_t agg_sum_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { return atomicSum64SkipVal(agg, val, skip_val); } return 0; } __device__ int64_t atomicMin64SkipVal(int64_t* address, int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val ? 
val : min((long long)val, (long long)assumed)); } while (assumed != old); return old; } extern "C" __device__ void agg_min_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { atomicMin64SkipVal(agg, val, skip_val); } } __device__ int64_t atomicMax64SkipVal(int64_t* address, int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val ? val : max((long long)val, (long long)assumed)); } while (assumed != old); return old; } extern "C" __device__ void agg_max_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { atomicMax64SkipVal(agg, val, skip_val); } } #undef DEF_SKIP_AGG #define DEF_SKIP_AGG(base_agg_func) \ extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \ ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \ if (val != skip_val) { \ return base_agg_func##_shared(agg, val); \ } \ return *agg; \ } #define DATA_T double #define ADDR_T uint64_t DEF_SKIP_AGG(agg_count_double) #undef ADDR_T #undef DATA_T #define DATA_T float #define ADDR_T uint32_t DEF_SKIP_AGG(agg_count_float) #undef ADDR_T #undef DATA_T // Initial value for nullable column is FLOAT_MIN extern "C" __device__ void agg_max_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { float old = atomicExch(reinterpret_cast<float*>(agg), -FLT_MAX); atomicMax(reinterpret_cast<float*>(agg), __float_as_int(old) == __float_as_int(skip_val) ? val : fmaxf(old, val)); } } __device__ float atomicMinFltSkipVal(int32_t* address, float val, const float skip_val) { float old = atomicExch(reinterpret_cast<float*>(address), FLT_MAX); return atomicMin( reinterpret_cast<float*>(address), __float_as_int(old) == __float_as_int(skip_val) ? val : fminf(old, val)); } extern "C" __device__ void agg_min_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { atomicMinFltSkipVal(agg, val, skip_val); } } __device__ void atomicSumFltSkipVal(float* address, const float val, const float skip_val) { float old = atomicExch(address, 0.f); atomicAdd(address, __float_as_int(old) == __float_as_int(skip_val) ? val : (val + old)); } extern "C" __device__ void agg_sum_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { atomicSumFltSkipVal(reinterpret_cast<float*>(agg), val, skip_val); } } __device__ void atomicSumDblSkipVal(double* address, const double val, const double skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; double old = __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(0.))); atomicAdd( address, __double_as_longlong(old) == __double_as_longlong(skip_val) ? 
val : (val + old)); } extern "C" __device__ void agg_sum_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (__double_as_longlong(val) != __double_as_longlong(skip_val)) { atomicSumDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } __device__ double atomicMinDblSkipVal(double* address, double val, const double skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull; unsigned long long int skip_val_as_ull = *reinterpret_cast<const unsigned long long*>(&skip_val); unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val_as_ull ? *reinterpret_cast<unsigned long long*>(&val) : __double_as_longlong(min(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } extern "C" __device__ void agg_min_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (val != skip_val) { atomicMinDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } __device__ double atomicMaxDblSkipVal(double* address, double val, const double skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull; unsigned long long int skip_val_as_ull = *((unsigned long long int*)&skip_val); unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val_as_ull ? *((unsigned long long int*)&val) : __double_as_longlong(max(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } extern "C" __device__ void agg_max_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (val != skip_val) { atomicMaxDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } #undef DEF_SKIP_AGG extern "C" __device__ bool slotEmptyKeyCAS(int64_t* slot, int64_t new_val, int64_t init_val) { auto slot_address = reinterpret_cast<unsigned long long int*>(slot); const auto empty_key = static_cast<unsigned long long int*>(static_cast<void*>(&init_val)); const auto new_val_cast = static_cast<unsigned long long int*>(static_cast<void*>(&new_val)); const auto old_val = atomicCAS(slot_address, *empty_key, *new_val_cast); if (old_val == *empty_key) { return true; } else { return false; } } extern "C" __device__ bool slotEmptyKeyCAS_int32(int32_t* slot, int32_t new_val, int32_t init_val) { unsigned int* slot_address = reinterpret_cast<unsigned int*>(slot); unsigned int compare_value = static_cast<unsigned int>(init_val); unsigned int swap_value = static_cast<unsigned int>(new_val); const unsigned int old_value = atomicCAS(slot_address, compare_value, swap_value); return old_value == compare_value; } #include <stdio.h> extern "C" __device__ bool slotEmptyKeyCAS_int16(int16_t* slot, int16_t new_val, int16_t init_val) { unsigned int* base_slot_address = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(slot) & ~0x3); unsigned int old_value = *base_slot_address; unsigned int swap_value, compare_value; do { compare_value = old_value; // exit criteria: if init_val does not exist in the slot (some other thread has // succeeded) if (static_cast<unsigned int>(init_val) != __byte_perm( compare_value, 0, (reinterpret_cast<size_t>(slot) & 0x2 ? 0x3244 : 0x4410))) { return false; } swap_value = __byte_perm(compare_value, static_cast<unsigned int>(new_val), (reinterpret_cast<size_t>(slot) & 0x2) ? 
0x5410 : 0x3254); old_value = atomicCAS(base_slot_address, compare_value, swap_value); } while (compare_value != old_value); return true; } extern "C" __device__ bool slotEmptyKeyCAS_int8(int8_t* slot, int8_t new_val, int8_t init_val) { // properly align the slot address: unsigned int* base_slot_address = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(slot) & ~0x3); constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_slot_address; unsigned int swap_value, compare_value; do { compare_value = old_value; // exit criteria: if init_val does not exist in the slot (some other thread has // succeeded) if (static_cast<unsigned int>(init_val) != __byte_perm(compare_value, 0, (reinterpret_cast<size_t>(slot) & 0x3) | 0x4440)) { return false; } swap_value = __byte_perm(compare_value, static_cast<unsigned int>(new_val), byte_permutations[reinterpret_cast<size_t>(slot) & 0x3]); old_value = atomicCAS(base_slot_address, compare_value, swap_value); } while (compare_value != old_value); return true; } #include "../Utils/ChunkIter.cpp" #include "DateTruncate.cpp" #include "ExtractFromTime.cpp" #define EXECUTE_INCLUDE #include "ArrayOps.cpp" #include "DateAdd.cpp" #include "StringFunctions.cpp" #undef EXECUTE_INCLUDE #include "../Utils/Regexp.cpp" #include "../Utils/StringLike.cpp" extern "C" __device__ uint64_t string_decode(int8_t* chunk_iter_, int64_t pos) { // TODO(alex): de-dup, the x64 version is basically identical ChunkIter* chunk_iter = reinterpret_cast<ChunkIter*>(chunk_iter_); VarlenDatum vd; bool is_end; ChunkIter_get_nth(chunk_iter, pos, false, &vd, &is_end); return vd.is_null ? 0 : (reinterpret_cast<uint64_t>(vd.pointer) & 0xffffffffffff) | (static_cast<uint64_t>(vd.length) << 48); } extern "C" __device__ void linear_probabilistic_count(uint8_t* bitmap, const uint32_t bitmap_bytes, const uint8_t* key_bytes, const uint32_t key_len) { const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8); const uint32_t word_idx = bit_pos / 32; const uint32_t bit_idx = bit_pos % 32; atomicOr(((uint32_t*)bitmap) + word_idx, 1 << bit_idx); } extern "C" __device__ void agg_count_distinct_bitmap_gpu(int64_t* agg, const int64_t val, const int64_t min_val, const int64_t base_dev_addr, const int64_t base_host_addr, const uint64_t sub_bitmap_count, const uint64_t bitmap_bytes) { const uint64_t bitmap_idx = val - min_val; const uint32_t byte_idx = bitmap_idx >> 3; const uint32_t word_idx = byte_idx >> 2; const uint32_t byte_word_idx = byte_idx & 3; const int64_t host_addr = *agg; uint32_t* bitmap = (uint32_t*)(base_dev_addr + host_addr - base_host_addr + (threadIdx.x & (sub_bitmap_count - 1)) * bitmap_bytes); switch (byte_word_idx) { case 0: atomicOr(&bitmap[word_idx], 1 << (bitmap_idx & 7)); break; case 1: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 8)); break; case 2: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 16)); break; case 3: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 24)); break; default: break; } } extern "C" __device__ void agg_count_distinct_bitmap_skip_val_gpu( int64_t* agg, const int64_t val, const int64_t min_val, const int64_t skip_val, const int64_t base_dev_addr, const int64_t base_host_addr, const uint64_t sub_bitmap_count, const uint64_t bitmap_bytes) { if (val != skip_val) { agg_count_distinct_bitmap_gpu( agg, val, min_val, base_dev_addr, base_host_addr, sub_bitmap_count, bitmap_bytes); } } extern "C" __device__ void agg_approximate_count_distinct_gpu( int64_t* agg, const int64_t 
key, const uint32_t b, const int64_t base_dev_addr, const int64_t base_host_addr) { const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0); const uint32_t index = hash >> (64 - b); const int32_t rank = get_rank(hash << b, 64 - b); const int64_t host_addr = *agg; int32_t* M = (int32_t*)(base_dev_addr + host_addr - base_host_addr); atomicMax(&M[index], rank); } extern "C" __device__ void force_sync() { __threadfence_block(); } extern "C" __device__ void sync_warp() { #if (TORCH_HIP_VERSION >= 9000) __syncwarp(); #endif } /** * Protected warp synchronization to make sure all (or none) threads within a warp go * through a synchronization barrier. thread_pos: the current thread position to be used * for a memory access row_count: maximum number of rows to be processed The function * performs warp sync iff all 32 threads within that warp will process valid data NOTE: it * currently assumes that warp size is 32. */ extern "C" __device__ void sync_warp_protected(int64_t thread_pos, int64_t row_count) { #if (TORCH_HIP_VERSION >= 9000) // only syncing if NOT within the same warp as those threads experiencing the critical // edge if ((((row_count - 1) | 0x1F) - thread_pos) >= 32) { __syncwarp(); } #endif }
4b57666338b14a8d0b69e6e9d3919b870a7346cf.cu
#include <cuda.h> #include <float.h> #include <stdint.h> #include <limits> #include "BufferCompaction.h" #include "ExtensionFunctions.hpp" #include "GpuRtConstants.h" #include "HyperLogLogRank.h" extern "C" __device__ int32_t pos_start_impl(const int32_t* row_index_resume) { return blockIdx.x * blockDim.x + threadIdx.x; } extern "C" __device__ int32_t group_buff_idx_impl() { return pos_start_impl(NULL); } extern "C" __device__ int32_t pos_step_impl() { return blockDim.x * gridDim.x; } extern "C" __device__ int8_t thread_warp_idx(const int8_t warp_sz) { return threadIdx.x % warp_sz; } extern "C" __device__ const int64_t* init_shared_mem_nop( const int64_t* groups_buffer, const int32_t groups_buffer_size) { return groups_buffer; } extern "C" __device__ void write_back_nop(int64_t* dest, int64_t* src, const int32_t sz) { } extern "C" __device__ const int64_t* init_shared_mem(const int64_t* groups_buffer, const int32_t groups_buffer_size) { extern __shared__ int64_t fast_bins[]; if (threadIdx.x == 0) { memcpy(fast_bins, groups_buffer, groups_buffer_size); } __syncthreads(); return fast_bins; } /** * Dynamically allocates shared memory per block. * The amount of shared memory allocated is defined at kernel launch time. * Returns a pointer to the beginning of allocated shared memory */ extern "C" __device__ int64_t* alloc_shared_mem_dynamic() { extern __shared__ int64_t groups_buffer_smem[]; return groups_buffer_smem; } /** * Set the allocated shared memory elements to be equal to the 'identity_element'. * groups_buffer_size: number of 64-bit elements in shared memory per thread-block * NOTE: groups_buffer_size is in units of 64-bit elements. */ extern "C" __device__ void set_shared_mem_to_identity( int64_t* groups_buffer_smem, const int32_t groups_buffer_size, const int64_t identity_element = 0) { #pragma unroll for (int i = threadIdx.x; i < groups_buffer_size; i += blockDim.x) { groups_buffer_smem[i] = identity_element; } __syncthreads(); } /** * Initialize dynamic shared memory: * 1. Allocates dynamic shared memory * 2. Set every allocated element to be equal to the 'identity element', by default zero. */ extern "C" __device__ const int64_t* init_shared_mem_dynamic( const int64_t* groups_buffer, const int32_t groups_buffer_size) { int64_t* groups_buffer_smem = alloc_shared_mem_dynamic(); set_shared_mem_to_identity(groups_buffer_smem, groups_buffer_size); return groups_buffer_smem; } extern "C" __device__ void write_back(int64_t* dest, int64_t* src, const int32_t sz) { __syncthreads(); if (threadIdx.x == 0) { memcpy(dest, src, sz); } } extern "C" __device__ void write_back_smem_nop(int64_t* dest, int64_t* src, const int32_t sz) {} extern "C" __device__ void agg_from_smem_to_gmem_nop(int64_t* gmem_dest, int64_t* smem_src, const int32_t num_elements) {} /** * Aggregate the result stored into shared memory back into global memory. * It also writes back the stored binId, if any, back into global memory. 
* Memory layout assumption: each 64-bit shared memory unit of data is as follows: * [0..31: the stored bin ID, to be written back][32..63: the count result, to be * aggregated] */ extern "C" __device__ void agg_from_smem_to_gmem_binId_count(int64_t* gmem_dest, int64_t* smem_src, const int32_t num_elements) { __syncthreads(); #pragma unroll for (int i = threadIdx.x; i < num_elements; i += blockDim.x) { int32_t bin_id = *reinterpret_cast<int32_t*>(smem_src + i); int32_t count_result = *(reinterpret_cast<int32_t*>(smem_src + i) + 1); if (count_result) { // non-zero count atomicAdd(reinterpret_cast<unsigned int*>(gmem_dest + i) + 1, static_cast<int32_t>(count_result)); // writing back the binId, only if count_result is non-zero *reinterpret_cast<unsigned int*>(gmem_dest + i) = static_cast<int32_t>(bin_id); } } } /** * Aggregate the result stored into shared memory back into global memory. * It also writes back the stored binId, if any, back into global memory. * Memory layout assumption: each 64-bit shared memory unit of data is as follows: * [0..31: the count result, to be aggregated][32..63: the stored bin ID, to be written * back] */ extern "C" __device__ void agg_from_smem_to_gmem_count_binId(int64_t* gmem_dest, int64_t* smem_src, const int32_t num_elements) { __syncthreads(); #pragma unroll for (int i = threadIdx.x; i < num_elements; i += blockDim.x) { int32_t count_result = *reinterpret_cast<int32_t*>(smem_src + i); int32_t bin_id = *(reinterpret_cast<int32_t*>(smem_src + i) + 1); if (count_result) { // non-zero count atomicAdd(reinterpret_cast<unsigned int*>(gmem_dest + i), static_cast<int32_t>(count_result)); // writing back the binId, only if count_result is non-zero *(reinterpret_cast<unsigned int*>(gmem_dest + i) + 1) = static_cast<int32_t>(bin_id); } } } #define init_group_by_buffer_gpu_impl init_group_by_buffer_gpu #include "GpuInitGroups.cu" #undef init_group_by_buffer_gpu_impl // Dynamic watchdog: monitoring up to 64 SMs. E.g. GP100 config may have 60: // 6 Graphics Processing Clusters (GPCs) * 10 Streaming Multiprocessors // TODO(Saman): move these into a kernel parameter, allocated and initialized through CUDA __device__ int64_t dw_sm_cycle_start[128]; // Set from host before launching the kernel // TODO(Saman): make this cycle budget something constant in codegen level __device__ int64_t dw_cycle_budget = 0; // Set from host before launching the kernel __device__ int32_t dw_abort = 0; // TBD: set from host (async) __inline__ __device__ uint32_t get_smid(void) { uint32_t ret; asm("mov.u32 %0, %%smid;" : "=r"(ret)); return ret; } /* * The main objective of this funciton is to return true, if any of the following two * scnearios happen: * 1. receives a host request for aborting the kernel execution * 2. kernel execution takes longer clock cycles than it was initially allowed * The assumption is that all (or none) threads within a block return true for the * watchdog, and the first thread within each block compares the recorded clock cycles for * its occupying SM with the allowed budget. It also assumess that all threads entering * this function are active (no critical edge exposure) * NOTE: dw_cycle_budget, dw_abort, and dw_sm_cycle_start[] are all variables in global * memory scope. 
*/ extern "C" __device__ bool dynamic_watchdog() { // check for dynamic watchdog, if triggered all threads return true if (dw_cycle_budget == 0LL) { return false; // Uninitialized watchdog can't check time } if (dw_abort == 1) { return true; // Received host request to abort } uint32_t smid = get_smid(); if (smid >= 128) { return false; } __shared__ volatile int64_t dw_block_cycle_start; // Thread block shared cycle start __shared__ volatile bool dw_should_terminate; // all threads within a block should return together if // watchdog criteria is met // thread 0 either initializes or read the initial clock cycle, the result is stored // into shared memory. Since all threads wihtin a block shares the same SM, there's no // point in using more threads here. if (threadIdx.x == 0) { dw_block_cycle_start = 0LL; int64_t cycle_count = static_cast<int64_t>(clock64()); // Make sure the block hasn't switched SMs if (smid == get_smid()) { dw_block_cycle_start = static_cast<int64_t>( atomicCAS(reinterpret_cast<unsigned long long*>(&dw_sm_cycle_start[smid]), 0ULL, static_cast<unsigned long long>(cycle_count))); } int64_t cycles = cycle_count - dw_block_cycle_start; if ((smid == get_smid()) && (dw_block_cycle_start > 0LL) && (cycles > dw_cycle_budget)) { // Check if we're out of time on this particular SM dw_should_terminate = true; } else { dw_should_terminate = false; } } __syncthreads(); return dw_should_terminate; } template <typename T = unsigned long long> inline __device__ T get_empty_key() { return EMPTY_KEY_64; } template <> inline __device__ unsigned int get_empty_key() { return EMPTY_KEY_32; } template <typename T> inline __device__ int64_t* get_matching_group_value(int64_t* groups_buffer, const uint32_t h, const T* key, const uint32_t key_count, const uint32_t row_size_quad) { const T empty_key = get_empty_key<T>(); uint32_t off = h * row_size_quad; auto row_ptr = reinterpret_cast<T*>(groups_buffer + off); { const T old = atomicCAS(row_ptr, empty_key, *key); if (empty_key == old && key_count > 1) { for (size_t i = 1; i <= key_count - 1; ++i) { atomicExch(row_ptr + i, key[i]); } } } if (key_count > 1) { while (atomicAdd(row_ptr + key_count - 1, 0) == empty_key) { // spin until the winning thread has finished writing the entire key and the init // value } } bool match = true; for (uint32_t i = 0; i < key_count; ++i) { if (row_ptr[i] != key[i]) { match = false; break; } } if (match) { auto row_ptr_i8 = reinterpret_cast<int8_t*>(row_ptr + key_count); return reinterpret_cast<int64_t*>(align_to_int64(row_ptr_i8)); } return NULL; } extern "C" __device__ int64_t* get_matching_group_value(int64_t* groups_buffer, const uint32_t h, const int64_t* key, const uint32_t key_count, const uint32_t key_width, const uint32_t row_size_quad, const int64_t* init_vals) { switch (key_width) { case 4: return get_matching_group_value(groups_buffer, h, reinterpret_cast<const unsigned int*>(key), key_count, row_size_quad); case 8: return get_matching_group_value(groups_buffer, h, reinterpret_cast<const unsigned long long*>(key), key_count, row_size_quad); default: return NULL; } } template <typename T> __device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer, const uint32_t entry_count, const uint32_t h, const T* key, const uint32_t key_count) { uint32_t off = h; { const uint64_t old = atomicCAS(reinterpret_cast<T*>(groups_buffer + off), get_empty_key<T>(), *key); if (old == get_empty_key<T>()) { for (size_t i = 0; i < key_count; ++i) { groups_buffer[off] = key[i]; off += entry_count; } return h; 
} } __syncthreads(); off = h; for (size_t i = 0; i < key_count; ++i) { if (groups_buffer[off] != key[i]) { return -1; } off += entry_count; } return h; } extern "C" __device__ int32_t get_matching_group_value_columnar_slot(int64_t* groups_buffer, const uint32_t entry_count, const uint32_t h, const int64_t* key, const uint32_t key_count, const uint32_t key_width) { switch (key_width) { case 4: return get_matching_group_value_columnar_slot( groups_buffer, entry_count, h, reinterpret_cast<const unsigned int*>(key), key_count); case 8: return get_matching_group_value_columnar_slot( groups_buffer, entry_count, h, reinterpret_cast<const unsigned long long*>(key), key_count); default: return -1; } } extern "C" __device__ int64_t* get_matching_group_value_columnar( int64_t* groups_buffer, const uint32_t h, const int64_t* key, const uint32_t key_qw_count, const size_t entry_count) { uint32_t off = h; { const uint64_t old = atomicCAS( reinterpret_cast<unsigned long long*>(groups_buffer + off), EMPTY_KEY_64, *key); if (EMPTY_KEY_64 == old) { for (size_t i = 0; i < key_qw_count; ++i) { groups_buffer[off] = key[i]; off += entry_count; } return &groups_buffer[off]; } } __syncthreads(); off = h; for (size_t i = 0; i < key_qw_count; ++i) { if (groups_buffer[off] != key[i]) { return NULL; } off += entry_count; } return &groups_buffer[off]; } #include "GroupByRuntime.cpp" #include "JoinHashTableQueryRuntime.cpp" #include "MurmurHash.cpp" #include "TopKRuntime.cpp" __device__ int64_t atomicMax64(int64_t* address, int64_t val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, max((long long)val, (long long)assumed)); } while (assumed != old); return old; } __device__ int64_t atomicMin64(int64_t* address, int64_t val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, min((long long)val, (long long)assumed)); } while (assumed != old); return old; } // As of 20160418, CUDA 8.0EA only defines `atomicAdd(double*, double)` for compute // capability >= 6.0. 
#if CUDA_VERSION < 8000 || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600) __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } #endif __device__ double atomicMax(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(max(val, __longlong_as_double(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __longlong_as_double(old); } __device__ float atomicMax(float* address, float val) { int* address_as_int = (int*)address; int old = *address_as_int, assumed; do { assumed = old; old = atomicCAS( address_as_int, assumed, __float_as_int(max(val, __int_as_float(assumed)))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); return __int_as_float(old); } __device__ double atomicMin(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(min(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } __device__ double atomicMin(float* address, float val) { int* address_as_ull = (int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS( address_as_ull, assumed, __float_as_int(min(val, __int_as_float(assumed)))); } while (assumed != old); return __int_as_float(old); } extern "C" __device__ uint64_t agg_count_shared(uint64_t* agg, const int64_t val) { return static_cast<uint64_t>(atomicAdd(reinterpret_cast<uint32_t*>(agg), 1UL)); } extern "C" __device__ uint32_t agg_count_int32_shared(uint32_t* agg, const int32_t val) { return atomicAdd(agg, 1UL); } extern "C" __device__ uint64_t agg_count_double_shared(uint64_t* agg, const double val) { return agg_count_shared(agg, val); } extern "C" __device__ uint32_t agg_count_float_shared(uint32_t* agg, const float val) { return agg_count_int32_shared(agg, val); } extern "C" __device__ int64_t agg_sum_shared(int64_t* agg, const int64_t val) { return atomicAdd(reinterpret_cast<unsigned long long*>(agg), val); } extern "C" __device__ int32_t agg_sum_int32_shared(int32_t* agg, const int32_t val) { return atomicAdd(agg, val); } extern "C" __device__ void agg_sum_float_shared(int32_t* agg, const float val) { atomicAdd(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_sum_double_shared(int64_t* agg, const double val) { atomicAdd(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_max_shared(int64_t* agg, const int64_t val) { atomicMax64(agg, val); } extern "C" __device__ void agg_max_int32_shared(int32_t* agg, const int32_t val) { atomicMax(agg, val); } extern "C" __device__ void agg_max_double_shared(int64_t* agg, const double val) { atomicMax(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_max_float_shared(int32_t* agg, const float val) { 
atomicMax(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_min_shared(int64_t* agg, const int64_t val) { atomicMin64(agg, val); } extern "C" __device__ void agg_min_int32_shared(int32_t* agg, const int32_t val) { atomicMin(agg, val); } // TODO(Saman): use 16-bit atomicCAS for Turing extern "C" __device__ void atomicMax16(int16_t* agg, const int16_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; swap_value = (reinterpret_cast<size_t>(agg) & 0x2) ? static_cast<unsigned int>(max(static_cast<int16_t>(old_value >> 16), val)) << 16 | (old_value & 0xFFFF) : (old_value & 0xFFFF0000) | static_cast<unsigned int>( max(static_cast<int16_t>(old_value & 0xFFFF), val)); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (old_value != compare_value); } extern "C" __device__ void atomicMax8(int8_t* agg, const int8_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); // __byte_perm(unsigned int A, unsigned int B, unsigned int s): // if s == 0x3214 returns {A[31..24], A[23..16], A[15..8], B[7..0]} // if s == 0x3240 returns {A[31..24], A[23..16], B[7...0], A[7..0]} // if s == 0x3410 returns {A[31..24], B[7....0], A[15..8], A[7..0]} // if s == 0x4210 returns {B[7....0], A[23..16], A[15..8], A[7..0]} constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; auto max_value = static_cast<unsigned int>( // compare val with its corresponding bits in the compare_value max(val, static_cast<int8_t>(__byte_perm( compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440)))); swap_value = __byte_perm( compare_value, max_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (compare_value != old_value); } extern "C" __device__ void atomicMin16(int16_t* agg, const int16_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; swap_value = (reinterpret_cast<size_t>(agg) & 0x2) ? static_cast<unsigned int>(min(static_cast<int16_t>(old_value >> 16), val)) << 16 | (old_value & 0xFFFF) : (old_value & 0xFFFF0000) | static_cast<unsigned int>( min(static_cast<int16_t>(old_value & 0xFFFF), val)); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (old_value != compare_value); } extern "C" __device__ void atomicMin16SkipVal(int16_t* agg, const int16_t val, const int16_t skip_val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; int16_t selected_old_val = (reinterpret_cast<size_t>(agg) & 0x2) ? static_cast<int16_t>(old_value >> 16) : static_cast<int16_t>(old_value & 0xFFFF); swap_value = (reinterpret_cast<size_t>(agg) & 0x2) ? static_cast<unsigned int>( selected_old_val == skip_val ? 
val : min(selected_old_val, val)) << 16 | (old_value & 0xFFFF) : (old_value & 0xFFFF0000) | static_cast<unsigned int>( selected_old_val == skip_val ? val : min(selected_old_val, val)); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (old_value != compare_value); } extern "C" __device__ void atomicMin8(int8_t* agg, const int8_t val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; auto min_value = static_cast<unsigned int>( min(val, static_cast<int8_t>(__byte_perm( compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440)))); swap_value = __byte_perm( compare_value, min_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (compare_value != old_value); } extern "C" __device__ void atomicMin8SkipVal(int8_t* agg, const int8_t val, const int8_t skip_val) { // properly align the input pointer: unsigned int* base_address_u32 = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(agg) & ~0x3); constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_address_u32; unsigned int swap_value, compare_value; do { compare_value = old_value; int8_t selected_old_val = static_cast<int8_t>( __byte_perm(compare_value, 0, (reinterpret_cast<size_t>(agg) & 0x3) | 0x4440)); auto min_value = static_cast<unsigned int>( selected_old_val == skip_val ? val : min(val, selected_old_val)); swap_value = __byte_perm( compare_value, min_value, byte_permutations[reinterpret_cast<size_t>(agg) & 0x3]); old_value = atomicCAS(base_address_u32, compare_value, swap_value); } while (compare_value != old_value); } extern "C" __device__ void agg_max_int16_shared(int16_t* agg, const int16_t val) { return atomicMax16(agg, val); } extern "C" __device__ void agg_max_int8_shared(int8_t* agg, const int8_t val) { return atomicMax8(agg, val); } extern "C" __device__ void agg_min_int16_shared(int16_t* agg, const int16_t val) { return atomicMin16(agg, val); } extern "C" __device__ void agg_min_int8_shared(int8_t* agg, const int8_t val) { return atomicMin8(agg, val); } extern "C" __device__ void agg_min_double_shared(int64_t* agg, const double val) { atomicMin(reinterpret_cast<double*>(agg), val); } extern "C" __device__ void agg_min_float_shared(int32_t* agg, const float val) { atomicMin(reinterpret_cast<float*>(agg), val); } extern "C" __device__ void agg_id_shared(int64_t* agg, const int64_t val) { *agg = val; } #define DEF_AGG_ID_INT_SHARED(n) \ extern "C" __device__ void agg_id_int##n##_shared(int##n##_t* agg, \ const int##n##_t val) { \ *agg = val; \ } DEF_AGG_ID_INT_SHARED(32) DEF_AGG_ID_INT_SHARED(16) DEF_AGG_ID_INT_SHARED(8) #undef DEF_AGG_ID_INT_SHARED extern "C" __device__ void agg_id_double_shared(int64_t* agg, const double val) { *agg = *(reinterpret_cast<const int64_t*>(&val)); } extern "C" __device__ void agg_id_double_shared_slow(int64_t* agg, const double* val) { *agg = *(reinterpret_cast<const int64_t*>(val)); } extern "C" __device__ void agg_id_float_shared(int32_t* agg, const float val) { *agg = __float_as_int(val); } #define DEF_SKIP_AGG(base_agg_func) \ extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \ ADDR_T* agg, const DATA_T val, const DATA_T 
skip_val) { \ if (val != skip_val) { \ return base_agg_func##_shared(agg, val); \ } \ return 0; \ } #define DATA_T int64_t #define ADDR_T uint64_t DEF_SKIP_AGG(agg_count) #undef DATA_T #undef ADDR_T #define DATA_T int32_t #define ADDR_T uint32_t DEF_SKIP_AGG(agg_count_int32) #undef DATA_T #undef ADDR_T // Initial value for nullable column is INT32_MIN extern "C" __device__ void agg_max_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { agg_max_int32_shared(agg, val); } } extern "C" __device__ void agg_max_int16_skip_val_shared(int16_t* agg, const int16_t val, const int16_t skip_val) { if (val != skip_val) { agg_max_int16_shared(agg, val); } } extern "C" __device__ void agg_min_int16_skip_val_shared(int16_t* agg, const int16_t val, const int16_t skip_val) { if (val != skip_val) { atomicMin16SkipVal(agg, val, skip_val); } } extern "C" __device__ void agg_max_int8_skip_val_shared(int8_t* agg, const int8_t val, const int8_t skip_val) { if (val != skip_val) { agg_max_int8_shared(agg, val); } } extern "C" __device__ void agg_min_int8_skip_val_shared(int8_t* agg, const int8_t val, const int8_t skip_val) { if (val != skip_val) { atomicMin8SkipVal(agg, val, skip_val); } } __device__ int32_t atomicMin32SkipVal(int32_t* address, int32_t val, const int32_t skip_val) { int32_t old = atomicExch(address, INT_MAX); return atomicMin(address, old == skip_val ? val : min(old, val)); } extern "C" __device__ void agg_min_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { atomicMin32SkipVal(agg, val, skip_val); } } __device__ int32_t atomicSum32SkipVal(int32_t* address, const int32_t val, const int32_t skip_val) { unsigned int* address_as_int = (unsigned int*)address; int32_t old = atomicExch(address_as_int, 0); int32_t old2 = atomicAdd(address_as_int, old == skip_val ? val : (val + old)); return old == skip_val ? old2 : (old2 + old); } extern "C" __device__ int32_t agg_sum_int32_skip_val_shared(int32_t* agg, const int32_t val, const int32_t skip_val) { if (val != skip_val) { const int32_t old = atomicSum32SkipVal(agg, val, skip_val); return old; } return 0; } __device__ int64_t atomicSum64SkipVal(int64_t* address, const int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; int64_t old = atomicExch(address_as_ull, 0); int64_t old2 = atomicAdd(address_as_ull, old == skip_val ? val : (val + old)); return old == skip_val ? old2 : (old2 + old); } extern "C" __device__ int64_t agg_sum_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { return atomicSum64SkipVal(agg, val, skip_val); } return 0; } __device__ int64_t atomicMin64SkipVal(int64_t* address, int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val ? 
val : min((long long)val, (long long)assumed)); } while (assumed != old); return old; } extern "C" __device__ void agg_min_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { atomicMin64SkipVal(agg, val, skip_val); } } __device__ int64_t atomicMax64SkipVal(int64_t* address, int64_t val, const int64_t skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val ? val : max((long long)val, (long long)assumed)); } while (assumed != old); return old; } extern "C" __device__ void agg_max_skip_val_shared(int64_t* agg, const int64_t val, const int64_t skip_val) { if (val != skip_val) { atomicMax64SkipVal(agg, val, skip_val); } } #undef DEF_SKIP_AGG #define DEF_SKIP_AGG(base_agg_func) \ extern "C" __device__ ADDR_T base_agg_func##_skip_val_shared( \ ADDR_T* agg, const DATA_T val, const DATA_T skip_val) { \ if (val != skip_val) { \ return base_agg_func##_shared(agg, val); \ } \ return *agg; \ } #define DATA_T double #define ADDR_T uint64_t DEF_SKIP_AGG(agg_count_double) #undef ADDR_T #undef DATA_T #define DATA_T float #define ADDR_T uint32_t DEF_SKIP_AGG(agg_count_float) #undef ADDR_T #undef DATA_T // Initial value for nullable column is FLOAT_MIN extern "C" __device__ void agg_max_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { float old = atomicExch(reinterpret_cast<float*>(agg), -FLT_MAX); atomicMax(reinterpret_cast<float*>(agg), __float_as_int(old) == __float_as_int(skip_val) ? val : fmaxf(old, val)); } } __device__ float atomicMinFltSkipVal(int32_t* address, float val, const float skip_val) { float old = atomicExch(reinterpret_cast<float*>(address), FLT_MAX); return atomicMin( reinterpret_cast<float*>(address), __float_as_int(old) == __float_as_int(skip_val) ? val : fminf(old, val)); } extern "C" __device__ void agg_min_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { atomicMinFltSkipVal(agg, val, skip_val); } } __device__ void atomicSumFltSkipVal(float* address, const float val, const float skip_val) { float old = atomicExch(address, 0.f); atomicAdd(address, __float_as_int(old) == __float_as_int(skip_val) ? val : (val + old)); } extern "C" __device__ void agg_sum_float_skip_val_shared(int32_t* agg, const float val, const float skip_val) { if (__float_as_int(val) != __float_as_int(skip_val)) { atomicSumFltSkipVal(reinterpret_cast<float*>(agg), val, skip_val); } } __device__ void atomicSumDblSkipVal(double* address, const double val, const double skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; double old = __longlong_as_double(atomicExch(address_as_ull, __double_as_longlong(0.))); atomicAdd( address, __double_as_longlong(old) == __double_as_longlong(skip_val) ? 
val : (val + old)); } extern "C" __device__ void agg_sum_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (__double_as_longlong(val) != __double_as_longlong(skip_val)) { atomicSumDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } __device__ double atomicMinDblSkipVal(double* address, double val, const double skip_val) { unsigned long long int* address_as_ull = reinterpret_cast<unsigned long long int*>(address); unsigned long long int old = *address_as_ull; unsigned long long int skip_val_as_ull = *reinterpret_cast<const unsigned long long*>(&skip_val); unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val_as_ull ? *reinterpret_cast<unsigned long long*>(&val) : __double_as_longlong(min(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } extern "C" __device__ void agg_min_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (val != skip_val) { atomicMinDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } __device__ double atomicMaxDblSkipVal(double* address, double val, const double skip_val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull; unsigned long long int skip_val_as_ull = *((unsigned long long int*)&skip_val); unsigned long long int assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, assumed == skip_val_as_ull ? *((unsigned long long int*)&val) : __double_as_longlong(max(val, __longlong_as_double(assumed)))); } while (assumed != old); return __longlong_as_double(old); } extern "C" __device__ void agg_max_double_skip_val_shared(int64_t* agg, const double val, const double skip_val) { if (val != skip_val) { atomicMaxDblSkipVal(reinterpret_cast<double*>(agg), val, skip_val); } } #undef DEF_SKIP_AGG extern "C" __device__ bool slotEmptyKeyCAS(int64_t* slot, int64_t new_val, int64_t init_val) { auto slot_address = reinterpret_cast<unsigned long long int*>(slot); const auto empty_key = static_cast<unsigned long long int*>(static_cast<void*>(&init_val)); const auto new_val_cast = static_cast<unsigned long long int*>(static_cast<void*>(&new_val)); const auto old_val = atomicCAS(slot_address, *empty_key, *new_val_cast); if (old_val == *empty_key) { return true; } else { return false; } } extern "C" __device__ bool slotEmptyKeyCAS_int32(int32_t* slot, int32_t new_val, int32_t init_val) { unsigned int* slot_address = reinterpret_cast<unsigned int*>(slot); unsigned int compare_value = static_cast<unsigned int>(init_val); unsigned int swap_value = static_cast<unsigned int>(new_val); const unsigned int old_value = atomicCAS(slot_address, compare_value, swap_value); return old_value == compare_value; } #include <stdio.h> extern "C" __device__ bool slotEmptyKeyCAS_int16(int16_t* slot, int16_t new_val, int16_t init_val) { unsigned int* base_slot_address = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(slot) & ~0x3); unsigned int old_value = *base_slot_address; unsigned int swap_value, compare_value; do { compare_value = old_value; // exit criteria: if init_val does not exist in the slot (some other thread has // succeeded) if (static_cast<unsigned int>(init_val) != __byte_perm( compare_value, 0, (reinterpret_cast<size_t>(slot) & 0x2 ? 0x3244 : 0x4410))) { return false; } swap_value = __byte_perm(compare_value, static_cast<unsigned int>(new_val), (reinterpret_cast<size_t>(slot) & 0x2) ? 
0x5410 : 0x3254); old_value = atomicCAS(base_slot_address, compare_value, swap_value); } while (compare_value != old_value); return true; } extern "C" __device__ bool slotEmptyKeyCAS_int8(int8_t* slot, int8_t new_val, int8_t init_val) { // properly align the slot address: unsigned int* base_slot_address = reinterpret_cast<unsigned int*>(reinterpret_cast<size_t>(slot) & ~0x3); constexpr unsigned int byte_permutations[] = {0x3214, 0x3240, 0x3410, 0x4210}; unsigned int old_value = *base_slot_address; unsigned int swap_value, compare_value; do { compare_value = old_value; // exit criteria: if init_val does not exist in the slot (some other thread has // succeeded) if (static_cast<unsigned int>(init_val) != __byte_perm(compare_value, 0, (reinterpret_cast<size_t>(slot) & 0x3) | 0x4440)) { return false; } swap_value = __byte_perm(compare_value, static_cast<unsigned int>(new_val), byte_permutations[reinterpret_cast<size_t>(slot) & 0x3]); old_value = atomicCAS(base_slot_address, compare_value, swap_value); } while (compare_value != old_value); return true; } #include "../Utils/ChunkIter.cpp" #include "DateTruncate.cpp" #include "ExtractFromTime.cpp" #define EXECUTE_INCLUDE #include "ArrayOps.cpp" #include "DateAdd.cpp" #include "StringFunctions.cpp" #undef EXECUTE_INCLUDE #include "../Utils/Regexp.cpp" #include "../Utils/StringLike.cpp" extern "C" __device__ uint64_t string_decode(int8_t* chunk_iter_, int64_t pos) { // TODO(alex): de-dup, the x64 version is basically identical ChunkIter* chunk_iter = reinterpret_cast<ChunkIter*>(chunk_iter_); VarlenDatum vd; bool is_end; ChunkIter_get_nth(chunk_iter, pos, false, &vd, &is_end); return vd.is_null ? 0 : (reinterpret_cast<uint64_t>(vd.pointer) & 0xffffffffffff) | (static_cast<uint64_t>(vd.length) << 48); } extern "C" __device__ void linear_probabilistic_count(uint8_t* bitmap, const uint32_t bitmap_bytes, const uint8_t* key_bytes, const uint32_t key_len) { const uint32_t bit_pos = MurmurHash1(key_bytes, key_len, 0) % (bitmap_bytes * 8); const uint32_t word_idx = bit_pos / 32; const uint32_t bit_idx = bit_pos % 32; atomicOr(((uint32_t*)bitmap) + word_idx, 1 << bit_idx); } extern "C" __device__ void agg_count_distinct_bitmap_gpu(int64_t* agg, const int64_t val, const int64_t min_val, const int64_t base_dev_addr, const int64_t base_host_addr, const uint64_t sub_bitmap_count, const uint64_t bitmap_bytes) { const uint64_t bitmap_idx = val - min_val; const uint32_t byte_idx = bitmap_idx >> 3; const uint32_t word_idx = byte_idx >> 2; const uint32_t byte_word_idx = byte_idx & 3; const int64_t host_addr = *agg; uint32_t* bitmap = (uint32_t*)(base_dev_addr + host_addr - base_host_addr + (threadIdx.x & (sub_bitmap_count - 1)) * bitmap_bytes); switch (byte_word_idx) { case 0: atomicOr(&bitmap[word_idx], 1 << (bitmap_idx & 7)); break; case 1: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 8)); break; case 2: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 16)); break; case 3: atomicOr(&bitmap[word_idx], 1 << ((bitmap_idx & 7) + 24)); break; default: break; } } extern "C" __device__ void agg_count_distinct_bitmap_skip_val_gpu( int64_t* agg, const int64_t val, const int64_t min_val, const int64_t skip_val, const int64_t base_dev_addr, const int64_t base_host_addr, const uint64_t sub_bitmap_count, const uint64_t bitmap_bytes) { if (val != skip_val) { agg_count_distinct_bitmap_gpu( agg, val, min_val, base_dev_addr, base_host_addr, sub_bitmap_count, bitmap_bytes); } } extern "C" __device__ void agg_approximate_count_distinct_gpu( int64_t* agg, const int64_t 
key, const uint32_t b, const int64_t base_dev_addr, const int64_t base_host_addr) { const uint64_t hash = MurmurHash64A(&key, sizeof(key), 0); const uint32_t index = hash >> (64 - b); const int32_t rank = get_rank(hash << b, 64 - b); const int64_t host_addr = *agg; int32_t* M = (int32_t*)(base_dev_addr + host_addr - base_host_addr); atomicMax(&M[index], rank); } extern "C" __device__ void force_sync() { __threadfence_block(); } extern "C" __device__ void sync_warp() { #if (CUDA_VERSION >= 9000) __syncwarp(); #endif } /** * Protected warp synchronization to make sure all (or none) threads within a warp go * through a synchronization barrier. thread_pos: the current thread position to be used * for a memory access row_count: maximum number of rows to be processed The function * performs warp sync iff all 32 threads within that warp will process valid data NOTE: it * currently assumes that warp size is 32. */ extern "C" __device__ void sync_warp_protected(int64_t thread_pos, int64_t row_count) { #if (CUDA_VERSION >= 9000) // only syncing if NOT within the same warp as those threads experiencing the critical // edge if ((((row_count - 1) | 0x1F) - thread_pos) >= 32) { __syncwarp(); } #endif }
e36da7fde619de4eea9423c2d9e43f4f7584102d.hip
// !!! This is a file automatically generated by hipify!!! #include <UnitTest++.h> #include <iostream> #include "GPUUtilityFunctions.hh" #include "MonteRayTypes.hh" SUITE( gpu_utility_functions_simple_tests ) { using namespace MonteRay; #ifdef __HIPCC__ TEST( setLaunchBounds_pos_threads_pos_nRaysPerThread ) { auto bounds = setLaunchBounds( 1, 1, 100000); CHECK_EQUAL( 3125, bounds.first ); // blocks CHECK_EQUAL( 32, bounds.second ); // threads } TEST( setLaunchBounds_neg_threads_neg_nRaysPerThread ) { auto bounds = setLaunchBounds( -1, -1, 100000); CHECK_EQUAL( 1, bounds.first ); // blocks CHECK_EQUAL( 1, bounds.second ); // threads } TEST( setLaunchBounds_neg_threads_pos_nRaysPerThread ) { auto bounds = setLaunchBounds( -1, 10, 100000); CHECK_EQUAL( 10000, bounds.first ); // blocks CHECK_EQUAL( 1, bounds.second ); // threads } TEST( setLaunchBounds_pos_threads_neg_nRaysPerThread ) { auto bounds = setLaunchBounds( 64, -10, 100000); CHECK_EQUAL( 10, bounds.first ); // blocks CHECK_EQUAL( 64, bounds.second ); // threads } TEST( setLaunchBounds_num_threads_non32multiple ) { auto bounds = setLaunchBounds( 63, -10, 100000); CHECK_EQUAL( 10, bounds.first ); // blocks CHECK_EQUAL( 64, bounds.second ); // threads } TEST( setLaunchBounds_more_threads_than_rays ) { auto bounds = setLaunchBounds( 512, 10, 100 ); CHECK_EQUAL( 1, bounds.first ); // blocks CHECK_EQUAL( 128, bounds.second ); // threads } TEST( setLaunchBounds_more_threads_than_MONTERAY_MAX_THREADS_PER_BLOCK ) { auto bounds = setLaunchBounds( MONTERAY_MAX_THREADS_PER_BLOCK+32, 10, 1000 ); CHECK_EQUAL( 1, bounds.first ); // blocks CHECK_EQUAL( MONTERAY_MAX_THREADS_PER_BLOCK, bounds.second ); // threads } TEST( setLaunchBounds_1_256_2568016 ) { auto bounds = setLaunchBounds( 256, 1, 2568016 ); CHECK_EQUAL( 390, bounds.first ); // blocks CHECK_EQUAL( 256, bounds.second ); // threads } #else TEST( setLaunchBounds_CPU ) { auto bounds = setLaunchBounds( 256, 1, 2568016 ); CHECK_EQUAL( 1, bounds.first ); // blocks CHECK_EQUAL( 1, bounds.second ); // threads bounds = setLaunchBounds( -256, -2, 2568016 ); CHECK_EQUAL( 1, bounds.first ); // blocks CHECK_EQUAL( 1, bounds.second ); // threads } #endif }
e36da7fde619de4eea9423c2d9e43f4f7584102d.cu
#include <UnitTest++.h> #include <iostream> #include "GPUUtilityFunctions.hh" #include "MonteRayTypes.hh" SUITE( gpu_utility_functions_simple_tests ) { using namespace MonteRay; #ifdef __CUDACC__ TEST( setLaunchBounds_pos_threads_pos_nRaysPerThread ) { auto bounds = setLaunchBounds( 1, 1, 100000); CHECK_EQUAL( 3125, bounds.first ); // blocks CHECK_EQUAL( 32, bounds.second ); // threads } TEST( setLaunchBounds_neg_threads_neg_nRaysPerThread ) { auto bounds = setLaunchBounds( -1, -1, 100000); CHECK_EQUAL( 1, bounds.first ); // blocks CHECK_EQUAL( 1, bounds.second ); // threads } TEST( setLaunchBounds_neg_threads_pos_nRaysPerThread ) { auto bounds = setLaunchBounds( -1, 10, 100000); CHECK_EQUAL( 10000, bounds.first ); // blocks CHECK_EQUAL( 1, bounds.second ); // threads } TEST( setLaunchBounds_pos_threads_neg_nRaysPerThread ) { auto bounds = setLaunchBounds( 64, -10, 100000); CHECK_EQUAL( 10, bounds.first ); // blocks CHECK_EQUAL( 64, bounds.second ); // threads } TEST( setLaunchBounds_num_threads_non32multiple ) { auto bounds = setLaunchBounds( 63, -10, 100000); CHECK_EQUAL( 10, bounds.first ); // blocks CHECK_EQUAL( 64, bounds.second ); // threads } TEST( setLaunchBounds_more_threads_than_rays ) { auto bounds = setLaunchBounds( 512, 10, 100 ); CHECK_EQUAL( 1, bounds.first ); // blocks CHECK_EQUAL( 128, bounds.second ); // threads } TEST( setLaunchBounds_more_threads_than_MONTERAY_MAX_THREADS_PER_BLOCK ) { auto bounds = setLaunchBounds( MONTERAY_MAX_THREADS_PER_BLOCK+32, 10, 1000 ); CHECK_EQUAL( 1, bounds.first ); // blocks CHECK_EQUAL( MONTERAY_MAX_THREADS_PER_BLOCK, bounds.second ); // threads } TEST( setLaunchBounds_1_256_2568016 ) { auto bounds = setLaunchBounds( 256, 1, 2568016 ); CHECK_EQUAL( 390, bounds.first ); // blocks CHECK_EQUAL( 256, bounds.second ); // threads } #else TEST( setLaunchBounds_CPU ) { auto bounds = setLaunchBounds( 256, 1, 2568016 ); CHECK_EQUAL( 1, bounds.first ); // blocks CHECK_EQUAL( 1, bounds.second ); // threads bounds = setLaunchBounds( -256, -2, 2568016 ); CHECK_EQUAL( 1, bounds.first ); // blocks CHECK_EQUAL( 1, bounds.second ); // threads } #endif }
18f5801d2abe6cceafe9df906ce2c8347db8f44e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "matrixMultiplyNaive.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; hipMalloc(&A, XSIZE*YSIZE); float *B = NULL; hipMalloc(&B, XSIZE*YSIZE); float *C = NULL; hipMalloc(&C, XSIZE*YSIZE); int N = XSIZE*YSIZE; int K = 1; int M = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( matrixMultiplyNaive), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N,K,M); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( matrixMultiplyNaive), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N,K,M); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( matrixMultiplyNaive), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N,K,M); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
18f5801d2abe6cceafe9df906ce2c8347db8f44e.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "matrixMultiplyNaive.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); float *B = NULL; cudaMalloc(&B, XSIZE*YSIZE); float *C = NULL; cudaMalloc(&C, XSIZE*YSIZE); int N = XSIZE*YSIZE; int K = 1; int M = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); matrixMultiplyNaive<<<gridBlock,threadBlock>>>(A,B,C,N,K,M); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { matrixMultiplyNaive<<<gridBlock,threadBlock>>>(A,B,C,N,K,M); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { matrixMultiplyNaive<<<gridBlock,threadBlock>>>(A,B,C,N,K,M); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
27d11b32e0d2b76561dd002878fdcdd72f7d2f6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> s d c @author Azzam Haidar */ #include "magma_internal.h" #include "magma_templates.h" #define zgemv_bs 32 #define BLOCK_SIZE 512 #define use_gemm_larft extern __shared__ magmaDoubleComplex shared_data[]; /******************************************************************************/ static __device__ void zlarft_gemvcolwise_device( int m, magmaDoubleComplex *v, magmaDoubleComplex *tau, magmaDoubleComplex *c, int ldc, magmaDoubleComplex *T, int ldt, int step ) { const int thblk = blockIdx.x; if (thblk > step) return; /* if blockIdx.x < step step performs the z = V(tx:n,tx)' * V(tx:n,1:tx-1) used for computing T:*/ if ( !MAGMA_Z_EQUAL(*tau, MAGMA_Z_ZERO) ) { if (thblk < step) { const int tx = threadIdx.x; magmaDoubleComplex *dc = c + blockIdx.x * ldc; __shared__ magmaDoubleComplex sum[ BLOCK_SIZE ]; magmaDoubleComplex tmp; /* perform {T_i}^H := V(:,i)' * V(:,1:i-1) */ if (tx == 0) tmp = dc[0]; //since V[0] should be one else tmp = MAGMA_Z_ZERO; for( int j = tx+1; j < m; j += BLOCK_SIZE ) { tmp += MAGMA_Z_CONJ( v[j] ) * dc[j]; } sum[tx] = tmp; magma_sum_reduce< BLOCK_SIZE >( tx, sum ); #if defined (use_gemm_larft) *(T+thblk) = MAGMA_Z_CONJ(sum[0]); #else tmp = - MAGMA_Z_CONJ(*tau) * sum[0]; *(T+thblk) = MAGMA_Z_CONJ(tmp); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp' //*(T+thblk) = - MAGMA_Z_CONJ(sum[0]) * (*tau); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp' #endif } else { #if defined (use_gemm_larft) *(T+thblk) = MAGMA_Z_ONE; #else *(T+thblk) = *tau; #endif } }// in case tau is zero put the corresponding column of T to zero else { *(T+thblk) = MAGMA_Z_ZERO; } } /******************************************************************************/ __global__ void zlarft_gemvcolwise_kernel( int m, magmaDoubleComplex *v, int ldv, magmaDoubleComplex *tau, magmaDoubleComplex *T, int ldt, int step ) { zlarft_gemvcolwise_device(m, v+step+step*ldv, tau+step, v+step, ldv, T+step*ldt, ldt, step); } /******************************************************************************/ __global__ void zlarft_gemvcolwise_kernel_batched( int m, magmaDoubleComplex **v_array, int ldv, magmaDoubleComplex **tau_array, magmaDoubleComplex **T_array, int ldt, int step ) { int batchid = blockIdx.z; zlarft_gemvcolwise_device(m, v_array[batchid]+step+step*ldv, tau_array[batchid]+step, v_array[batchid]+step, ldv, T_array[batchid]+step*ldt, ldt, step); } /******************************************************************************/ extern "C" void magmablas_zlarft_gemvcolwise( magma_int_t m, magma_int_t step, magmaDoubleComplex *v, magma_int_t ldv, magmaDoubleComplex *T, magma_int_t ldt, magmaDoubleComplex *tau, magma_queue_t queue ) { dim3 grid( step+1, 1, 1 ); dim3 threads( BLOCK_SIZE ); hipLaunchKernelGGL(( zlarft_gemvcolwise_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, v, ldv, tau, T, ldt, step); } /******************************************************************************/ extern "C" void magmablas_zlarft_gemvcolwise_batched( magma_int_t m, magma_int_t step, magmaDoubleComplex **v_array, magma_int_t ldv, magmaDoubleComplex **T_array, magma_int_t ldt, magmaDoubleComplex **tau_array, magma_int_t batchCount, magma_queue_t queue ) { dim3 grid( step+1, 1, batchCount ); dim3 threads( BLOCK_SIZE ); hipLaunchKernelGGL(( 
zlarft_gemvcolwise_kernel_batched) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, v_array, ldv, tau_array, T_array, ldt, step); } /******************************************************************************/ // zgemv(y=alpha*A*x) interface: T/W=tau*v*x, static __device__ void zlarft_gemvrowwise_device( int m, int i, magmaDoubleComplex *tau, magmaDoubleComplex *v_ptr, int ldv, magmaDoubleComplex *x_ptr, int incx, magmaDoubleComplex *T_ptr, int ldt, magmaDoubleComplex *W, magmaDoubleComplex* sdata) { int tx = threadIdx.x; int ty = threadIdx.y; if (tx == 0 && ty == 0) { T_ptr[0] = *tau; } if (i <= 0) return; magmaDoubleComplex res = MAGMA_Z_ZERO; v_ptr += ldv * ty; if (tx < zgemv_bs) { for (int s=tx; s < m; s += zgemv_bs) { res += MAGMA_Z_CONJ (v_ptr[s]) * x_ptr[s*incx]; } sdata[ty * zgemv_bs + tx] = res; } __syncthreads(); magma_sum_reduce<zgemv_bs>(tx, &(sdata[ty*zgemv_bs+0])); #if defined (use_gemm_larft) if (tx == 0) { W[ty] = -sdata[ty * zgemv_bs + 0]; } #else if (tx == 0) { W[ty] = -sdata[ty * zgemv_bs + 0] * (*tau); } #endif } /******************************************************************************/ // T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i) // T(i,i) = tau(i) __global__ void zlarft_gemvrowwise_kernel( int m, int i, magmaDoubleComplex *tau, magmaDoubleComplex *v, int ldv, magmaDoubleComplex *T, int ldt) { magmaDoubleComplex *W = T +i*ldt; magmaDoubleComplex *sdata = (magmaDoubleComplex*)shared_data; zlarft_gemvrowwise_device(m, i, tau+i, v+i, ldv, v+i+i*ldv, 1, T+i+i*ldt, ldt, W, sdata); } /******************************************************************************/ __global__ void zlarft_gemvrowwise_kernel_batched( int m, int i, magmaDoubleComplex **tau_array, magmaDoubleComplex **v_array, int ldv, magmaDoubleComplex **T_array, int ldt) { int batchid = blockIdx.z; magmaDoubleComplex *W = T_array[batchid] +i*ldt; magmaDoubleComplex *sdata = (magmaDoubleComplex*)shared_data; zlarft_gemvrowwise_device(m, i, tau_array[batchid]+i, v_array[batchid]+i, ldv, v_array[batchid]+i+i*ldv, 1, T_array[batchid] +i+i*ldt, ldt, W, sdata); } /******************************************************************************/ extern "C" void magmablas_zlarft_gemvrowwise( magma_int_t m, magma_int_t i, magmaDoubleComplex *tau, magmaDoubleComplex *v, magma_int_t ldv, magmaDoubleComplex *T, magma_int_t ldt, magmaDoubleComplex *W, magma_queue_t queue ) { dim3 grid(1); dim3 threads(zgemv_bs, max(i,1), 1); size_t shmem = sizeof(magmaDoubleComplex)*zgemv_bs*(i+1); hipLaunchKernelGGL(( zlarft_gemvrowwise_kernel) , dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, i, tau, v, ldv, T, ldt); } /******************************************************************************/ extern "C" void magmablas_zlarft_gemvrowwise_batched( magma_int_t m, magma_int_t i, magmaDoubleComplex **tau_array, magmaDoubleComplex **v_array, magma_int_t ldv, magmaDoubleComplex **T_array, magma_int_t ldt, magma_int_t batchCount, magma_queue_t queue) { dim3 grid(1, 1, batchCount); dim3 threads(zgemv_bs, max(i,1), 1); size_t shmem = sizeof(magmaDoubleComplex)*zgemv_bs*(i+1); /* zgemvrowwise used a bigger shared memory and has more data reuse and performs better */ hipLaunchKernelGGL(( zlarft_gemvrowwise_kernel_batched) , dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, i, tau_array, v_array, ldv, T_array, ldt); } /******************************************************************************/ /* loop_inside */ static __device__ void zlarft_gemv_loop_inside_device( int n, int k, 
magmaDoubleComplex *tau, magmaDoubleComplex *v, int ldv, magmaDoubleComplex *T, int ldt) { int tx = threadIdx.x; int ty = threadIdx.y; int incx = 1; magmaDoubleComplex *sdata = (magmaDoubleComplex*)shared_data; magmaDoubleComplex res; // write the first elment if (tx == 0 && ty == 0) { T[0] = tau[0]; } for (int i=1; i < k; i++) { int m = n-i; magmaDoubleComplex *v_ptr = v; v_ptr += i; magmaDoubleComplex *x_ptr = v_ptr + i * ldv; res = MAGMA_Z_ZERO; if (tx < zgemv_bs && ty < i) { v_ptr += ldv * ty; for (int s=tx; s < m; s += zgemv_bs) { res += MAGMA_Z_CONJ (v_ptr[s]) * x_ptr[s*incx]; } sdata[ty * zgemv_bs + tx] = res; } __syncthreads(); magma_sum_reduce<zgemv_bs>(tx, &(sdata[ty*zgemv_bs+0])); __syncthreads(); #if defined (use_gemm_larft) if (tx < i && ty == 0) { T[i* ldt + tx] = sdata[tx * zgemv_bs + 0]; } // not needed since it is overwritten in trmv /* if (tx == i && ty == 0) { T[i * ldt + i] = tau[i]; } */ #else if (tx < i && ty == 0) { T[i* ldt + tx] = -sdata[tx * zgemv_bs + 0] * (tau[i]); } if (tx == i && ty == 0) { T[i * ldt + i] = tau[i]; } #endif v_ptr -= i; } // end of loop k } /******************************************************************************/ __global__ void zlarft_gemv_loop_inside_kernel( int n, int k, magmaDoubleComplex *tau, magmaDoubleComplex *v, int ldv, magmaDoubleComplex *T, int ldt) { zlarft_gemv_loop_inside_device(n, k, tau, v, ldv, T, ldt); } /******************************************************************************/ __global__ void zlarft_gemv_loop_inside_kernel_batched( int n, int k, magmaDoubleComplex **tau_array, magmaDoubleComplex **v_array, int ldv, magmaDoubleComplex **T_array, int ldt) { int batchid = blockIdx.z; zlarft_gemv_loop_inside_device(n, k, tau_array[batchid], v_array[batchid], ldv, T_array[batchid], ldt); } /******************************************************************************/ extern "C" void magmablas_zlarft_gemv_loop_inside( magma_int_t n, magma_int_t k, magmaDoubleComplex *tau, magmaDoubleComplex *v, magma_int_t ldv, magmaDoubleComplex *T, magma_int_t ldt, magma_queue_t queue ) { dim3 grid(1); dim3 threads(zgemv_bs, max(k,1), 1); size_t shmem = sizeof(magmaDoubleComplex) * (zgemv_bs*(k+1)); hipLaunchKernelGGL(( zlarft_gemv_loop_inside_kernel) , dim3(grid), dim3(threads), shmem, queue->cuda_stream() , n, k, tau, v, ldv, T, ldt); } /******************************************************************************/ extern "C" void magmablas_zlarft_gemv_loop_inside_batched( magma_int_t n, magma_int_t k, magmaDoubleComplex **tau_array, magmaDoubleComplex **v_array, magma_int_t ldv, magmaDoubleComplex **T_array, magma_int_t ldt, magma_int_t batchCount, magma_queue_t queue) { dim3 grid(1, 1, batchCount); dim3 threads(zgemv_bs, max(k,1), 1); size_t shmem = sizeof(magmaDoubleComplex) * (zgemv_bs*(k+1)); hipLaunchKernelGGL(( zlarft_gemv_loop_inside_kernel_batched) , dim3(grid), dim3(threads), shmem, queue->cuda_stream() , n, k, tau_array, v_array, ldv, T_array, ldt); } /******************************************************************************/ static __device__ void zlarft_ztrmv_sm32x32_device( int n, int k, magmaDoubleComplex *tau, magmaDoubleComplex *Tin, int ldtin, magmaDoubleComplex *Tout, int ldtout ) { int tx = threadIdx.x; magmaDoubleComplex *sdata = (magmaDoubleComplex*)shared_data; magmaDoubleComplex res; // this routine apply a sequence of trmv to update k column of the triangular // T starting at n-k to n where T is of size n by n and where the first n-k // columns of T are supposed updated previously. 
// So the routine load all of T nxn to the shared memory // and apply the sequence of trmv. // to update a certain column i, threads go in horizontal fashion where // every thread read one row and do it gemv(dot) to generate // one element of the column of T then move to the next column // read T into shared for (int s=0; s < n-k; s++) { sdata[tx + s*n] = Tin[tx + s * ldtin]; } #if defined(use_gemm_larft) for (int s=n-k; s < n; s++) { if (tx == s) sdata[tx + s*n] = tau[s]; else sdata[tx + s*n] = -tau[s] * Tin[tx + s * ldtin]; } #else for (int s=n-k; s < n; s++) { sdata[tx + s*n] = Tin[tx + s * ldtin]; } #endif // perform trmv for (int i=n-k; i < n; i++) { __syncthreads(); res = MAGMA_Z_ZERO; if (tx < i) { for (int j=tx; j < i; j++) { res += sdata[tx + j * n] * sdata[j+ i * n]; } } __syncthreads(); if (tx < i) { sdata[tx + i * n] = res; } } __syncthreads(); // write back the updated block of k column of T for (int s=n-k; s < n; s++) { Tout[tx + s * ldtout] = sdata[tx + s*n]; } } /******************************************************************************/ __global__ void zlarft_ztrmv_sm32x32_kernel( int n, int k, magmaDoubleComplex *tau, magmaDoubleComplex *Tin, int ldtin, magmaDoubleComplex *Tout, int ldtout ) { zlarft_ztrmv_sm32x32_device( n, k, tau, Tin, ldtin, Tout, ldtout); } /******************************************************************************/ __global__ void zlarft_ztrmv_sm32x32_kernel_batched( int n, int k, magmaDoubleComplex **tau_array, magmaDoubleComplex **Tin_array, int ldtin, magmaDoubleComplex **Tout_array, int ldtout ) { int batchId = blockIdx.z; zlarft_ztrmv_sm32x32_device( n, k, tau_array[batchId], Tin_array[batchId], ldtin, Tout_array[batchId], ldtout); } /******************************************************************************/ extern "C" void magmablas_zlarft_ztrmv_sm32x32( magma_int_t m, magma_int_t n, magmaDoubleComplex *tau, magmaDoubleComplex *Tin, magma_int_t ldtin, magmaDoubleComplex *Tout, magma_int_t ldtout, magma_queue_t queue ) { dim3 grid(1); dim3 threads(max(m,1), 1, 1); size_t shmem = sizeof(magmaDoubleComplex)*(m*m); hipLaunchKernelGGL(( zlarft_ztrmv_sm32x32_kernel) , dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, tau, Tin, ldtin, Tout, ldtout); } /******************************************************************************/ extern "C" void magmablas_zlarft_ztrmv_sm32x32_batched( magma_int_t m, magma_int_t n, magmaDoubleComplex **tau_array, magmaDoubleComplex **Tin_array, magma_int_t ldtin, magmaDoubleComplex **Tout_array, magma_int_t ldtout, magma_int_t batchCount, magma_queue_t queue) { dim3 grid(1, 1, batchCount); dim3 threads(max(m,1), 1, 1); size_t shmem = sizeof(magmaDoubleComplex)*(m*m); hipLaunchKernelGGL(( zlarft_ztrmv_sm32x32_kernel_batched) , dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, tau_array, Tin_array, ldtin, Tout_array, ldtout); } /******************************************************************************/ static __device__ void zlarft_recztrmv_sm32x32_device( int m, int n, magmaDoubleComplex *tau, magmaDoubleComplex *Trec, int ldtrec, magmaDoubleComplex *Ttri, int ldttri) { int tx = threadIdx.x; magmaDoubleComplex *sdata = (magmaDoubleComplex*)shared_data; magmaDoubleComplex res; // to update a certain column i, threads go in horizontal fashion where // every thread read one row and do it gemv(dot) to generate // one element of the column of T then move to the next column // read T into shared for (int s=0; s < n; s++) { sdata[tx + s*n] = Trec[tx + s * ldtrec]; } __syncthreads(); 
// perform the sequence of n-1 gemv for (int i=0; i < n; i++) { res = MAGMA_Z_ZERO; for (int j=0; j < i; j++) { res += sdata[tx + j * n] * Ttri[j+ i * ldttri]; } __syncthreads(); // to be removed sdata[tx + i * n] = -tau[i] * (sdata[tx + i * n] + res); __syncthreads(); } // write back the updated block of k columns of T, multiplied by -tau for (int s=0; s < n; s++) { Trec[tx + s * ldtrec] = sdata[tx + s*n]; } } /******************************************************************************/ __global__ void zlarft_recztrmv_sm32x32_kernel( int m, int n, magmaDoubleComplex *tau, magmaDoubleComplex *Trec, int ldtrec, magmaDoubleComplex *Ttri, int ldttri) { zlarft_recztrmv_sm32x32_device(m, n, tau, Trec, ldtrec, Ttri, ldttri); } /******************************************************************************/ __global__ void zlarft_recztrmv_sm32x32_kernel_batched( int m, int n, magmaDoubleComplex **tau_array, magmaDoubleComplex **Trec_array, int ldtrec, magmaDoubleComplex **Ttri_array, int ldttri) { int batchId = blockIdx.z; zlarft_recztrmv_sm32x32_device(m, n, tau_array[batchId], Trec_array[batchId], ldtrec, Ttri_array[batchId], ldttri); } /******************************************************************************/ extern "C" void magmablas_zlarft_recztrmv_sm32x32( magma_int_t m, magma_int_t n, magmaDoubleComplex *tau, magmaDoubleComplex *Trec, magma_int_t ldtrec, magmaDoubleComplex *Ttri, magma_int_t ldttri, magma_queue_t queue ) { dim3 grid(1); dim3 threads(max(m,1), 1, 1); size_t shmem = sizeof(magmaDoubleComplex)*(m*n); hipLaunchKernelGGL(( zlarft_recztrmv_sm32x32_kernel) , dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, tau, Trec, ldtrec, Ttri, ldttri); } /******************************************************************************/ extern "C" void magmablas_zlarft_recztrmv_sm32x32_batched( magma_int_t m, magma_int_t n, magmaDoubleComplex **tau_array, magmaDoubleComplex **Trec_array, magma_int_t ldtrec, magmaDoubleComplex **Ttri_array, magma_int_t ldttri, magma_int_t batchCount, magma_queue_t queue) { dim3 grid(1, 1, batchCount); dim3 threads(max(m,1), 1, 1); size_t shmem = sizeof(magmaDoubleComplex)*(m*n); hipLaunchKernelGGL(( zlarft_recztrmv_sm32x32_kernel_batched) , dim3(grid), dim3(threads), shmem, queue->cuda_stream() , m, n, tau_array, Trec_array, ldtrec, Ttri_array, ldttri); }
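The gemvrowwise and loop_inside kernels above all follow one pattern: each thread accumulates a strided partial dot product into shared memory, and magma_sum_reduce then collapses the zgemv_bs partials per column. The snippet below is a minimal standalone CUDA sketch of that pattern, not MAGMA code; magma_sum_reduce is assumed to behave like the plain tree reduction written out here.

#include <cstdio>
#include <cuda_runtime.h>

// Illustrative stand-in for MAGMA's magma_sum_reduce: a block-wide
// shared-memory tree reduction over BS partial sums.
template <int BS>
__global__ void dot_blockreduce(const double *x, const double *y, int n, double *out)
{
    __shared__ double sdata[BS];
    int tx = threadIdx.x;

    // Each thread strides through the vector, exactly like the
    // "for (s = tx; s < m; s += zgemv_bs)" loops in the larft kernels.
    double partial = 0.0;
    for (int s = tx; s < n; s += BS)
        partial += x[s] * y[s];
    sdata[tx] = partial;
    __syncthreads();

    // Tree reduction in shared memory (BS must be a power of two).
    for (int stride = BS / 2; stride > 0; stride >>= 1) {
        if (tx < stride) sdata[tx] += sdata[tx + stride];
        __syncthreads();
    }
    if (tx == 0) *out = sdata[0];
}

int main()
{
    const int n = 1000;
    double hx[n], hy[n];
    for (int i = 0; i < n; ++i) { hx[i] = 1.0; hy[i] = 2.0; }

    double *dx, *dy, *dout;
    cudaMalloc(&dx, n * sizeof(double));
    cudaMalloc(&dy, n * sizeof(double));
    cudaMalloc(&dout, sizeof(double));
    cudaMemcpy(dx, hx, n * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dy, hy, n * sizeof(double), cudaMemcpyHostToDevice);

    dot_blockreduce<256><<<1, 256>>>(dx, dy, n, dout);

    double result = 0.0;
    cudaMemcpy(&result, dout, sizeof(double), cudaMemcpyDeviceToHost);
    printf("dot = %f (expected %f)\n", result, 2.0 * n);

    cudaFree(dx); cudaFree(dy); cudaFree(dout);
    return 0;
}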
27d11b32e0d2b76561dd002878fdcdd72f7d2f6e.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @precisions normal z -> s d c @author Azzam Haidar */ #include "magma_internal.h" #include "magma_templates.h" #define zgemv_bs 32 #define BLOCK_SIZE 512 #define use_gemm_larft extern __shared__ magmaDoubleComplex shared_data[]; /******************************************************************************/ static __device__ void zlarft_gemvcolwise_device( int m, magmaDoubleComplex *v, magmaDoubleComplex *tau, magmaDoubleComplex *c, int ldc, magmaDoubleComplex *T, int ldt, int step ) { const int thblk = blockIdx.x; if (thblk > step) return; /* if blockIdx.x < step step performs the z = V(tx:n,tx)' * V(tx:n,1:tx-1) used for computing T:*/ if ( !MAGMA_Z_EQUAL(*tau, MAGMA_Z_ZERO) ) { if (thblk < step) { const int tx = threadIdx.x; magmaDoubleComplex *dc = c + blockIdx.x * ldc; __shared__ magmaDoubleComplex sum[ BLOCK_SIZE ]; magmaDoubleComplex tmp; /* perform {T_i}^H := V(:,i)' * V(:,1:i-1) */ if (tx == 0) tmp = dc[0]; //since V[0] should be one else tmp = MAGMA_Z_ZERO; for( int j = tx+1; j < m; j += BLOCK_SIZE ) { tmp += MAGMA_Z_CONJ( v[j] ) * dc[j]; } sum[tx] = tmp; magma_sum_reduce< BLOCK_SIZE >( tx, sum ); #if defined (use_gemm_larft) *(T+thblk) = MAGMA_Z_CONJ(sum[0]); #else tmp = - MAGMA_Z_CONJ(*tau) * sum[0]; *(T+thblk) = MAGMA_Z_CONJ(tmp); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp' //*(T+thblk) = - MAGMA_Z_CONJ(sum[0]) * (*tau); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp' #endif } else { #if defined (use_gemm_larft) *(T+thblk) = MAGMA_Z_ONE; #else *(T+thblk) = *tau; #endif } }// in case tau is zero put the corresponding column of T to zero else { *(T+thblk) = MAGMA_Z_ZERO; } } /******************************************************************************/ __global__ void zlarft_gemvcolwise_kernel( int m, magmaDoubleComplex *v, int ldv, magmaDoubleComplex *tau, magmaDoubleComplex *T, int ldt, int step ) { zlarft_gemvcolwise_device(m, v+step+step*ldv, tau+step, v+step, ldv, T+step*ldt, ldt, step); } /******************************************************************************/ __global__ void zlarft_gemvcolwise_kernel_batched( int m, magmaDoubleComplex **v_array, int ldv, magmaDoubleComplex **tau_array, magmaDoubleComplex **T_array, int ldt, int step ) { int batchid = blockIdx.z; zlarft_gemvcolwise_device(m, v_array[batchid]+step+step*ldv, tau_array[batchid]+step, v_array[batchid]+step, ldv, T_array[batchid]+step*ldt, ldt, step); } /******************************************************************************/ extern "C" void magmablas_zlarft_gemvcolwise( magma_int_t m, magma_int_t step, magmaDoubleComplex *v, magma_int_t ldv, magmaDoubleComplex *T, magma_int_t ldt, magmaDoubleComplex *tau, magma_queue_t queue ) { dim3 grid( step+1, 1, 1 ); dim3 threads( BLOCK_SIZE ); zlarft_gemvcolwise_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( m, v, ldv, tau, T, ldt, step); } /******************************************************************************/ extern "C" void magmablas_zlarft_gemvcolwise_batched( magma_int_t m, magma_int_t step, magmaDoubleComplex **v_array, magma_int_t ldv, magmaDoubleComplex **T_array, magma_int_t ldt, magmaDoubleComplex **tau_array, magma_int_t batchCount, magma_queue_t queue ) { dim3 grid( step+1, 1, batchCount ); dim3 threads( BLOCK_SIZE ); zlarft_gemvcolwise_kernel_batched <<< grid, threads, 0, queue->cuda_stream() >>> ( m, v_array, ldv, tau_array, T_array, ldt, step); } 
/******************************************************************************/ // zgemv(y=alpha*A*x) interface: T/W=tau*v*x, static __device__ void zlarft_gemvrowwise_device( int m, int i, magmaDoubleComplex *tau, magmaDoubleComplex *v_ptr, int ldv, magmaDoubleComplex *x_ptr, int incx, magmaDoubleComplex *T_ptr, int ldt, magmaDoubleComplex *W, magmaDoubleComplex* sdata) { int tx = threadIdx.x; int ty = threadIdx.y; if (tx == 0 && ty == 0) { T_ptr[0] = *tau; } if (i <= 0) return; magmaDoubleComplex res = MAGMA_Z_ZERO; v_ptr += ldv * ty; if (tx < zgemv_bs) { for (int s=tx; s < m; s += zgemv_bs) { res += MAGMA_Z_CONJ (v_ptr[s]) * x_ptr[s*incx]; } sdata[ty * zgemv_bs + tx] = res; } __syncthreads(); magma_sum_reduce<zgemv_bs>(tx, &(sdata[ty*zgemv_bs+0])); #if defined (use_gemm_larft) if (tx == 0) { W[ty] = -sdata[ty * zgemv_bs + 0]; } #else if (tx == 0) { W[ty] = -sdata[ty * zgemv_bs + 0] * (*tau); } #endif } /******************************************************************************/ // T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i) // T(i,i) = tau(i) __global__ void zlarft_gemvrowwise_kernel( int m, int i, magmaDoubleComplex *tau, magmaDoubleComplex *v, int ldv, magmaDoubleComplex *T, int ldt) { magmaDoubleComplex *W = T +i*ldt; magmaDoubleComplex *sdata = (magmaDoubleComplex*)shared_data; zlarft_gemvrowwise_device(m, i, tau+i, v+i, ldv, v+i+i*ldv, 1, T+i+i*ldt, ldt, W, sdata); } /******************************************************************************/ __global__ void zlarft_gemvrowwise_kernel_batched( int m, int i, magmaDoubleComplex **tau_array, magmaDoubleComplex **v_array, int ldv, magmaDoubleComplex **T_array, int ldt) { int batchid = blockIdx.z; magmaDoubleComplex *W = T_array[batchid] +i*ldt; magmaDoubleComplex *sdata = (magmaDoubleComplex*)shared_data; zlarft_gemvrowwise_device(m, i, tau_array[batchid]+i, v_array[batchid]+i, ldv, v_array[batchid]+i+i*ldv, 1, T_array[batchid] +i+i*ldt, ldt, W, sdata); } /******************************************************************************/ extern "C" void magmablas_zlarft_gemvrowwise( magma_int_t m, magma_int_t i, magmaDoubleComplex *tau, magmaDoubleComplex *v, magma_int_t ldv, magmaDoubleComplex *T, magma_int_t ldt, magmaDoubleComplex *W, magma_queue_t queue ) { dim3 grid(1); dim3 threads(zgemv_bs, max(i,1), 1); size_t shmem = sizeof(magmaDoubleComplex)*zgemv_bs*(i+1); zlarft_gemvrowwise_kernel <<< grid, threads, shmem, queue->cuda_stream() >>> (m, i, tau, v, ldv, T, ldt); } /******************************************************************************/ extern "C" void magmablas_zlarft_gemvrowwise_batched( magma_int_t m, magma_int_t i, magmaDoubleComplex **tau_array, magmaDoubleComplex **v_array, magma_int_t ldv, magmaDoubleComplex **T_array, magma_int_t ldt, magma_int_t batchCount, magma_queue_t queue) { dim3 grid(1, 1, batchCount); dim3 threads(zgemv_bs, max(i,1), 1); size_t shmem = sizeof(magmaDoubleComplex)*zgemv_bs*(i+1); /* zgemvrowwise used a bigger shared memory and has more data reuse and performs better */ zlarft_gemvrowwise_kernel_batched <<< grid, threads, shmem, queue->cuda_stream() >>> (m, i, tau_array, v_array, ldv, T_array, ldt); } /******************************************************************************/ /* loop_inside */ static __device__ void zlarft_gemv_loop_inside_device( int n, int k, magmaDoubleComplex *tau, magmaDoubleComplex *v, int ldv, magmaDoubleComplex *T, int ldt) { int tx = threadIdx.x; int ty = threadIdx.y; int incx = 1; magmaDoubleComplex *sdata = (magmaDoubleComplex*)shared_data; 
magmaDoubleComplex res; // write the first elment if (tx == 0 && ty == 0) { T[0] = tau[0]; } for (int i=1; i < k; i++) { int m = n-i; magmaDoubleComplex *v_ptr = v; v_ptr += i; magmaDoubleComplex *x_ptr = v_ptr + i * ldv; res = MAGMA_Z_ZERO; if (tx < zgemv_bs && ty < i) { v_ptr += ldv * ty; for (int s=tx; s < m; s += zgemv_bs) { res += MAGMA_Z_CONJ (v_ptr[s]) * x_ptr[s*incx]; } sdata[ty * zgemv_bs + tx] = res; } __syncthreads(); magma_sum_reduce<zgemv_bs>(tx, &(sdata[ty*zgemv_bs+0])); __syncthreads(); #if defined (use_gemm_larft) if (tx < i && ty == 0) { T[i* ldt + tx] = sdata[tx * zgemv_bs + 0]; } // not needed since it is overwritten in trmv /* if (tx == i && ty == 0) { T[i * ldt + i] = tau[i]; } */ #else if (tx < i && ty == 0) { T[i* ldt + tx] = -sdata[tx * zgemv_bs + 0] * (tau[i]); } if (tx == i && ty == 0) { T[i * ldt + i] = tau[i]; } #endif v_ptr -= i; } // end of loop k } /******************************************************************************/ __global__ void zlarft_gemv_loop_inside_kernel( int n, int k, magmaDoubleComplex *tau, magmaDoubleComplex *v, int ldv, magmaDoubleComplex *T, int ldt) { zlarft_gemv_loop_inside_device(n, k, tau, v, ldv, T, ldt); } /******************************************************************************/ __global__ void zlarft_gemv_loop_inside_kernel_batched( int n, int k, magmaDoubleComplex **tau_array, magmaDoubleComplex **v_array, int ldv, magmaDoubleComplex **T_array, int ldt) { int batchid = blockIdx.z; zlarft_gemv_loop_inside_device(n, k, tau_array[batchid], v_array[batchid], ldv, T_array[batchid], ldt); } /******************************************************************************/ extern "C" void magmablas_zlarft_gemv_loop_inside( magma_int_t n, magma_int_t k, magmaDoubleComplex *tau, magmaDoubleComplex *v, magma_int_t ldv, magmaDoubleComplex *T, magma_int_t ldt, magma_queue_t queue ) { dim3 grid(1); dim3 threads(zgemv_bs, max(k,1), 1); size_t shmem = sizeof(magmaDoubleComplex) * (zgemv_bs*(k+1)); zlarft_gemv_loop_inside_kernel <<< grid, threads, shmem, queue->cuda_stream() >>> (n, k, tau, v, ldv, T, ldt); } /******************************************************************************/ extern "C" void magmablas_zlarft_gemv_loop_inside_batched( magma_int_t n, magma_int_t k, magmaDoubleComplex **tau_array, magmaDoubleComplex **v_array, magma_int_t ldv, magmaDoubleComplex **T_array, magma_int_t ldt, magma_int_t batchCount, magma_queue_t queue) { dim3 grid(1, 1, batchCount); dim3 threads(zgemv_bs, max(k,1), 1); size_t shmem = sizeof(magmaDoubleComplex) * (zgemv_bs*(k+1)); zlarft_gemv_loop_inside_kernel_batched <<< grid, threads, shmem, queue->cuda_stream() >>> (n, k, tau_array, v_array, ldv, T_array, ldt); } /******************************************************************************/ static __device__ void zlarft_ztrmv_sm32x32_device( int n, int k, magmaDoubleComplex *tau, magmaDoubleComplex *Tin, int ldtin, magmaDoubleComplex *Tout, int ldtout ) { int tx = threadIdx.x; magmaDoubleComplex *sdata = (magmaDoubleComplex*)shared_data; magmaDoubleComplex res; // this routine apply a sequence of trmv to update k column of the triangular // T starting at n-k to n where T is of size n by n and where the first n-k // columns of T are supposed updated previously. // So the routine load all of T nxn to the shared memory // and apply the sequence of trmv. 
// to update a certain column i, threads go in horizontal fashion where // every thread read one row and do it gemv(dot) to generate // one element of the column of T then move to the next column // read T into shared for (int s=0; s < n-k; s++) { sdata[tx + s*n] = Tin[tx + s * ldtin]; } #if defined(use_gemm_larft) for (int s=n-k; s < n; s++) { if (tx == s) sdata[tx + s*n] = tau[s]; else sdata[tx + s*n] = -tau[s] * Tin[tx + s * ldtin]; } #else for (int s=n-k; s < n; s++) { sdata[tx + s*n] = Tin[tx + s * ldtin]; } #endif // perform trmv for (int i=n-k; i < n; i++) { __syncthreads(); res = MAGMA_Z_ZERO; if (tx < i) { for (int j=tx; j < i; j++) { res += sdata[tx + j * n] * sdata[j+ i * n]; } } __syncthreads(); if (tx < i) { sdata[tx + i * n] = res; } } __syncthreads(); // write back the updated block of k column of T for (int s=n-k; s < n; s++) { Tout[tx + s * ldtout] = sdata[tx + s*n]; } } /******************************************************************************/ __global__ void zlarft_ztrmv_sm32x32_kernel( int n, int k, magmaDoubleComplex *tau, magmaDoubleComplex *Tin, int ldtin, magmaDoubleComplex *Tout, int ldtout ) { zlarft_ztrmv_sm32x32_device( n, k, tau, Tin, ldtin, Tout, ldtout); } /******************************************************************************/ __global__ void zlarft_ztrmv_sm32x32_kernel_batched( int n, int k, magmaDoubleComplex **tau_array, magmaDoubleComplex **Tin_array, int ldtin, magmaDoubleComplex **Tout_array, int ldtout ) { int batchId = blockIdx.z; zlarft_ztrmv_sm32x32_device( n, k, tau_array[batchId], Tin_array[batchId], ldtin, Tout_array[batchId], ldtout); } /******************************************************************************/ extern "C" void magmablas_zlarft_ztrmv_sm32x32( magma_int_t m, magma_int_t n, magmaDoubleComplex *tau, magmaDoubleComplex *Tin, magma_int_t ldtin, magmaDoubleComplex *Tout, magma_int_t ldtout, magma_queue_t queue ) { dim3 grid(1); dim3 threads(max(m,1), 1, 1); size_t shmem = sizeof(magmaDoubleComplex)*(m*m); zlarft_ztrmv_sm32x32_kernel <<< grid, threads, shmem, queue->cuda_stream() >>> (m, n, tau, Tin, ldtin, Tout, ldtout); } /******************************************************************************/ extern "C" void magmablas_zlarft_ztrmv_sm32x32_batched( magma_int_t m, magma_int_t n, magmaDoubleComplex **tau_array, magmaDoubleComplex **Tin_array, magma_int_t ldtin, magmaDoubleComplex **Tout_array, magma_int_t ldtout, magma_int_t batchCount, magma_queue_t queue) { dim3 grid(1, 1, batchCount); dim3 threads(max(m,1), 1, 1); size_t shmem = sizeof(magmaDoubleComplex)*(m*m); zlarft_ztrmv_sm32x32_kernel_batched <<< grid, threads, shmem, queue->cuda_stream() >>> (m, n, tau_array, Tin_array, ldtin, Tout_array, ldtout); } /******************************************************************************/ static __device__ void zlarft_recztrmv_sm32x32_device( int m, int n, magmaDoubleComplex *tau, magmaDoubleComplex *Trec, int ldtrec, magmaDoubleComplex *Ttri, int ldttri) { int tx = threadIdx.x; magmaDoubleComplex *sdata = (magmaDoubleComplex*)shared_data; magmaDoubleComplex res; // to update a certain column i, threads go in horizontal fashion where // every thread read one row and do it gemv(dot) to generate // one element of the column of T then move to the next column // read T into shared for (int s=0; s < n; s++) { sdata[tx + s*n] = Trec[tx + s * ldtrec]; } __syncthreads(); // perform sequence of n-1 gemv for (int i=0; i < n; i++) { res = MAGMA_Z_ZERO; for (int j=0; j < i; j++) { res += sdata[tx + j * n] * Ttri[j+ i * 
ldttri]; } __syncthreads(); // to be removed sdata[tx + i * n] = -tau[i] * (sdata[tx + i * n] + res); __syncthreads(); } // write back the updated block of k columns of T, multiplied by -tau for (int s=0; s < n; s++) { Trec[tx + s * ldtrec] = sdata[tx + s*n]; } } /******************************************************************************/ __global__ void zlarft_recztrmv_sm32x32_kernel( int m, int n, magmaDoubleComplex *tau, magmaDoubleComplex *Trec, int ldtrec, magmaDoubleComplex *Ttri, int ldttri) { zlarft_recztrmv_sm32x32_device(m, n, tau, Trec, ldtrec, Ttri, ldttri); } /******************************************************************************/ __global__ void zlarft_recztrmv_sm32x32_kernel_batched( int m, int n, magmaDoubleComplex **tau_array, magmaDoubleComplex **Trec_array, int ldtrec, magmaDoubleComplex **Ttri_array, int ldttri) { int batchId = blockIdx.z; zlarft_recztrmv_sm32x32_device(m, n, tau_array[batchId], Trec_array[batchId], ldtrec, Ttri_array[batchId], ldttri); } /******************************************************************************/ extern "C" void magmablas_zlarft_recztrmv_sm32x32( magma_int_t m, magma_int_t n, magmaDoubleComplex *tau, magmaDoubleComplex *Trec, magma_int_t ldtrec, magmaDoubleComplex *Ttri, magma_int_t ldttri, magma_queue_t queue ) { dim3 grid(1); dim3 threads(max(m,1), 1, 1); size_t shmem = sizeof(magmaDoubleComplex)*(m*n); zlarft_recztrmv_sm32x32_kernel <<< grid, threads, shmem, queue->cuda_stream() >>> (m, n, tau, Trec, ldtrec, Ttri, ldttri); } /******************************************************************************/ extern "C" void magmablas_zlarft_recztrmv_sm32x32_batched( magma_int_t m, magma_int_t n, magmaDoubleComplex **tau_array, magmaDoubleComplex **Trec_array, magma_int_t ldtrec, magmaDoubleComplex **Ttri_array, magma_int_t ldttri, magma_int_t batchCount, magma_queue_t queue) { dim3 grid(1, 1, batchCount); dim3 threads(max(m,1), 1, 1); size_t shmem = sizeof(magmaDoubleComplex)*(m*n); zlarft_recztrmv_sm32x32_kernel_batched <<< grid, threads, shmem, queue->cuda_stream() >>> (m, n, tau_array, Trec_array, ldtrec, Ttri_array, ldttri); }
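This .cu file and the .hip file above it are the same MAGMA source before and after hipify; the systematic differences are the hip runtime header and the kernel-launch syntax. A minimal sketch of that mapping, using a made-up scale kernel (the grid, block, shared-memory and stream arguments keep their positions across the two forms):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(float *x, float alpha, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= alpha;
}

int main()
{
    const int n = 1 << 10;
    float *d_x;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    dim3 grid((n + 255) / 256), threads(256);
    size_t shmem = 0;
    cudaStream_t stream = 0;

    // CUDA launch, as in the .cu version of the file above:
    scale<<<grid, threads, shmem, stream>>>(d_x, 2.0f, n);

    // The hipified version of the same launch reads:
    //   hipLaunchKernelGGL(scale, grid, threads, shmem, stream, d_x, 2.0f, n);
    // with cudaMalloc/cudaStream_t replaced by hipMalloc/hipStream_t.

    cudaDeviceSynchronize();
    cudaFree(d_x);
    printf("done\n");
    return 0;
}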
4e98f9b44cb4e8d547f1776075ea4f8f46106318.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************** Emitting C Generated Code *******************************************/ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include "cudnn_header.h" #include "nccl_header.h" #include <string.h> #include <cblas.h> #include <stdlib.h> #include "cuda_header.h" #include <stdio.h> #include <stdint.h> #include "cublas_header.h" #include <stdbool.h> #include "mpi_header.h" #include "scanner_header.h" /************* Functions **************/ __global__ void x10(float* x11, float x12, int x13) { // begin generating kernel function for FILL of type Float int x14 = gridDim.x * blockDim.x; int x15 = threadIdx.x + blockIdx.x * blockDim.x; while (x15 < x13) { x11[x15] = x12; x15 = x15 + x14; } // end generating kernel function for FILL of type Float } __global__ void x22(float* x23, float** x24) { // This is cuda 2-section split kernel for 3D input at axis 2. // It takes a 3D array and splits on the innermost dimension (dim2) into 2 arrays. // arg0: input array // arg1: array of output arrays // call constraint: sum of out(i).size = in.size for i in [0, 2) int x25 = blockIdx.x * blockDim.x + threadIdx.x; if (x25 < 16384) { float x26 = x23[x25]; int x27 = x25 % 32; if (x27 < 16) x24[0][x25 / 32 * 16 + x27] = x26; else x24[1][x25 / 32 * 16 + (x27 - 16)] = x26; } } __global__ void x29(float* x30, float* x31, float* x32, int x33) { // begin generating kernel function for MULT of type Float int x34 = gridDim.x * blockDim.x; int x35 = threadIdx.x + blockIdx.x * blockDim.x; while (x35 < x33) { int x36 = x35; x32[x36] = x30[x36] * x31[x36]; x35 = x35 + x34; } // end generating kernel function for MULT of type Float } __global__ void x43(float* x44, float* x45, int x46) { // begin generating kernel function for ACCUM of type Float int x47 = gridDim.x * blockDim.x; int x48 = threadIdx.x + blockIdx.x * blockDim.x; while (x48 < x46) { int x49 = x48; x44[x49] = x44[x49] + x45[x49]; x48 = x48 + x47; } // end generating kernel function for ACCUM of type Float } __global__ void x54(float** x55, float* x56) { // this is cuda 2-section concat kernel for 3D inputs at axis 2. // It concatenates 2 3D arrays on the innermost dimension (dim2). 
// arg0: array of input input arrays // arg1: output array // call constraint: in.size = 2 // call constraint: sum of in(i).size = out.size for i in [0, 2) int x57 = blockIdx.x * blockDim.x + threadIdx.x; if (x57 < 16384) { int x58 = x57 % 32; if (x58 < 16) x56[x57] = x55[0][x57 / 32 * 16 + x58]; else x56[x57] = x55[1][x57 / 32 * 16 + (x58 - 16)]; } } /**************** Snippet ****************/ void Snippet(int x0) { // begin setting up the MPI/NCCL environment int x1 = 0; int x2 = 0; MPICHECK(MPI_Init(NULL, NULL)); MPICHECK(MPI_Comm_rank(MPI_COMM_WORLD, &x2)); MPICHECK(MPI_Comm_size(MPI_COMM_WORLD, &x1)); MPICHECK(MPI_Barrier(MPI_COMM_WORLD)); CUDA_CALL(hipSetDevice(x2)); ncclUniqueId x3; NCCLCHECK(ncclGetUniqueId(&x3)); MPICHECK(MPI_Bcast(&x3, NCCL_UNIQUE_ID_BYTES, MPI_CHAR, 0, MPI_COMM_WORLD)); ncclComm_t x4; NCCLCHECK(ncclCommInitRank(&x4, x1, x3, x2)); hipStream_t x5; CUDA_CALL(hipStreamCreateWithFlags(&x5, hipStreamNonBlocking)); int x6 = x2; // end setting up the MPI/NCCL environment // begin initializing GPU array of size 16384 and type Float float* x7 = (float*)malloc(16384 * sizeof(float)); CUDA_CALL(hipSetDevice(x6)); float* x8 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x8, (size_t)(16384 * sizeof(float)))); scan_float_array(x7, 16384, "golden/weight_rank_%d.data", x6); CUDA_CALL(hipMemcpy(x8, x7, (size_t)(16384 * sizeof(float)), hipMemcpyHostToDevice)); // end initializing GPU array of size 16384 and type Float // begin initializing fixed GPU array of size 16384 and type Float and device (pre-rename) x39 CUDA_CALL(hipSetDevice(x6)); float* x9 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x9, (size_t)(16384 * sizeof(float)))); hipLaunchKernelGGL(( x10), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x9, 0, 16384); // end initializing fixed GPU array of size 16384 and type Float and device (pre-rename) x39 // begin initializing GPU array of size 8192 and type Float float* x16 = (float*)malloc(8192 * sizeof(float)); CUDA_CALL(hipSetDevice(x6)); float* x17 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x17, (size_t)(8192 * sizeof(float)))); scan_float_array(x16, 8192, "golden/input_rank_%d.data", x6); CUDA_CALL(hipMemcpy(x17, x16, (size_t)(8192 * sizeof(float)), hipMemcpyHostToDevice)); // end initializing GPU array of size 8192 and type Float CUDA_CALL(hipSetDevice(x6)); float* x18 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x18, (size_t)(8192 * sizeof(float)))); CUDA_CALL(hipSetDevice(x6)); float* x19 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x19, (size_t)(8192 * sizeof(float)))); float** x20 = (float**)malloc(2 * sizeof(float*)); x20[0] = x18; x20[1] = x19; float** x21 = (float**)malloc(0 * sizeof(float*)); CUDA_CALL(hipMalloc(&x21, (size_t)(2 * sizeof(float*)))); CUDA_CALL(hipMemcpy(x21, x20, (size_t)(2 * sizeof(float*)), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( x22), dim3(dim3(32, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x8, x21); // begin computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x103 and right_operand x119 CUDA_CALL(hipSetDevice(x6)); float* x28 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x28, (size_t)(8192 * sizeof(float)))); hipLaunchKernelGGL(( x29), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x17, x18, x28, 8192); // end computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x103 and right_operand x119 // begin initializing fixed GPU array of size 8192 and type Float and device 
(pre-rename) x39 CUDA_CALL(hipSetDevice(x6)); float* x37 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x37, (size_t)(8192 * sizeof(float)))); hipLaunchKernelGGL(( x10), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x37, 0, 8192); // end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 // begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 CUDA_CALL(hipSetDevice(x6)); float* x38 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x38, (size_t)(8192 * sizeof(float)))); hipLaunchKernelGGL(( x10), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x38, 0, 8192); // end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 // begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 CUDA_CALL(hipSetDevice(x6)); float* x39 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x39, (size_t)(8192 * sizeof(float)))); hipLaunchKernelGGL(( x10), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x39, 0, 8192); // end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 // begin checking GPU array of size 8192 and type Float float* x40 = (float*)malloc(8192 * sizeof(float)); CUDA_CALL(hipMemcpy(x40, x28, (size_t)(8192 * sizeof(float)), hipMemcpyDeviceToHost)); check_float_array_with_file(x40, 8192, "golden/loss_rank_%d.data", x6); // end checking GPU array of size 8192 and type Float // begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 CUDA_CALL(hipSetDevice(x6)); float* x41 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x41, (size_t)(8192 * sizeof(float)))); hipLaunchKernelGGL(( x10), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x41, 1, 8192); // end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 // begin computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x119 and right_operand x282 CUDA_CALL(hipSetDevice(x6)); float* x42 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x42, (size_t)(8192 * sizeof(float)))); hipLaunchKernelGGL(( x29), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x18, x41, x42, 8192); // end computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x119 and right_operand x282 // begin computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x233 and addition_operand x295 CUDA_CALL(hipSetDevice(x6)); hipLaunchKernelGGL(( x43), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x37, x42, 8192); // end computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x233 and addition_operand x295 // begin computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x103 and right_operand x282 CUDA_CALL(hipSetDevice(x6)); float* x50 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x50, (size_t)(8192 * sizeof(float)))); hipLaunchKernelGGL(( x29), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x17, x41, x50, 8192); // end computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x103 and right_operand x282 // begin computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x246 and addition_operand x345 CUDA_CALL(hipSetDevice(x6)); hipLaunchKernelGGL(( x43), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x38, 
x50, 8192); // end computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x246 and addition_operand x345 CUDA_CALL(hipSetDevice(x6)); float* x51 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(hipMalloc(&x51, (size_t)(16384 * sizeof(float)))); float** x52 = (float**)malloc(2 * sizeof(float*)); x52[0] = x38; x52[1] = x39; float** x53 = (float**)malloc(0 * sizeof(float*)); CUDA_CALL(hipMalloc(&x53, (size_t)(2 * sizeof(float*)))); CUDA_CALL(hipMemcpy(x53, x52, (size_t)(2 * sizeof(float*)), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( x54), dim3(dim3(32, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x53, x51); // begin computing ACCUM on GPU for size 16384 and type Float at device (pre-rename) x39 with base_operand x62 and addition_operand x364 CUDA_CALL(hipSetDevice(x6)); hipLaunchKernelGGL(( x43), dim3(dim3(28, 1, 1)), dim3(dim3(512, 1, 1)), 0, 0, x9, x51, 16384); // end computing ACCUM on GPU for size 16384 and type Float at device (pre-rename) x39 with base_operand x62 and addition_operand x364 // begin checking GPU array of size 16384 and type Float float* x59 = (float*)malloc(16384 * sizeof(float)); CUDA_CALL(hipMemcpy(x59, x9, (size_t)(16384 * sizeof(float)), hipMemcpyDeviceToHost)); check_float_array_with_file(x59, 16384, "golden/weight_grad_rank_%d.data", x6); // end checking GPU array of size 16384 and type Float // begin checking GPU array of size 8192 and type Float float* x60 = (float*)malloc(8192 * sizeof(float)); CUDA_CALL(hipMemcpy(x60, x37, (size_t)(8192 * sizeof(float)), hipMemcpyDeviceToHost)); check_float_array_with_file(x60, 8192, "golden/input_grad_rank_%d.data", x6); // end checking GPU array of size 8192 and type Float NCCLCHECK(ncclCommDestroy(x4)); MPICHECK(MPI_Finalize()); } /***************************************** End of C Generated Code *******************************************/ int main(int argc, char *argv[]) { if (argc != 2) { printf("usage: %s <arg>\n", argv[0]); return 0; } Snippet(atoi(argv[1])); return 0; }
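The generated FILL (x10), MULT (x29) and ACCUM (x43) kernels are all grid-stride loops launched as <<<dim3(28,1,1), dim3(512,1,1)>>>, so 14336 threads cover arrays of 8192 or 16384 elements by looping. Below is a minimal standalone restatement of that pattern with readable names; the fill kernel here is illustrative and not part of the generated code.

#include <cstdio>
#include <cuda_runtime.h>

// Same grid-stride pattern as the generated FILL/MULT/ACCUM kernels:
// each thread handles indices i, i + stride, i + 2*stride, ...
__global__ void fill(float *out, float value, int n)
{
    int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
        out[i] = value;
}

int main()
{
    const int n = 16384;
    float *d_out;
    cudaMalloc(&d_out, n * sizeof(float));

    // 28 blocks of 512 threads, matching the generated launch configuration;
    // since 28 * 512 = 14336 < 16384, the stride loop is what covers the tail.
    fill<<<28, 512>>>(d_out, 1.0f, n);
    cudaDeviceSynchronize();

    float h0 = 0.0f, hlast = 0.0f;
    cudaMemcpy(&h0, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(&hlast, d_out + n - 1, sizeof(float), cudaMemcpyDeviceToHost);
    printf("first=%f last=%f\n", h0, hlast);

    cudaFree(d_out);
    return 0;
}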
4e98f9b44cb4e8d547f1776075ea4f8f46106318.cu
/***************************************** Emitting C Generated Code *******************************************/ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include "cudnn_header.h" #include "nccl_header.h" #include <string.h> #include <cblas.h> #include <stdlib.h> #include "cuda_header.h" #include <stdio.h> #include <stdint.h> #include "cublas_header.h" #include <stdbool.h> #include "mpi_header.h" #include "scanner_header.h" /************* Functions **************/ __global__ void x10(float* x11, float x12, int x13) { // begin generating kernel function for FILL of type Float int x14 = gridDim.x * blockDim.x; int x15 = threadIdx.x + blockIdx.x * blockDim.x; while (x15 < x13) { x11[x15] = x12; x15 = x15 + x14; } // end generating kernel function for FILL of type Float } __global__ void x22(float* x23, float** x24) { // This is cuda 2-section split kernel for 3D input at axis 2. // It takes a 3D array and splits on the innermost dimension (dim2) into 2 arrays. // arg0: input array // arg1: array of output arrays // call constraint: sum of out(i).size = in.size for i in [0, 2) int x25 = blockIdx.x * blockDim.x + threadIdx.x; if (x25 < 16384) { float x26 = x23[x25]; int x27 = x25 % 32; if (x27 < 16) x24[0][x25 / 32 * 16 + x27] = x26; else x24[1][x25 / 32 * 16 + (x27 - 16)] = x26; } } __global__ void x29(float* x30, float* x31, float* x32, int x33) { // begin generating kernel function for MULT of type Float int x34 = gridDim.x * blockDim.x; int x35 = threadIdx.x + blockIdx.x * blockDim.x; while (x35 < x33) { int x36 = x35; x32[x36] = x30[x36] * x31[x36]; x35 = x35 + x34; } // end generating kernel function for MULT of type Float } __global__ void x43(float* x44, float* x45, int x46) { // begin generating kernel function for ACCUM of type Float int x47 = gridDim.x * blockDim.x; int x48 = threadIdx.x + blockIdx.x * blockDim.x; while (x48 < x46) { int x49 = x48; x44[x49] = x44[x49] + x45[x49]; x48 = x48 + x47; } // end generating kernel function for ACCUM of type Float } __global__ void x54(float** x55, float* x56) { // this is cuda 2-section concat kernel for 3D inputs at axis 2. // It concatenates 2 3D arrays on the innermost dimension (dim2). 
// arg0: array of input input arrays // arg1: output array // call constraint: in.size = 2 // call constraint: sum of in(i).size = out.size for i in [0, 2) int x57 = blockIdx.x * blockDim.x + threadIdx.x; if (x57 < 16384) { int x58 = x57 % 32; if (x58 < 16) x56[x57] = x55[0][x57 / 32 * 16 + x58]; else x56[x57] = x55[1][x57 / 32 * 16 + (x58 - 16)]; } } /**************** Snippet ****************/ void Snippet(int x0) { // begin setting up the MPI/NCCL environment int x1 = 0; int x2 = 0; MPICHECK(MPI_Init(NULL, NULL)); MPICHECK(MPI_Comm_rank(MPI_COMM_WORLD, &x2)); MPICHECK(MPI_Comm_size(MPI_COMM_WORLD, &x1)); MPICHECK(MPI_Barrier(MPI_COMM_WORLD)); CUDA_CALL(cudaSetDevice(x2)); ncclUniqueId x3; NCCLCHECK(ncclGetUniqueId(&x3)); MPICHECK(MPI_Bcast(&x3, NCCL_UNIQUE_ID_BYTES, MPI_CHAR, 0, MPI_COMM_WORLD)); ncclComm_t x4; NCCLCHECK(ncclCommInitRank(&x4, x1, x3, x2)); cudaStream_t x5; CUDA_CALL(cudaStreamCreateWithFlags(&x5, cudaStreamNonBlocking)); int x6 = x2; // end setting up the MPI/NCCL environment // begin initializing GPU array of size 16384 and type Float float* x7 = (float*)malloc(16384 * sizeof(float)); CUDA_CALL(cudaSetDevice(x6)); float* x8 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x8, (size_t)(16384 * sizeof(float)))); scan_float_array(x7, 16384, "golden/weight_rank_%d.data", x6); CUDA_CALL(cudaMemcpy(x8, x7, (size_t)(16384 * sizeof(float)), cudaMemcpyHostToDevice)); // end initializing GPU array of size 16384 and type Float // begin initializing fixed GPU array of size 16384 and type Float and device (pre-rename) x39 CUDA_CALL(cudaSetDevice(x6)); float* x9 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x9, (size_t)(16384 * sizeof(float)))); x10<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x9, 0, 16384); // end initializing fixed GPU array of size 16384 and type Float and device (pre-rename) x39 // begin initializing GPU array of size 8192 and type Float float* x16 = (float*)malloc(8192 * sizeof(float)); CUDA_CALL(cudaSetDevice(x6)); float* x17 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x17, (size_t)(8192 * sizeof(float)))); scan_float_array(x16, 8192, "golden/input_rank_%d.data", x6); CUDA_CALL(cudaMemcpy(x17, x16, (size_t)(8192 * sizeof(float)), cudaMemcpyHostToDevice)); // end initializing GPU array of size 8192 and type Float CUDA_CALL(cudaSetDevice(x6)); float* x18 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x18, (size_t)(8192 * sizeof(float)))); CUDA_CALL(cudaSetDevice(x6)); float* x19 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x19, (size_t)(8192 * sizeof(float)))); float** x20 = (float**)malloc(2 * sizeof(float*)); x20[0] = x18; x20[1] = x19; float** x21 = (float**)malloc(0 * sizeof(float*)); CUDA_CALL(cudaMalloc(&x21, (size_t)(2 * sizeof(float*)))); CUDA_CALL(cudaMemcpy(x21, x20, (size_t)(2 * sizeof(float*)), cudaMemcpyHostToDevice)); x22<<<dim3(32, 1, 1), dim3(512, 1, 1)>>>(x8, x21); // begin computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x103 and right_operand x119 CUDA_CALL(cudaSetDevice(x6)); float* x28 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x28, (size_t)(8192 * sizeof(float)))); x29<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x17, x18, x28, 8192); // end computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x103 and right_operand x119 // begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 CUDA_CALL(cudaSetDevice(x6)); float* x37 = (float*)malloc(0 * sizeof(float)); 
CUDA_CALL(cudaMalloc(&x37, (size_t)(8192 * sizeof(float)))); x10<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x37, 0, 8192); // end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 // begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 CUDA_CALL(cudaSetDevice(x6)); float* x38 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x38, (size_t)(8192 * sizeof(float)))); x10<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x38, 0, 8192); // end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 // begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 CUDA_CALL(cudaSetDevice(x6)); float* x39 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x39, (size_t)(8192 * sizeof(float)))); x10<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x39, 0, 8192); // end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 // begin checking GPU array of size 8192 and type Float float* x40 = (float*)malloc(8192 * sizeof(float)); CUDA_CALL(cudaMemcpy(x40, x28, (size_t)(8192 * sizeof(float)), cudaMemcpyDeviceToHost)); check_float_array_with_file(x40, 8192, "golden/loss_rank_%d.data", x6); // end checking GPU array of size 8192 and type Float // begin initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 CUDA_CALL(cudaSetDevice(x6)); float* x41 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x41, (size_t)(8192 * sizeof(float)))); x10<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x41, 1, 8192); // end initializing fixed GPU array of size 8192 and type Float and device (pre-rename) x39 // begin computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x119 and right_operand x282 CUDA_CALL(cudaSetDevice(x6)); float* x42 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x42, (size_t)(8192 * sizeof(float)))); x29<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x18, x41, x42, 8192); // end computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x119 and right_operand x282 // begin computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x233 and addition_operand x295 CUDA_CALL(cudaSetDevice(x6)); x43<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x37, x42, 8192); // end computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x233 and addition_operand x295 // begin computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x103 and right_operand x282 CUDA_CALL(cudaSetDevice(x6)); float* x50 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x50, (size_t)(8192 * sizeof(float)))); x29<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x17, x41, x50, 8192); // end computing MULT on GPU for size 8192 and type Float at device (pre-rename) x39 with left_operand x103 and right_operand x282 // begin computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x246 and addition_operand x345 CUDA_CALL(cudaSetDevice(x6)); x43<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x38, x50, 8192); // end computing ACCUM on GPU for size 8192 and type Float at device (pre-rename) x39 with base_operand x246 and addition_operand x345 CUDA_CALL(cudaSetDevice(x6)); float* x51 = (float*)malloc(0 * sizeof(float)); CUDA_CALL(cudaMalloc(&x51, (size_t)(16384 * sizeof(float)))); float** x52 = (float**)malloc(2 * sizeof(float*)); x52[0] = x38; x52[1] = x39; float** 
x53 = (float**)malloc(0 * sizeof(float*)); CUDA_CALL(cudaMalloc(&x53, (size_t)(2 * sizeof(float*)))); CUDA_CALL(cudaMemcpy(x53, x52, (size_t)(2 * sizeof(float*)), cudaMemcpyHostToDevice)); x54<<<dim3(32, 1, 1), dim3(512, 1, 1)>>>(x53, x51); // begin computing ACCUM on GPU for size 16384 and type Float at device (pre-rename) x39 with base_operand x62 and addition_operand x364 CUDA_CALL(cudaSetDevice(x6)); x43<<<dim3(28, 1, 1), dim3(512, 1, 1)>>>(x9, x51, 16384); // end computing ACCUM on GPU for size 16384 and type Float at device (pre-rename) x39 with base_operand x62 and addition_operand x364 // begin checking GPU array of size 16384 and type Float float* x59 = (float*)malloc(16384 * sizeof(float)); CUDA_CALL(cudaMemcpy(x59, x9, (size_t)(16384 * sizeof(float)), cudaMemcpyDeviceToHost)); check_float_array_with_file(x59, 16384, "golden/weight_grad_rank_%d.data", x6); // end checking GPU array of size 16384 and type Float // begin checking GPU array of size 8192 and type Float float* x60 = (float*)malloc(8192 * sizeof(float)); CUDA_CALL(cudaMemcpy(x60, x37, (size_t)(8192 * sizeof(float)), cudaMemcpyDeviceToHost)); check_float_array_with_file(x60, 8192, "golden/input_grad_rank_%d.data", x6); // end checking GPU array of size 8192 and type Float NCCLCHECK(ncclCommDestroy(x4)); MPICHECK(MPI_Finalize()); } /***************************************** End of C Generated Code *******************************************/ int main(int argc, char *argv[]) { if (argc != 2) { printf("usage: %s <arg>\n", argv[0]); return 0; } Snippet(atoi(argv[1])); return 0; }
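The split kernel (x22) and the concat kernel (x54) share one piece of index arithmetic: the innermost dimension of 32 is cut into two halves of 16, so flat index i goes to half (i % 32) / 16 at position i/32*16 + (i % 32) % 16. A small host-only check that this mapping round-trips (plain C++, no GPU needed; not part of the generated files):

#include <cstdio>
#include <vector>

int main()
{
    const int total = 16384, inner = 32, half = 16;
    std::vector<float> in(total), out0(total / 2), out1(total / 2), back(total);

    for (int i = 0; i < total; ++i) in[i] = float(i);

    // split, as in kernel x22
    for (int i = 0; i < total; ++i) {
        int j = i % inner;
        if (j < half) out0[i / inner * half + j] = in[i];
        else          out1[i / inner * half + (j - half)] = in[i];
    }

    // concat, as in kernel x54
    for (int i = 0; i < total; ++i) {
        int j = i % inner;
        back[i] = (j < half) ? out0[i / inner * half + j]
                             : out1[i / inner * half + (j - half)];
    }

    int mismatches = 0;
    for (int i = 0; i < total; ++i) if (back[i] != in[i]) ++mismatches;
    printf("mismatches: %d\n", mismatches);   // expected: 0
    return 0;
}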
7f018f5928f51291627d15466c243ab4b47ae0ee.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <hip/hip_runtime.h> #include <stdio.h> /* * This example demonstrates the impact of misaligned reads on performance by * forcing misaligned reads to occur on a float*. Kernels that reduce the * performance impact of misaligned reads via unrolling are also included below. */ void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("different on %dth element: host %f gpu %f\n", i, hostRef[i], gpuRef[i]); break; } } if (!match) printf("Arrays do not match.\n\n"); } void initialData(float *ip, int size) { for (int i = 0; i < size; i++) { ip[i] = (float)( rand() & 0xFF ) / 100.0f; } return; } void sumArraysOnHost(float *A, float *B, float *C, const int n, int offset) { for (int idx = offset, k = 0; idx < n; idx++, k++) { C[k] = A[idx] + B[idx]; } } __global__ void warmup(float *A, float *B, float *C, const int n, int offset) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int k = i + offset; if (k < n) C[i] = A[k] + B[k]; } __global__ void readOffset(float *A, float *B, float *C, const int n, int offset) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int k = i + offset; if (k < n) C[i] = A[k] + B[k]; } __global__ void readOffsetUnroll2(float *A, float *B, float *C, const int n, int offset) { unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; unsigned int k = i + offset; if (k + blockDim.x < n) { C[i] = A[k] + B[k]; C[i + blockDim.x] = A[k + blockDim.x] + B[k + blockDim.x]; } } __global__ void readOffsetUnroll4(float *A, float *B, float *C, const int n, int offset) { unsigned int i = blockIdx.x * blockDim.x * 4 + threadIdx.x; unsigned int k = i + offset; if (k + 3 * blockDim.x < n) { C[i] = A[k] + B[k]; C[i + blockDim.x] = A[k + blockDim.x] + B[k + blockDim.x]; C[i + 2 * blockDim.x] = A[k + 2 * blockDim.x] + B[k + 2 * blockDim.x]; C[i + 3 * blockDim.x] = A[k + 3 * blockDim.x] + B[k + 3 * blockDim.x]; } } int main(int argc, char **argv) { // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(hipSetDevice(dev)); // set up array size int nElem = 1 << 20; // total number of elements to reduce printf(" with array size %d\n", nElem); size_t nBytes = nElem * sizeof(float); // set up offset for summary int blocksize = 512; int offset = 0; if (argc > 1) offset = atoi(argv[1]); if (argc > 2) blocksize = atoi(argv[2]); // execution configuration dim3 block (blocksize, 1); dim3 grid ((nElem + block.x - 1) / block.x, 1); // allocate host memory float *h_A = (float *)malloc(nBytes); float *h_B = (float *)malloc(nBytes); float *hostRef = (float *)malloc(nBytes); float *gpuRef = (float *)malloc(nBytes); // initialize host array initialData(h_A, nElem); memcpy(h_B, h_A, nBytes); // summary at host side sumArraysOnHost(h_A, h_B, hostRef, nElem, offset); // allocate device memory float *d_A, *d_B, *d_C; CHECK(hipMalloc((float**)&d_A, nBytes)); CHECK(hipMalloc((float**)&d_B, nBytes)); CHECK(hipMalloc((float**)&d_C, nBytes)); // copy data from host to device CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_B, h_A, nBytes, hipMemcpyHostToDevice)); // kernel 1: double iStart = seconds(); hipLaunchKernelGGL(( warmup), dim3(grid), dim3(block), 0, 0, d_A, d_B, 
d_C, nElem, offset); CHECK(hipDeviceSynchronize()); double iElaps = seconds() - iStart; printf("warmup <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x, block.x, offset, iElaps); CHECK(hipGetLastError()); // kernel 1 iStart = seconds(); hipLaunchKernelGGL(( readOffset), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem, offset); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; printf("readOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x, block.x, offset, iElaps); CHECK(hipGetLastError()); // copy kernel result back to host side and check device results CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); checkResult(hostRef, gpuRef, nElem-offset); // kernel 2 iStart = seconds(); hipLaunchKernelGGL(( readOffsetUnroll2), dim3(grid.x/2), dim3(block), 0, 0, d_A, d_B, d_C, nElem/2, offset); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; printf("unroll2 <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x / 2, block.x, offset, iElaps); CHECK(hipGetLastError()); // copy kernel result back to host side and check device results CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); checkResult(hostRef, gpuRef, nElem - offset); // kernel 3 iStart = seconds(); hipLaunchKernelGGL(( readOffsetUnroll4), dim3(grid.x / 4), dim3(block), 0, 0, d_A, d_B, d_C, nElem / 4, offset); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; printf("unroll4 <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x / 4, block.x, offset, iElaps); CHECK(hipGetLastError()); // copy kernel result back to host side and check device results CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); checkResult(hostRef, gpuRef, nElem - offset); // free host and device memory CHECK(hipFree(d_A)); CHECK(hipFree(d_B)); CHECK(hipFree(d_C)); free(h_A); free(h_B); // reset device CHECK(hipDeviceReset()); return EXIT_SUCCESS; }
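The point of the readOffset kernels is that a nonzero offset shifts each warp's first load away from the allocation's natural alignment. hipMalloc/cudaMalloc return pointers aligned to at least 256 bytes, so float loads starting at A + offset begin on a 128-byte boundary only when offset is a multiple of 32; the exact transaction granularity depends on the architecture. A tiny host-side illustration of that arithmetic (the base address is hypothetical, not taken from the example):

#include <cstdio>
#include <cstdint>

int main()
{
    // Pretend base is the (at least 256-byte aligned) pointer returned by
    // hipMalloc/cudaMalloc; 0 is used so the arithmetic is easy to read.
    const uintptr_t base = 0;
    const int offsets[] = {0, 11, 32, 128};
    for (int offset : offsets) {
        uintptr_t addr = base + (uintptr_t)offset * sizeof(float);
        printf("offset %3d -> byte offset %4zu, 128-byte aligned: %s\n",
               offset, (size_t)addr, (addr % 128 == 0) ? "yes" : "no");
    }
    return 0;
}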
7f018f5928f51291627d15466c243ab4b47ae0ee.cu
#include "../common/common.h" #include <cuda_runtime.h> #include <stdio.h> /* * This example demonstrates the impact of misaligned reads on performance by * forcing misaligned reads to occur on a float*. Kernels that reduce the * performance impact of misaligned reads via unrolling are also included below. */ void checkResult(float *hostRef, float *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("different on %dth element: host %f gpu %f\n", i, hostRef[i], gpuRef[i]); break; } } if (!match) printf("Arrays do not match.\n\n"); } void initialData(float *ip, int size) { for (int i = 0; i < size; i++) { ip[i] = (float)( rand() & 0xFF ) / 100.0f; } return; } void sumArraysOnHost(float *A, float *B, float *C, const int n, int offset) { for (int idx = offset, k = 0; idx < n; idx++, k++) { C[k] = A[idx] + B[idx]; } } __global__ void warmup(float *A, float *B, float *C, const int n, int offset) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int k = i + offset; if (k < n) C[i] = A[k] + B[k]; } __global__ void readOffset(float *A, float *B, float *C, const int n, int offset) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int k = i + offset; if (k < n) C[i] = A[k] + B[k]; } __global__ void readOffsetUnroll2(float *A, float *B, float *C, const int n, int offset) { unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x; unsigned int k = i + offset; if (k + blockDim.x < n) { C[i] = A[k] + B[k]; C[i + blockDim.x] = A[k + blockDim.x] + B[k + blockDim.x]; } } __global__ void readOffsetUnroll4(float *A, float *B, float *C, const int n, int offset) { unsigned int i = blockIdx.x * blockDim.x * 4 + threadIdx.x; unsigned int k = i + offset; if (k + 3 * blockDim.x < n) { C[i] = A[k] + B[k]; C[i + blockDim.x] = A[k + blockDim.x] + B[k + blockDim.x]; C[i + 2 * blockDim.x] = A[k + 2 * blockDim.x] + B[k + 2 * blockDim.x]; C[i + 3 * blockDim.x] = A[k + 3 * blockDim.x] + B[k + 3 * blockDim.x]; } } int main(int argc, char **argv) { // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); // set up array size int nElem = 1 << 20; // total number of elements to reduce printf(" with array size %d\n", nElem); size_t nBytes = nElem * sizeof(float); // set up offset for summary int blocksize = 512; int offset = 0; if (argc > 1) offset = atoi(argv[1]); if (argc > 2) blocksize = atoi(argv[2]); // execution configuration dim3 block (blocksize, 1); dim3 grid ((nElem + block.x - 1) / block.x, 1); // allocate host memory float *h_A = (float *)malloc(nBytes); float *h_B = (float *)malloc(nBytes); float *hostRef = (float *)malloc(nBytes); float *gpuRef = (float *)malloc(nBytes); // initialize host array initialData(h_A, nElem); memcpy(h_B, h_A, nBytes); // summary at host side sumArraysOnHost(h_A, h_B, hostRef, nElem, offset); // allocate device memory float *d_A, *d_B, *d_C; CHECK(cudaMalloc((float**)&d_A, nBytes)); CHECK(cudaMalloc((float**)&d_B, nBytes)); CHECK(cudaMalloc((float**)&d_C, nBytes)); // copy data from host to device CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_B, h_A, nBytes, cudaMemcpyHostToDevice)); // kernel 1: double iStart = seconds(); warmup<<<grid, block>>>(d_A, d_B, d_C, nElem, offset); CHECK(cudaDeviceSynchronize()); double iElaps = seconds() - iStart; 
printf("warmup <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x, block.x, offset, iElaps); CHECK(cudaGetLastError()); // kernel 1 iStart = seconds(); readOffset<<<grid, block>>>(d_A, d_B, d_C, nElem, offset); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; printf("readOffset <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x, block.x, offset, iElaps); CHECK(cudaGetLastError()); // copy kernel result back to host side and check device results CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); checkResult(hostRef, gpuRef, nElem-offset); // kernel 2 iStart = seconds(); readOffsetUnroll2<<<grid.x/2, block>>>(d_A, d_B, d_C, nElem/2, offset); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; printf("unroll2 <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x / 2, block.x, offset, iElaps); CHECK(cudaGetLastError()); // copy kernel result back to host side and check device results CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); checkResult(hostRef, gpuRef, nElem - offset); // kernel 3 iStart = seconds(); readOffsetUnroll4<<<grid.x / 4, block>>>(d_A, d_B, d_C, nElem / 4, offset); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; printf("unroll4 <<< %4d, %4d >>> offset %4d elapsed %f sec\n", grid.x / 4, block.x, offset, iElaps); CHECK(cudaGetLastError()); // copy kernel result back to host side and check device results CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); checkResult(hostRef, gpuRef, nElem - offset); // free host and device memory CHECK(cudaFree(d_A)); CHECK(cudaFree(d_B)); CHECK(cudaFree(d_C)); free(h_A); free(h_B); // reset device CHECK(cudaDeviceReset()); return EXIT_SUCCESS; }
2318862c9b1d05eb9bdf5c06335ff8166d8d0460.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <assert.h> #include "RayTracing.h" #include "Device.h" #include "MathTools.h" #include "SphereFactory.h" #include "ConstantMemoryLink.h" using cpu::IntervalI; #define LENGTH 1000 __constant__ Sphere TAB_DATA_CM[LENGTH]; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void rayTracing(uchar4* ptrDevPixels, int w, int h, Sphere* ptrDevSpheres, int n, float t); extern __global__ void rayTracingSM(uchar4* ptrDevPixels, int w, int h, Sphere* ptrDevSpheres, int n, float t); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*-------------------------*\ |* Constructor *| \*-------------------------*/ // Global memory : 1 // Constant memory : 2 // Shared memory : 3 RayTracing::RayTracing(int w, int h, int nSphere, float dt, int memoryType, string title) { // Inputs this->w = w; this->h = h; this->nSphere = nSphere; this->dt = dt; // Tools this->dg = dim3(16, 2, 1); // to be tuned this->db = dim3(32, 4, 1); // to be tuned int margin = 50; this->ptrSpheres = SphereFactory::createSpheres(nSphere, w, h, margin); this->t = 0.0f; //Outputs this->title = title; // Check: Device::assertDim(dg, db); // CM if (memoryType == 2) { this->ptrProcessFunction = &RayTracing::processCM; copySpheresToConstantMemory(); } sizeSpheres = sizeof(Sphere) * LENGTH; if (memoryType == 1 || memoryType == 3) { this->ptrProcessFunction = memoryType == 1 ?
&RayTracing::processGM : &RayTracing::processSM; HANDLE_ERROR(hipMalloc(&ptrDevSpheres, sizeSpheres)); HANDLE_ERROR(hipMemcpy(ptrDevSpheres, ptrSpheres, sizeSpheres, hipMemcpyHostToDevice)); } } RayTracing::~RayTracing() { delete[] this->ptrSpheres; HANDLE_ERROR(hipFree(ptrDevSpheres)); } /*-------------------------*\ |* Methode *| \*-------------------------*/ ConstantMemoryLink constantMemoryLink(void) { Sphere* ptrDevTabData; size_t sizeAll = LENGTH * sizeof(Sphere); HANDLE_ERROR(hipGetSymbolAddress((void ** )&ptrDevTabData, TAB_DATA_CM)); ConstantMemoryLink cmLink = { (void**) ptrDevTabData, LENGTH, sizeAll }; return cmLink; } void RayTracing::copySpheresToConstantMemory() { ConstantMemoryLink cmLink = constantMemoryLink(); this->ptrDevSpheres = (Sphere*) cmLink.ptrDevTab; size_t sizeALL = cmLink.sizeAll; HANDLE_ERROR(hipMemcpy(ptrDevSpheres, ptrSpheres, sizeALL, hipMemcpyHostToDevice)); } /** * Override * Call periodicly by the API */ void RayTracing::process(uchar4* ptrDevPixels, int w, int h) { (this->*ptrProcessFunction)(ptrDevPixels, w, h); } void RayTracing::processGM(uchar4* ptrDevPixels, int w, int h) {hipLaunchKernelGGL(( rayTracing), dim3(this->dg), dim3(this->db), 0, 0, ptrDevPixels, w, h, this->ptrDevSpheres, this->nSphere, this->t); } void RayTracing::processCM(uchar4* ptrDevPixels, int w, int h) {hipLaunchKernelGGL(( rayTracing), dim3(this->dg), dim3(this->db), 0, 0, ptrDevPixels, w, h, this->ptrDevSpheres, this->nSphere, this->t); } void RayTracing::processSM(uchar4* ptrDevPixels, int w, int h) {hipLaunchKernelGGL(( rayTracingSM), dim3(this->dg), dim3(this->db), sizeSpheres, 0, ptrDevPixels, w, h, this->ptrDevSpheres, this->nSphere, this->t); } /** * Override * Call periodicly by the API */ void RayTracing::animationStep() { t += dt; } /*--------------*\ |* get *| \*--------------*/ /** * Override */ float RayTracing::getAnimationPara(void) { return t; } /** * Override */ int RayTracing::getW(void) { return w; } /** * Override */ int RayTracing::getH(void) { return h; } /** * Override */ string RayTracing::getTitle(void) { return title; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
2318862c9b1d05eb9bdf5c06335ff8166d8d0460.cu
#include <iostream> #include <assert.h> #include "RayTracing.h" #include "Device.h" #include "MathTools.h" #include "SphereFactory.h" #include "ConstantMemoryLink.h" using cpu::IntervalI; #define LENGTH 1000 __constant__ Sphere TAB_DATA_CM[LENGTH]; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void rayTracing(uchar4* ptrDevPixels, int w, int h, Sphere* ptrDevSpheres, int n, float t); extern __global__ void rayTracingSM(uchar4* ptrDevPixels, int w, int h, Sphere* ptrDevSpheres, int n, float t); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*-------------------------*\ |* Constructeur *| \*-------------------------*/ // Global memory : 1 // Constant memory : 2 // Shared memory : 3 RayTracing::RayTracing(int w, int h, int nSphere, float dt, int memoryType, string title) { // Inputs this->w = w; this->h = h; this->nSphere = nSphere; this->dt = dt; // Tools this->dg = dim3(16, 2, 1); // disons a optimiser this->db = dim3(32, 4, 1); // disons a optimiser int margin = 50; this->ptrSpheres = SphereFactory::createSpheres(nSphere, w, h, margin); this->t = 0.0f; //Outputs this->title = title; // Check: Device::assertDim(dg, db); // CM if (memoryType == 2) { this->ptrProcessFunction = &RayTracing::processCM; copySpheresToConstantMemory(); } sizeSpheres = sizeof(Sphere) * LENGTH; if (memoryType == 1 || memoryType == 3) { this->ptrProcessFunction = memoryType == 1 ? 
&RayTracing::processGM : &RayTracing::processSM; HANDLE_ERROR(cudaMalloc(&ptrDevSpheres, sizeSpheres)); HANDLE_ERROR(cudaMemcpy(ptrDevSpheres, ptrSpheres, sizeSpheres, cudaMemcpyHostToDevice)); } } RayTracing::~RayTracing() { delete[] this->ptrSpheres; HANDLE_ERROR(cudaFree(ptrDevSpheres)); } /*-------------------------*\ |* Methode *| \*-------------------------*/ ConstantMemoryLink constantMemoryLink(void) { Sphere* ptrDevTabData; size_t sizeAll = LENGTH * sizeof(Sphere); HANDLE_ERROR(cudaGetSymbolAddress((void ** )&ptrDevTabData, TAB_DATA_CM)); ConstantMemoryLink cmLink = { (void**) ptrDevTabData, LENGTH, sizeAll }; return cmLink; } void RayTracing::copySpheresToConstantMemory() { ConstantMemoryLink cmLink = constantMemoryLink(); this->ptrDevSpheres = (Sphere*) cmLink.ptrDevTab; size_t sizeALL = cmLink.sizeAll; HANDLE_ERROR(cudaMemcpy(ptrDevSpheres, ptrSpheres, sizeALL, cudaMemcpyHostToDevice)); } /** * Override * Call periodicly by the API */ void RayTracing::process(uchar4* ptrDevPixels, int w, int h) { (this->*ptrProcessFunction)(ptrDevPixels, w, h); } void RayTracing::processGM(uchar4* ptrDevPixels, int w, int h) { rayTracing<<<this->dg, this->db>>>(ptrDevPixels, w, h, this->ptrDevSpheres, this->nSphere, this->t); } void RayTracing::processCM(uchar4* ptrDevPixels, int w, int h) { rayTracing<<<this->dg, this->db>>>(ptrDevPixels, w, h, this->ptrDevSpheres, this->nSphere, this->t); } void RayTracing::processSM(uchar4* ptrDevPixels, int w, int h) { rayTracingSM<<<this->dg, this->db, sizeSpheres>>>(ptrDevPixels, w, h, this->ptrDevSpheres, this->nSphere, this->t); } /** * Override * Call periodicly by the API */ void RayTracing::animationStep() { t += dt; } /*--------------*\ |* get *| \*--------------*/ /** * Override */ float RayTracing::getAnimationPara(void) { return t; } /** * Override */ int RayTracing::getW(void) { return w; } /** * Override */ int RayTracing::getH(void) { return h; } /** * Override */ string RayTracing::getTitle(void) { return title; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
5473c7ca403c726405abe2cd78df4c2fbe62f505.hip
// !!! This is a file automatically generated by hipify!!! /* Includes, system */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> /* Includes, cuda */ #include <rocblas.h> #include <hip/hip_runtime.h> //#include "rocblas.h" /* Number of columns & rows in dictionary */ // TODO: get as input #define M 300 // num of Dictionary columns #define N 50 // num of Dictionary rows #define X 13// number of signals /* Number of non-zero elements in signal */ int K = 4; /* Residual error */ double epsilon = 1.0e-7; /* Max num of iterations - assume as same as num of elements in signal */ int T = N; /* Sign function */ double sign(double x){return (x>=0) - (x<0);} /* Matrix indexing convention */ #define id(m, n, ld) (((n) * (ld) + (m))) int main(int argc, char** argv) { cublasStatus status; double *h_D, *h_X, *h_C, *c; //host memory pointers double *d_D = 0, *d_S = 0, *d_R = 0,*d_G = 0; //device memory pointers int i; int MX = M*X; int NX = M*X; int MN = M*N, m, n, k, q, t; double norm = sqrt(N), normi, normf, a, dtime,ttime = 0; printf("\nDictionary dimensions: N x M = %d x %d, K = %d, Number of Signals = %d", N, M, K, X); /* Initialize srand and clock */ srand(time(NULL)); clock_t start = clock(); /* Initialize cublas */ status = hipblasInit(); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr,"CUBLAS initialization error\n"); return EXIT_FAILURE; } /* Initialize dictionary on host */ h_D = (double*)malloc(MN * sizeof(h_D[0])); if(h_D == 0){ fprintf(stderr, " host memory allocation error (dictionary)\n"); return EXIT_FAILURE; } for(n = 0; n < N; n++){ for(m = 0; m < M; m++){ a = sign(2.0*rand()/(double)RAND_MAX-1.0)/norm; h_D[id(m, n, M)] = a; } } /* Create X random K-sparse signals */ h_X = (double*)calloc(M*X, sizeof(h_X[0])); // X initialized with zeros if(h_X == 0){ fprintf(stderr, " host memory allocation error (signal)\n"); return EXIT_FAILURE; } for (i = 0;i < X;i++){ for(k = 0; k < K; k++){ a = 2.0*rand()/(double)RAND_MAX - 1.0; h_X[(rand()%M)+i*M] = a;} } /* Allocate solution memory on host */ h_C = (double*)calloc(M*X, sizeof(h_C[0])); if(h_C == 0){ fprintf(stderr, " host memory allocation error (solution)\n"); return EXIT_FAILURE; } c = (double*)calloc(1, sizeof(c)); if(c == 0){ fprintf(stderr, " host memory allocation error (c)\n"); return EXIT_FAILURE; } // start counting Host to Device MemCpy start = clock(); /* Host to device data transfer: dictionary */ status = hipblasAlloc(MN, sizeof(d_D[0]),(void**)&d_D); if(status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, " device memory allocation error (dictionary)\n"); return EXIT_FAILURE; } //trasnfer the Host dictionary to Device dictionary status = hipblasSetVector(MN, sizeof(h_D[0]),h_D, 1, d_D, 1); if(status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "! device access error (write dictionary)\n"); return EXIT_FAILURE; }12800 6400 3200 1600 800 400 200 100 50 25 13 /* Host to device data transfer: signal */ status = hipblasAlloc(MX, sizeof(d_R[0]),(void**)&d_R); if(status != HIPBLAS_STATUS_SUCCESS){ fprintf(stderr, "! device memory allocation error (signal)\n"); return EXIT_FAILURE; } status = hipblasSetVector(MX, sizeof(h_X[0]),h_X, 1, d_R, 1); if(status != HIPBLAS_STATUS_SUCCESS){ fprintf(stderr, "! device access error (write signal)\n"); return EXIT_FAILURE; } /*Allocate device memory for Signal Solution */ status = hipblasAlloc(NX, sizeof(d_S[0]),(void**)&d_S); if(status != HIPBLAS_STATUS_SUCCESS){ fprintf(stderr, "! 
device memory allocation error (projected vector)\n"); return EXIT_FAILURE; } /*Allocate Gram matrix */ status = hipblasAlloc(M*M, sizeof(d_G[0]),(void**)&d_G); if(status != HIPBLAS_STATUS_SUCCESS){ fprintf(stderr, "! device memory allocation error (projected vector)\n"); return EXIT_FAILURE; } hipblasDgemm('n','t', M, M,N, 1.0, d_D, M,d_D, M, 0.0, d_G, M); // stop counting Host to Device MemCpy dtime = ((double)clock()-start)/CLOCKS_PER_SEC; ttime = ttime + dtime; printf("\nTime for Host to Device MemCpy: %f(s)",dtime); /* Encoding the signal on device*/ for (i = 0;i<X;i++) { hipblasDgemv('t', M, N, 1.0, d_D, M,d_R+i*M, 1, 0.0, d_S+i*N, 1); status = hipblasGetError(); if(status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "! kernel execution error (encoding)\n"); return EXIT_FAILURE; } } /* Decoding the signal on device*/ // start counting Encoding for (i = 0;i<X;i++) { normi = hipblasDnrm2 (N, d_S+i*N, 1); epsilon = sqrt(epsilon*normi); normf = normi; t = 0; while(normf > epsilon && t < T){ //printf("\n %f",normf); start = clock(); hipblasDgemv('n', M, N, 1.0, d_D, M,d_S+i*N, 1, 0.0, d_R+i*M, 1); q = hipblasIdamax (M, d_R+i*M, 1) - 1; hipblasGetVector(1, sizeof(c),&d_R[q+i*M], 1, c, 1); h_C[q+i*M] = *c + h_C[q+i*M]; dtime = ((double)clock()-start)/CLOCKS_PER_SEC; ttime = ttime + dtime; hipblasDaxpy (N, -(*c), &d_D[q], M, d_S+i*N, 1); start = clock(); normf = hipblasDnrm2 (N, d_S+i*N, 1); t++; dtime = ((double)clock()-start)/CLOCKS_PER_SEC; ttime = ttime + dtime; } /* status = hipblasGetError(); if(status != HIPBLAS_STATUS_SUCCESS){ fprintf(stderr, "! kernel execution error (decoding)\n"); return EXIT_FAILURE; */ a = 100.0*(normf*normf)/(normi*normi); // printf("\nComputation residual error: %f",a); a=0; q=0; *c=0; epsilon=1.0e-7; } // stop counting Encoding //dtime = ((double)clock()-start)/CLOCKS_PER_SEC; //ttime = ttime + dtime; //printf("\nTime for Host to Device MemCpy: %f(s)",dtime); printf("\n Total time : %f(s) ",ttime); /* Check the solution */ /* printf("\nSolution (first column),Reference (second column):"); getchar(); // Wait for key ... for(m=0; m<M; m++) { printf("\n%f\t%f\t%f\t%f", h_C[m], h_X[m],h_C[m+M],h_X[m+M]); } normi = 0; normf = 0; for(m=0; m<M; m++) { normi = normi + h_X[m]*h_X[m]; normf = normf + (h_C[m] - h_X[m])*(h_C[m] - h_X[m]); } printf("\nSolution residual error:%f", 100.0*normf/normi); */ /* Memory clean up */ free(h_D); free(h_X); free(h_C); status = hipblasFree(d_D); status = hipblasFree(d_S); status = hipblasFree(d_R); if(status != HIPBLAS_STATUS_SUCCESS){ fprintf(stderr,"! device memory free error\n"); return EXIT_FAILURE; } /* Shutdown */ status = hipblasShutdown(); if(status != HIPBLAS_STATUS_SUCCESS){ fprintf(stderr,"! cublas shutdown error\n"); return EXIT_FAILURE; } if(argc<=1 || strcmp(argv[1],"-noprompt")){ printf("\nPress ENTER to exit...\n"); getchar(); } return EXIT_SUCCESS; }
5473c7ca403c726405abe2cd78df4c2fbe62f505.cu
/* Includes, system */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> /* Includes, cuda */ #include <cublas.h> #include <cuda_runtime.h> //#include "cublas_v2.h" /* Number of columns & rows in dictionary */ // TODO: get as input #define M 300 // num of Dictionary columns #define N 50 // num of Dictionary rows #define X 13// number of signals /* Number of non-zero elements in signal */ int K = 4; /* Residual error */ double epsilon = 1.0e-7; /* Max num of iterations - assume as same as num of elements in signal */ int T = N; /* Sign function */ double sign(double x){return (x>=0) - (x<0);} /* Matrix indexing convention */ #define id(m, n, ld) (((n) * (ld) + (m))) int main(int argc, char** argv) { cublasStatus status; double *h_D, *h_X, *h_C, *c; //host memory pointers double *d_D = 0, *d_S = 0, *d_R = 0,*d_G = 0; //device memory pointers int i; int MX = M*X; int NX = M*X; int MN = M*N, m, n, k, q, t; double norm = sqrt(N), normi, normf, a, dtime,ttime = 0; printf("\nDictionary dimensions: N x M = %d x %d, K = %d, Number of Signals = %d", N, M, K, X); /* Initialize srand and clock */ srand(time(NULL)); clock_t start = clock(); /* Initialize cublas */ status = cublasInit(); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr,"CUBLAS initialization error\n"); return EXIT_FAILURE; } /* Initialize dictionary on host */ h_D = (double*)malloc(MN * sizeof(h_D[0])); if(h_D == 0){ fprintf(stderr, " host memory allocation error (dictionary)\n"); return EXIT_FAILURE; } for(n = 0; n < N; n++){ for(m = 0; m < M; m++){ a = sign(2.0*rand()/(double)RAND_MAX-1.0)/norm; h_D[id(m, n, M)] = a; } } /* Create X random K-sparse signals */ h_X = (double*)calloc(M*X, sizeof(h_X[0])); // X initialized with zeros if(h_X == 0){ fprintf(stderr, " host memory allocation error (signal)\n"); return EXIT_FAILURE; } for (i = 0;i < X;i++){ for(k = 0; k < K; k++){ a = 2.0*rand()/(double)RAND_MAX - 1.0; h_X[(rand()%M)+i*M] = a;} } /* Allocate solution memory on host */ h_C = (double*)calloc(M*X, sizeof(h_C[0])); if(h_C == 0){ fprintf(stderr, " host memory allocation error (solution)\n"); return EXIT_FAILURE; } c = (double*)calloc(1, sizeof(c)); if(c == 0){ fprintf(stderr, " host memory allocation error (c)\n"); return EXIT_FAILURE; } // start counting Host to Device MemCpy start = clock(); /* Host to device data transfer: dictionary */ status = cublasAlloc(MN, sizeof(d_D[0]),(void**)&d_D); if(status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, " device memory allocation error (dictionary)\n"); return EXIT_FAILURE; } //trasnfer the Host dictionary to Device dictionary status = cublasSetVector(MN, sizeof(h_D[0]),h_D, 1, d_D, 1); if(status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "! device access error (write dictionary)\n"); return EXIT_FAILURE; }12800 6400 3200 1600 800 400 200 100 50 25 13 /* Host to device data transfer: signal */ status = cublasAlloc(MX, sizeof(d_R[0]),(void**)&d_R); if(status != CUBLAS_STATUS_SUCCESS){ fprintf(stderr, "! device memory allocation error (signal)\n"); return EXIT_FAILURE; } status = cublasSetVector(MX, sizeof(h_X[0]),h_X, 1, d_R, 1); if(status != CUBLAS_STATUS_SUCCESS){ fprintf(stderr, "! device access error (write signal)\n"); return EXIT_FAILURE; } /*Allocate device memory for Signal Solution */ status = cublasAlloc(NX, sizeof(d_S[0]),(void**)&d_S); if(status != CUBLAS_STATUS_SUCCESS){ fprintf(stderr, "! 
device memory allocation error (projected vector)\n"); return EXIT_FAILURE; } /*Allocate Gram matrix */ status = cublasAlloc(M*M, sizeof(d_G[0]),(void**)&d_G); if(status != CUBLAS_STATUS_SUCCESS){ fprintf(stderr, "! device memory allocation error (projected vector)\n"); return EXIT_FAILURE; } cublasDgemm('n','t', M, M,N, 1.0, d_D, M,d_D, M, 0.0, d_G, M); // stop counting Host to Device MemCpy dtime = ((double)clock()-start)/CLOCKS_PER_SEC; ttime = ttime + dtime; printf("\nTime for Host to Device MemCpy: %f(s)",dtime); /* Encoding the signal on device*/ for (i = 0;i<X;i++) { cublasDgemv('t', M, N, 1.0, d_D, M,d_R+i*M, 1, 0.0, d_S+i*N, 1); status = cublasGetError(); if(status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "! kernel execution error (encoding)\n"); return EXIT_FAILURE; } } /* Decoding the signal on device*/ // start counting Encoding for (i = 0;i<X;i++) { normi = cublasDnrm2 (N, d_S+i*N, 1); epsilon = sqrt(epsilon*normi); normf = normi; t = 0; while(normf > epsilon && t < T){ //printf("\n %f",normf); start = clock(); cublasDgemv('n', M, N, 1.0, d_D, M,d_S+i*N, 1, 0.0, d_R+i*M, 1); q = cublasIdamax (M, d_R+i*M, 1) - 1; cublasGetVector(1, sizeof(c),&d_R[q+i*M], 1, c, 1); h_C[q+i*M] = *c + h_C[q+i*M]; dtime = ((double)clock()-start)/CLOCKS_PER_SEC; ttime = ttime + dtime; cublasDaxpy (N, -(*c), &d_D[q], M, d_S+i*N, 1); start = clock(); normf = cublasDnrm2 (N, d_S+i*N, 1); t++; dtime = ((double)clock()-start)/CLOCKS_PER_SEC; ttime = ttime + dtime; } /* status = cublasGetError(); if(status != CUBLAS_STATUS_SUCCESS){ fprintf(stderr, "! kernel execution error (decoding)\n"); return EXIT_FAILURE; */ a = 100.0*(normf*normf)/(normi*normi); // printf("\nComputation residual error: %f",a); a=0; q=0; *c=0; epsilon=1.0e-7; } // stop counting Encoding //dtime = ((double)clock()-start)/CLOCKS_PER_SEC; //ttime = ttime + dtime; //printf("\nTime for Host to Device MemCpy: %f(s)",dtime); printf("\n Total time : %f(s) ",ttime); /* Check the solution */ /* printf("\nSolution (first column),Reference (second column):"); getchar(); // Wait for key ... for(m=0; m<M; m++) { printf("\n%f\t%f\t%f\t%f", h_C[m], h_X[m],h_C[m+M],h_X[m+M]); } normi = 0; normf = 0; for(m=0; m<M; m++) { normi = normi + h_X[m]*h_X[m]; normf = normf + (h_C[m] - h_X[m])*(h_C[m] - h_X[m]); } printf("\nSolution residual error:%f", 100.0*normf/normi); */ /* Memory clean up */ free(h_D); free(h_X); free(h_C); status = cublasFree(d_D); status = cublasFree(d_S); status = cublasFree(d_R); if(status != CUBLAS_STATUS_SUCCESS){ fprintf(stderr,"! device memory free error\n"); return EXIT_FAILURE; } /* Shutdown */ status = cublasShutdown(); if(status != CUBLAS_STATUS_SUCCESS){ fprintf(stderr,"! cublas shutdown error\n"); return EXIT_FAILURE; } if(argc<=1 || strcmp(argv[1],"-noprompt")){ printf("\nPress ENTER to exit...\n"); getchar(); } return EXIT_SUCCESS; }
55420b5a96a89d86a16d9bf960c19f2d19a0b4c0.hip
// !!! This is a file automatically generated by hipify!!! /** * gemm.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <hip/hip_runtime.h> #include "polybenchUtilFuncts.h" #define GPU_DEVICE 0 //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 /* Problem size */ #define NI 512 #define NJ 512 #define NK 512 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 32 #define DIM_THREAD_BLOCK_Y 8 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 32412.0f #define BETA 2123.0f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gemm(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i,j,k; for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C[i*NJ + j] *= BETA; for (k = 0; k < NK; ++k) { C[i*NJ + j] += ALPHA * A[i*NK + k] * B[k*NJ + j]; } } } } void init(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *A_gpu, DATA_TYPE *B_gpu, DATA_TYPE *C_gpu) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A[i*NK + j] = ((DATA_TYPE) i*j) / NI; A_gpu[i*NK + j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B[i*NJ + j] = ((DATA_TYPE) i*j + 1) / NJ; B_gpu[i*NJ + j] = ((DATA_TYPE) i*j + 1) / NJ; } } for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C[i*NJ + j] = ((DATA_TYPE) i*j + 2) / NJ; C_gpu[i*NJ + j] = ((DATA_TYPE) i*j + 2) / NJ; } } } void compareResults(DATA_TYPE* C, DATA_TYPE* C_outputFromGpu) { int i, j, fail; fail = 0; // Compare C1 and C2 for (i=0; i < NI; i++) { for (j=0; j < NJ; j++) { if (percentDiff(C[i*NJ + j], C_outputFromGpu[i*NJ + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void gemm_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NJ)) { c[i * NJ + j] *= BETA; int k; for(k=0; k < NK; k++) { c[i * NJ + j] += ALPHA * a[i * NK + k] * b[k * NJ +j]; } } } void gemmCuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu) { double t_start, t_end; dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((size_t)(ceil( ((float)NI)/ ((float)block.x) )),(size_t)(ceil( ((float)NJ)/ ((float)block.y) ))); t_start = rtclock(); hipLaunchKernelGGL(( gemm_kernel), dim3(grid), dim3(block) , 0, 0, A_gpu, B_gpu, C_gpu); hipDeviceSynchronize(); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); } int main(int argc, char *argv[]) { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* C; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *C_gpu; A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE)); C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); hipMallocManaged(&A_gpu, sizeof(DATA_TYPE) * NI * NK); hipMallocManaged(&B_gpu, sizeof(DATA_TYPE) * NK * NJ); 
    hipMallocManaged(&C_gpu, sizeof(DATA_TYPE) * NI * NJ);

    init(A, B, C, A_gpu, B_gpu, C_gpu);

    GPU_argv_init();

    gemmCuda(A_gpu, B_gpu, C_gpu);

    t_start = rtclock();
    gemm(A, B, C);
    t_end = rtclock();
    fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

    compareResults(C, C_gpu);

    free(A);
    free(B);
    free(C);
    hipFree(A_gpu);
    hipFree(B_gpu);
    hipFree(C_gpu);

    return 0;
}
55420b5a96a89d86a16d9bf960c19f2d19a0b4c0.cu
/** * gemm.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <cuda.h> #include "polybenchUtilFuncts.h" #define GPU_DEVICE 0 //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 /* Problem size */ #define NI 512 #define NJ 512 #define NK 512 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 32 #define DIM_THREAD_BLOCK_Y 8 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 32412.0f #define BETA 2123.0f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gemm(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i,j,k; for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C[i*NJ + j] *= BETA; for (k = 0; k < NK; ++k) { C[i*NJ + j] += ALPHA * A[i*NK + k] * B[k*NJ + j]; } } } } void init(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *A_gpu, DATA_TYPE *B_gpu, DATA_TYPE *C_gpu) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A[i*NK + j] = ((DATA_TYPE) i*j) / NI; A_gpu[i*NK + j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B[i*NJ + j] = ((DATA_TYPE) i*j + 1) / NJ; B_gpu[i*NJ + j] = ((DATA_TYPE) i*j + 1) / NJ; } } for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C[i*NJ + j] = ((DATA_TYPE) i*j + 2) / NJ; C_gpu[i*NJ + j] = ((DATA_TYPE) i*j + 2) / NJ; } } } void compareResults(DATA_TYPE* C, DATA_TYPE* C_outputFromGpu) { int i, j, fail; fail = 0; // Compare C1 and C2 for (i=0; i < NI; i++) { for (j=0; j < NJ; j++) { if (percentDiff(C[i*NJ + j], C_outputFromGpu[i*NJ + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); cudaSetDevice( GPU_DEVICE ); } __global__ void gemm_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NJ)) { c[i * NJ + j] *= BETA; int k; for(k=0; k < NK; k++) { c[i * NJ + j] += ALPHA * a[i * NK + k] * b[k * NJ +j]; } } } void gemmCuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu) { double t_start, t_end; dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((size_t)(ceil( ((float)NI)/ ((float)block.x) )),(size_t)(ceil( ((float)NJ)/ ((float)block.y) ))); t_start = rtclock(); gemm_kernel<<< grid, block >>>(A_gpu, B_gpu, C_gpu); cudaDeviceSynchronize(); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); } int main(int argc, char *argv[]) { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* C; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *C_gpu; A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE)); C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); cudaMallocManaged(&A_gpu, sizeof(DATA_TYPE) * NI * NK); cudaMallocManaged(&B_gpu, sizeof(DATA_TYPE) * NK * NJ); cudaMallocManaged(&C_gpu, sizeof(DATA_TYPE) * NI * NJ); init(A, B, C, A_gpu, B_gpu, C_gpu); 
    GPU_argv_init();

    gemmCuda(A_gpu, B_gpu, C_gpu);

    t_start = rtclock();
    gemm(A, B, C);
    t_end = rtclock();
    fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

    compareResults(C, C_gpu);

    free(A);
    free(B);
    free(C);
    cudaFree(A_gpu);
    cudaFree(B_gpu);
    cudaFree(C_gpu);

    return 0;
}
944b7412cd1b9d4b5361ad786aab07ddd3822be2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <vector> #include "paddle/fluid/operators/detection/nms_op.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" static const int64_t threadsPerBlock = sizeof(int64_t) * 8; namespace paddle { namespace operators { using framework::Tensor; template <typename T> static __global__ void NMS(const T* boxes_data, float threshold, int64_t num_boxes, uint64_t* masks) { auto raw_start = blockIdx.y; auto col_start = blockIdx.x; if (raw_start > col_start) return; const int raw_last_storage = min(num_boxes - raw_start * threadsPerBlock, threadsPerBlock); const int col_last_storage = min(num_boxes - col_start * threadsPerBlock, threadsPerBlock); if (threadIdx.x < raw_last_storage) { uint64_t mask = 0; auto current_box_idx = raw_start * threadsPerBlock + threadIdx.x; const T* current_box = boxes_data + current_box_idx * 4; for (int i = 0; i < col_last_storage; ++i) { const T* target_box = boxes_data + (col_start * threadsPerBlock + i) * 4; if (CalculateIoU<T>(current_box, target_box, threshold)) { mask |= 1ULL << i; } } const int blocks_per_line = CeilDivide(num_boxes, threadsPerBlock); masks[current_box_idx * blocks_per_line + col_start] = mask; } } template <typename T> class NMSCudaKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { const Tensor* boxes = context.Input<Tensor>("Boxes"); Tensor* output = context.Output<Tensor>("KeepBoxesIdxs"); auto* output_data = output->mutable_data<int64_t>(context.GetPlace()); auto threshold = context.template Attr<float>("iou_threshold"); const int64_t num_boxes = boxes->dims()[0]; const auto blocks_per_line = CeilDivide(num_boxes, threadsPerBlock); dim3 block(threadsPerBlock); dim3 grid(blocks_per_line, blocks_per_line); auto mask_data = memory::Alloc(context.cuda_device_context(), num_boxes * blocks_per_line * sizeof(uint64_t)); uint64_t* mask_dev = reinterpret_cast<uint64_t*>(mask_data->ptr()); hipLaunchKernelGGL(( NMS<T>), dim3(grid), dim3(block), 0, context.cuda_device_context().stream(), boxes->data<T>(), threshold, num_boxes, mask_dev); std::vector<uint64_t> mask_host(num_boxes * blocks_per_line); memory::Copy(platform::CPUPlace(), mask_host.data(), context.GetPlace(), mask_dev, num_boxes * blocks_per_line * sizeof(uint64_t), context.cuda_device_context().stream()); std::vector<int64_t> remv(blocks_per_line); std::vector<int64_t> keep_boxes_idxs(num_boxes); int64_t* output_host = keep_boxes_idxs.data(); int64_t last_box_num = 0; for (int64_t i = 0; i < num_boxes; ++i) { auto remv_element_id = i / threadsPerBlock; auto remv_bit_id = i % threadsPerBlock; if (!(remv[remv_element_id] & 1ULL << remv_bit_id)) { output_host[last_box_num++] = i; uint64_t* current_mask = mask_host.data() + i * blocks_per_line; for (auto j = remv_element_id; j < blocks_per_line; ++j) { remv[j] |= current_mask[j]; } } } 
    memory::Copy(context.GetPlace(),
                 output_data,
                 platform::CPUPlace(),
                 output_host,
                 sizeof(int64_t) * num_boxes,
                 context.cuda_device_context().stream());
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(nms, ops::NMSCudaKernel<float>, ops::NMSCudaKernel<double>);
944b7412cd1b9d4b5361ad786aab07ddd3822be2.cu
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <vector> #include "paddle/fluid/operators/detection/nms_op.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" static const int64_t threadsPerBlock = sizeof(int64_t) * 8; namespace paddle { namespace operators { using framework::Tensor; template <typename T> static __global__ void NMS(const T* boxes_data, float threshold, int64_t num_boxes, uint64_t* masks) { auto raw_start = blockIdx.y; auto col_start = blockIdx.x; if (raw_start > col_start) return; const int raw_last_storage = min(num_boxes - raw_start * threadsPerBlock, threadsPerBlock); const int col_last_storage = min(num_boxes - col_start * threadsPerBlock, threadsPerBlock); if (threadIdx.x < raw_last_storage) { uint64_t mask = 0; auto current_box_idx = raw_start * threadsPerBlock + threadIdx.x; const T* current_box = boxes_data + current_box_idx * 4; for (int i = 0; i < col_last_storage; ++i) { const T* target_box = boxes_data + (col_start * threadsPerBlock + i) * 4; if (CalculateIoU<T>(current_box, target_box, threshold)) { mask |= 1ULL << i; } } const int blocks_per_line = CeilDivide(num_boxes, threadsPerBlock); masks[current_box_idx * blocks_per_line + col_start] = mask; } } template <typename T> class NMSCudaKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { const Tensor* boxes = context.Input<Tensor>("Boxes"); Tensor* output = context.Output<Tensor>("KeepBoxesIdxs"); auto* output_data = output->mutable_data<int64_t>(context.GetPlace()); auto threshold = context.template Attr<float>("iou_threshold"); const int64_t num_boxes = boxes->dims()[0]; const auto blocks_per_line = CeilDivide(num_boxes, threadsPerBlock); dim3 block(threadsPerBlock); dim3 grid(blocks_per_line, blocks_per_line); auto mask_data = memory::Alloc(context.cuda_device_context(), num_boxes * blocks_per_line * sizeof(uint64_t)); uint64_t* mask_dev = reinterpret_cast<uint64_t*>(mask_data->ptr()); NMS<T><<<grid, block, 0, context.cuda_device_context().stream()>>>( boxes->data<T>(), threshold, num_boxes, mask_dev); std::vector<uint64_t> mask_host(num_boxes * blocks_per_line); memory::Copy(platform::CPUPlace(), mask_host.data(), context.GetPlace(), mask_dev, num_boxes * blocks_per_line * sizeof(uint64_t), context.cuda_device_context().stream()); std::vector<int64_t> remv(blocks_per_line); std::vector<int64_t> keep_boxes_idxs(num_boxes); int64_t* output_host = keep_boxes_idxs.data(); int64_t last_box_num = 0; for (int64_t i = 0; i < num_boxes; ++i) { auto remv_element_id = i / threadsPerBlock; auto remv_bit_id = i % threadsPerBlock; if (!(remv[remv_element_id] & 1ULL << remv_bit_id)) { output_host[last_box_num++] = i; uint64_t* current_mask = mask_host.data() + i * blocks_per_line; for (auto j = remv_element_id; j < blocks_per_line; ++j) { remv[j] |= current_mask[j]; } } } memory::Copy(context.GetPlace(), output_data, platform::CPUPlace(), output_host, sizeof(int64_t) * num_boxes, 
                 context.cuda_device_context().stream());
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(nms, ops::NMSCudaKernel<float>, ops::NMSCudaKernel<double>);
1fb85864db2349f6e4e26b8b615c3988ea5b6e81.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //scan.cu //#include "kernel.hip" #include "comm.h" #include "wtime.h" #include "iostream" #define max_thd 256 #define max_block 256 #define thread_limit 256 #define block_limit 1024 #define GPU_COWORKER 1 #define warp_thd 256 #define block_thd 256 graph * mygraph; //#define GPU_NUM 4 //int NUM_ONE_TASK; __global__ void block_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, // index_t* degree, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ block_thd; int i = threadIdx.x% block_thd; index_t mycount=0; // __shared__ int cache[256]; // __shared__ int offset[256]; __shared__ index_t local[block_thd]; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[i]=a[i*m/block_thd]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = block_thd; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[r]; if(X==Y){ mycount++; bot = top + block_thd; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 // bot = bot*k; // top = top*k; bot = bot*m/block_thd; top = top*m/block_thd -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += block_thd; } tid += GPU_COWORKER * gridDim.x*blockDim.x/ block_thd; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]+=val; // count[blockIdx.x]=val; } } __global__ void warp_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, // index_t* degree, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns; index_t mycount=0; __shared__ index_t local[warp_thd]; int i = threadIdx.x%32; int p = threadIdx.x/32; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[p*32+i]=a[i*m/32]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = 32; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[p*32+r]; if(X==Y){ mycount++; bot = top + 32; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 // bot = bot*k; // top = top*k; bot = bot*m/32; top = top*m/32 -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += 32; } tid += GPU_COWORKER* blockDim.x*gridDim.x/32; __syncthreads(); } __syncthreads(); //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]=val; } __syncthreads(); } //---------------------------------------------------------------------------------------- __global__ void classify_kernel //step 1: classify the edge list into different arrays ( 
vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, //inputs index_t* small_num, index_t* mid_num, index_t* large_num //outputs: small/large head, adjacent, and number by thread ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t small_offset=0; index_t mid_offset=0; index_t large_offset=0; //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit){ small_offset++; } else if(n<block_limit){ //could be more then 2 catigories // else{ mid_offset++; } else { //could be more then 2 catigories large_offset++; } } } small_num[tid] = small_offset; mid_num[tid] = mid_offset; large_num[tid] = large_offset; } __global__ void prefix_kernel_1 //this prefix scan function could be easier for data size is always 256*256 ( index_t* data, index_t* block_offset ) { //step 1: each block do prefix sum inside int tid = threadIdx.x +blockIdx.x*blockDim.x; __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = data[tid]; __syncthreads(); index_t val=0; for(int i=0; i<=threadIdx.x; i++){ val += temp_in[i]; } __syncthreads(); if(threadIdx.x==255){ block_offset[blockIdx.x] = val; } data[tid] = val; __syncthreads(); } __global__ void prefix_kernel_2 ( index_t* block_offset ) { //step 2: collect each block's offset and do prefix for this set __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = block_offset[threadIdx.x]; __syncthreads(); index_t val=0; for(int i=0; i<threadIdx.x; i++){ val += temp_in[i]; } // val = temp_in[threadIdx.x]; block_offset[threadIdx.x] = val; __syncthreads(); } __global__ void prefix_kernel_3 ( index_t* data, index_t* block_offset ) { //step 3: update by adding block offset int tid = threadIdx.x + blockIdx.x*blockDim.x; index_t val = data[tid]; index_t offset = block_offset[blockIdx.x]; val += offset; data[tid] = val; __syncthreads(); } __global__ void collect_kernel ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, index_t* small_num, index_t* mid_num, index_t* large_num, index_t N1, index_t N2, vertex_t* dest_head, vertex_t* dest_adj ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t thd_base_small = 0; index_t thd_base_mid = N1; index_t thd_base_large = N1+N2; if(tid!=0){ thd_base_small = small_num[tid-1]; thd_base_mid = N1 + mid_num[tid-1]; thd_base_large = N1 + N2 + large_num[tid-1]; } //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; index_t small_offset = thd_base_small; index_t mid_offset = thd_base_mid; index_t large_offset = thd_base_large; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit){ dest_head[small_offset] = head; dest_adj [small_offset] = adj; small_offset++; } else if(n<block_limit){ //could be more then 2 catigories // else{ dest_head[mid_offset] = head; dest_adj [mid_offset] = adj; mid_offset++; } else { //could be more then 2 catigories dest_head[large_offset] = head; dest_adj [large_offset] = adj; large_offset++; } } } } __global__ void 
reduce_kernel2(index_t* count) { index_t val = 0; for(int i=0; i<max_block; i++){ val += count[i]; } count[0] = val; } //---------------------------------------- cpu function-------------------- //------------------------------------------------------------------ void* part_scan(void * data){ index_t thd_count=0; int GPU_id = *(int*)data; int i = GPU_id; // cout<<"GPU id = "<<GPU_id<<"\n"; hipSetDevice(GPU_id); H_ERR(hipDeviceSynchronize() ); vertex_t* dev_adj; vertex_t* dev_head; // index_t* dev_degree; index_t* dev_begin; index_t* dev_count; index_t partEdgeCount = mygraph->partEdgeCount[i]; vertex_t vert_count = mygraph->vert_count; vertex_t* partAdj = mygraph->partAdj[i]; vertex_t* partHead= mygraph->partHead[i]; // index_t* partDegree = mygraph->partDegree[i]; index_t* partBegin = mygraph->partBegin[i]; index_t* count = mygraph->count; H_ERR(hipMalloc(&dev_adj, partEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&dev_head, partEdgeCount*sizeof(vertex_t)) ); // H_ERR(hipMalloc(&dev_degree, vert_count*sizeof(index_t)) ); H_ERR(hipMalloc(&dev_begin, vert_count*sizeof(index_t)) ); H_ERR(hipMalloc(&dev_count, max_block*sizeof(index_t)) ); index_t* block_offset; H_ERR(hipMalloc(&block_offset, max_block*sizeof(index_t)) ); H_ERR(hipMemcpy(dev_adj, partAdj, partEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(dev_head, partHead, partEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); // H_ERR(hipMemcpy(dev_degree, partDegree, vert_count*sizeof(index_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(dev_begin, partBegin, vert_count*sizeof(index_t), hipMemcpyHostToDevice) ); for(int j=0; j<PART_NUM; j++){ index_t totalEdgeCount = mygraph->partEdgeCount[j]; vertex_t* head = mygraph->partHead[j]; vertex_t* adj = mygraph->partAdj[j]; // index_t* degree = mygraph->partDegree[j]; vertex_t* classified_head; vertex_t* classified_adj; index_t* small_num; index_t* mid_num; index_t* large_num; vertex_t* src_head; vertex_t* src_adj; // index_t* src_degree; H_ERR(hipMalloc(&small_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(hipMalloc(&mid_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(hipMalloc(&large_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(hipMalloc(&src_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&src_adj, totalEdgeCount*sizeof(vertex_t)) ); // H_ERR(hipMalloc(&src_degree, vert_count*sizeof(index_t)) ); H_ERR(hipMemcpy(src_adj, adj, totalEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); H_ERR(hipMemcpy(src_head, head, totalEdgeCount*sizeof(vertex_t), hipMemcpyHostToDevice) ); // H_ERR(hipMemcpy(src_degree, degree, vert_count*sizeof(index_t), hipMemcpyHostToDevice) ); H_ERR(hipMalloc(&classified_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(hipMalloc(&classified_adj, totalEdgeCount*sizeof(vertex_t)) ); // double time1=wtime(); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( classify_kernel) , dim3(max_block),dim3(max_thd), 0, 0, src_adj, src_head, dev_begin, totalEdgeCount, small_num, mid_num, large_num ); H_ERR(hipDeviceSynchronize() ); //test for prefix sum hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, small_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, small_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, mid_num, block_offset); 
H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, mid_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_1) , dim3(max_block),dim3(max_thd), 0, 0, large_num, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_2) , dim3(1),dim3(max_thd), 0, 0, block_offset); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( prefix_kernel_3) , dim3(max_block),dim3(max_thd), 0, 0, large_num, block_offset); H_ERR(hipDeviceSynchronize() ); index_t N1,N2,N3; H_ERR(hipMemcpy(&N1 , &small_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) ); H_ERR(hipMemcpy(&N2 , &mid_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) ); H_ERR(hipMemcpy(&N3 , &large_num[65535] , sizeof(index_t), hipMemcpyDeviceToHost) ); H_ERR(hipDeviceSynchronize() ); // cout<<"N1 = "<<N1<<"\n"; // cout<<"N2 = "<<N2<<"\n"; // cout<<"N3 = "<<N3<<"\n"; hipLaunchKernelGGL(( collect_kernel) , dim3(max_block),dim3(max_thd), 0, 0, src_adj, src_head, dev_begin, totalEdgeCount, small_num, mid_num, large_num, N1, N2, classified_head, classified_adj ); H_ERR(hipDeviceSynchronize() ); //double time2=wtime(); hipLaunchKernelGGL(( warp_binary_kernel), dim3(max_block),dim3(warp_thd), 0, 0, classified_head, classified_adj, dev_adj, // dev_degree, dev_begin, 0, N1, dev_count ); H_ERR(hipDeviceSynchronize() ); //double time3 = wtime(); /* // int* large_head = &(classified_head[N1]); // int* large_adj = &(classified_adj[N1]); block_binary_kernel<<<max_block,block_thd>>> ( classified_head, classified_adj, dev_adj, // dev_degree, dev_begin, N1+GPU_id*256, N1+N2, dev_count ); H_ERR(hipDeviceSynchronize() ); */ //double time4 = wtime(); hipLaunchKernelGGL(( block_binary_kernel), dim3(max_block),dim3(max_thd), 0, 0, classified_head, classified_adj, //dev_head, //dev_adj, dev_adj, // dev_degree, dev_begin, N1, // 0 + GPU_id*256, totalEdgeCount, dev_count ); H_ERR(hipDeviceSynchronize() ); hipLaunchKernelGGL(( reduce_kernel2) , dim3(1),dim3(1), 0, 0, dev_count); H_ERR(hipDeviceSynchronize() ); H_ERR(hipMemcpy(&count[i], dev_count, sizeof(index_t), hipMemcpyDeviceToHost)); thd_count += count[i]; H_ERR(hipFree(small_num) ); H_ERR(hipFree(large_num) ); H_ERR(hipFree(classified_head) ); H_ERR(hipFree(classified_adj) ); H_ERR(hipFree(src_head) ); H_ERR(hipFree(src_adj) ); // H_ERR(hipFree(src_degree) ); cout<<"GPU "<<i<<" part "<<j<<"\n"; } count[i] = thd_count; cout<<"gpu binary count="<<count[i]<<"\n"; H_ERR(hipFree(dev_adj) ); H_ERR(hipFree(dev_head) ); // H_ERR(hipFree(dev_degree) ); H_ERR(hipFree(dev_begin) ); H_ERR(hipFree(block_offset) ); H_ERR(hipFree(dev_count) ); return NULL; }
1fb85864db2349f6e4e26b8b615c3988ea5b6e81.cu
//scan.cu //#include "kernel.cu" #include "comm.h" #include "wtime.h" #include "iostream" #define max_thd 256 #define max_block 256 #define thread_limit 256 #define block_limit 1024 #define GPU_COWORKER 1 #define warp_thd 256 #define block_thd 256 graph * mygraph; //#define GPU_NUM 4 //int NUM_ONE_TASK; __global__ void block_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, // index_t* degree, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = Ns + (threadIdx.x + blockIdx.x * blockDim.x)/ block_thd; int i = threadIdx.x% block_thd; index_t mycount=0; // __shared__ int cache[256]; // __shared__ int offset[256]; __shared__ index_t local[block_thd]; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[i]=a[i*m/block_thd]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = block_thd; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[r]; if(X==Y){ mycount++; bot = top + block_thd; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 // bot = bot*k; // top = top*k; bot = bot*m/block_thd; top = top*m/block_thd -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += block_thd; } tid += GPU_COWORKER * gridDim.x*blockDim.x/ block_thd; __syncthreads(); } //reduce __syncthreads(); local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]+=val; // count[blockIdx.x]=val; } } __global__ void warp_binary_kernel ( vertex_t* head, vertex_t* adj, vertex_t* adj_list, // index_t* degree, index_t* begin, index_t Ns, index_t Ne, index_t* count ) { //phase 1, partition index_t tid = (threadIdx.x + blockIdx.x * blockDim.x)/32 + Ns; index_t mycount=0; __shared__ index_t local[warp_thd]; int i = threadIdx.x%32; int p = threadIdx.x/32; while(tid<Ne){ vertex_t A = head[tid]; vertex_t B = adj[tid]; index_t m = begin[A+1]-begin[A];//degree[A]; index_t n = begin[B+1]-begin[B];//degree[B]; index_t temp; if(m<n){ temp = A; A = B; B = temp; temp = m; m = n; n = temp; } vertex_t* a = &(adj_list[begin[A]]); vertex_t* b = &(adj_list[begin[B]]); //initial cache local[p*32+i]=a[i*m/32]; __syncthreads(); //search int j=i; while(j<n){ vertex_t X = b[j]; vertex_t Y; //phase 1: cache int bot = 0; int top = 32; int r; while(top>bot+1){ r = (top+bot)/2; Y = local[p*32+r]; if(X==Y){ mycount++; bot = top + 32; } if(X<Y){ top = r; } if(X>Y){ bot = r; } } //phase 2 // bot = bot*k; // top = top*k; bot = bot*m/32; top = top*m/32 -1; while(top>=bot){ r = (top+bot)/2; Y = a[r]; if(X==Y){ mycount++; } if(X<=Y){ top = r-1; } if(X>=Y){ bot = r+1; } } j += 32; } tid += GPU_COWORKER* blockDim.x*gridDim.x/32; __syncthreads(); } __syncthreads(); //reduce local[threadIdx.x] = mycount; __syncthreads(); if(threadIdx.x==0){ index_t val=0; for(int i=0; i<blockDim.x; i++){ val+= local[i]; } count[blockIdx.x]=val; } __syncthreads(); } //---------------------------------------------------------------------------------------- __global__ void classify_kernel //step 1: classify the edge list into different arrays ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, //inputs index_t* 
small_num, index_t* mid_num, index_t* large_num //outputs: small/large head, adjacent, and number by thread ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t small_offset=0; index_t mid_offset=0; index_t large_offset=0; //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit){ small_offset++; } else if(n<block_limit){ //could be more then 2 catigories // else{ mid_offset++; } else { //could be more then 2 catigories large_offset++; } } } small_num[tid] = small_offset; mid_num[tid] = mid_offset; large_num[tid] = large_offset; } __global__ void prefix_kernel_1 //this prefix scan function could be easier for data size is always 256*256 ( index_t* data, index_t* block_offset ) { //step 1: each block do prefix sum inside int tid = threadIdx.x +blockIdx.x*blockDim.x; __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = data[tid]; __syncthreads(); index_t val=0; for(int i=0; i<=threadIdx.x; i++){ val += temp_in[i]; } __syncthreads(); if(threadIdx.x==255){ block_offset[blockIdx.x] = val; } data[tid] = val; __syncthreads(); } __global__ void prefix_kernel_2 ( index_t* block_offset ) { //step 2: collect each block's offset and do prefix for this set __shared__ index_t temp_in[256]; temp_in[threadIdx.x] = block_offset[threadIdx.x]; __syncthreads(); index_t val=0; for(int i=0; i<threadIdx.x; i++){ val += temp_in[i]; } // val = temp_in[threadIdx.x]; block_offset[threadIdx.x] = val; __syncthreads(); } __global__ void prefix_kernel_3 ( index_t* data, index_t* block_offset ) { //step 3: update by adding block offset int tid = threadIdx.x + blockIdx.x*blockDim.x; index_t val = data[tid]; index_t offset = block_offset[blockIdx.x]; val += offset; data[tid] = val; __syncthreads(); } __global__ void collect_kernel ( vertex_t* adj_list, vertex_t* head_list, index_t* begin, index_t N, index_t* small_num, index_t* mid_num, index_t* large_num, index_t N1, index_t N2, vertex_t* dest_head, vertex_t* dest_adj ) { int tid = threadIdx.x +blockIdx.x*blockDim.x; index_t bin_size = (N-1)/(blockDim.x*gridDim.x)+1; index_t thd_base = tid*bin_size; //start point of threads space index_t thd_base_small = 0; index_t thd_base_mid = N1; index_t thd_base_large = N1+N2; if(tid!=0){ thd_base_small = small_num[tid-1]; thd_base_mid = N1 + mid_num[tid-1]; thd_base_large = N1 + N2 + large_num[tid-1]; } //temp variables vertex_t head; vertex_t adj; index_t m; index_t n; index_t small_offset = thd_base_small; index_t mid_offset = thd_base_mid; index_t large_offset = thd_base_large; for(index_t i=0; i<bin_size; i++){ index_t id = thd_base + i; if(id<N){ head = head_list[id]; adj = adj_list[id]; m = begin[head+1]-begin[head];//degree[head]; n = begin[adj+1]-begin[adj];//degree[adj]; if(m<n){ n=m; } if(n<thread_limit){ dest_head[small_offset] = head; dest_adj [small_offset] = adj; small_offset++; } else if(n<block_limit){ //could be more then 2 catigories // else{ dest_head[mid_offset] = head; dest_adj [mid_offset] = adj; mid_offset++; } else { //could be more then 2 catigories dest_head[large_offset] = head; dest_adj [large_offset] = adj; large_offset++; } } } } __global__ void reduce_kernel2(index_t* count) { index_t val = 0; for(int i=0; i<max_block; i++){ val += 
count[i]; } count[0] = val; } //---------------------------------------- cpu function-------------------- //------------------------------------------------------------------ void* part_scan(void * data){ index_t thd_count=0; int GPU_id = *(int*)data; int i = GPU_id; // cout<<"GPU id = "<<GPU_id<<"\n"; cudaSetDevice(GPU_id); H_ERR(cudaDeviceSynchronize() ); vertex_t* dev_adj; vertex_t* dev_head; // index_t* dev_degree; index_t* dev_begin; index_t* dev_count; index_t partEdgeCount = mygraph->partEdgeCount[i]; vertex_t vert_count = mygraph->vert_count; vertex_t* partAdj = mygraph->partAdj[i]; vertex_t* partHead= mygraph->partHead[i]; // index_t* partDegree = mygraph->partDegree[i]; index_t* partBegin = mygraph->partBegin[i]; index_t* count = mygraph->count; H_ERR(cudaMalloc(&dev_adj, partEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&dev_head, partEdgeCount*sizeof(vertex_t)) ); // H_ERR(cudaMalloc(&dev_degree, vert_count*sizeof(index_t)) ); H_ERR(cudaMalloc(&dev_begin, vert_count*sizeof(index_t)) ); H_ERR(cudaMalloc(&dev_count, max_block*sizeof(index_t)) ); index_t* block_offset; H_ERR(cudaMalloc(&block_offset, max_block*sizeof(index_t)) ); H_ERR(cudaMemcpy(dev_adj, partAdj, partEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(dev_head, partHead, partEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); // H_ERR(cudaMemcpy(dev_degree, partDegree, vert_count*sizeof(index_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(dev_begin, partBegin, vert_count*sizeof(index_t), cudaMemcpyHostToDevice) ); for(int j=0; j<PART_NUM; j++){ index_t totalEdgeCount = mygraph->partEdgeCount[j]; vertex_t* head = mygraph->partHead[j]; vertex_t* adj = mygraph->partAdj[j]; // index_t* degree = mygraph->partDegree[j]; vertex_t* classified_head; vertex_t* classified_adj; index_t* small_num; index_t* mid_num; index_t* large_num; vertex_t* src_head; vertex_t* src_adj; // index_t* src_degree; H_ERR(cudaMalloc(&small_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(cudaMalloc(&mid_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(cudaMalloc(&large_num, max_thd*max_block*sizeof(index_t)) ); H_ERR(cudaMalloc(&src_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&src_adj, totalEdgeCount*sizeof(vertex_t)) ); // H_ERR(cudaMalloc(&src_degree, vert_count*sizeof(index_t)) ); H_ERR(cudaMemcpy(src_adj, adj, totalEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMemcpy(src_head, head, totalEdgeCount*sizeof(vertex_t), cudaMemcpyHostToDevice) ); // H_ERR(cudaMemcpy(src_degree, degree, vert_count*sizeof(index_t), cudaMemcpyHostToDevice) ); H_ERR(cudaMalloc(&classified_head, totalEdgeCount*sizeof(vertex_t)) ); H_ERR(cudaMalloc(&classified_adj, totalEdgeCount*sizeof(vertex_t)) ); // double time1=wtime(); H_ERR(cudaDeviceSynchronize() ); classify_kernel <<<max_block,max_thd>>>( src_adj, src_head, dev_begin, totalEdgeCount, small_num, mid_num, large_num ); H_ERR(cudaDeviceSynchronize() ); //test for prefix sum prefix_kernel_1 <<<max_block,max_thd>>>(small_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_2 <<<1,max_thd>>>(block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_3 <<<max_block,max_thd>>>(small_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_1 <<<max_block,max_thd>>>(mid_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_2 <<<1,max_thd>>>(block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_3 <<<max_block,max_thd>>>(mid_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_1 
<<<max_block,max_thd>>>(large_num, block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_2 <<<1,max_thd>>>(block_offset); H_ERR(cudaDeviceSynchronize() ); prefix_kernel_3 <<<max_block,max_thd>>>(large_num, block_offset); H_ERR(cudaDeviceSynchronize() ); index_t N1,N2,N3; H_ERR(cudaMemcpy(&N1 , &small_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) ); H_ERR(cudaMemcpy(&N2 , &mid_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) ); H_ERR(cudaMemcpy(&N3 , &large_num[65535] , sizeof(index_t), cudaMemcpyDeviceToHost) ); H_ERR(cudaDeviceSynchronize() ); // cout<<"N1 = "<<N1<<"\n"; // cout<<"N2 = "<<N2<<"\n"; // cout<<"N3 = "<<N3<<"\n"; collect_kernel <<<max_block,max_thd>>>( src_adj, src_head, dev_begin, totalEdgeCount, small_num, mid_num, large_num, N1, N2, classified_head, classified_adj ); H_ERR(cudaDeviceSynchronize() ); //double time2=wtime(); warp_binary_kernel<<<max_block,warp_thd>>> ( classified_head, classified_adj, dev_adj, // dev_degree, dev_begin, 0, N1, dev_count ); H_ERR(cudaDeviceSynchronize() ); //double time3 = wtime(); /* // int* large_head = &(classified_head[N1]); // int* large_adj = &(classified_adj[N1]); block_binary_kernel<<<max_block,block_thd>>> ( classified_head, classified_adj, dev_adj, // dev_degree, dev_begin, N1+GPU_id*256, N1+N2, dev_count ); H_ERR(cudaDeviceSynchronize() ); */ //double time4 = wtime(); block_binary_kernel<<<max_block,max_thd>>> ( classified_head, classified_adj, //dev_head, //dev_adj, dev_adj, // dev_degree, dev_begin, N1, // 0 + GPU_id*256, totalEdgeCount, dev_count ); H_ERR(cudaDeviceSynchronize() ); reduce_kernel2 <<<1,1>>>(dev_count); H_ERR(cudaDeviceSynchronize() ); H_ERR(cudaMemcpy(&count[i], dev_count, sizeof(index_t), cudaMemcpyDeviceToHost)); thd_count += count[i]; H_ERR(cudaFree(small_num) ); H_ERR(cudaFree(large_num) ); H_ERR(cudaFree(classified_head) ); H_ERR(cudaFree(classified_adj) ); H_ERR(cudaFree(src_head) ); H_ERR(cudaFree(src_adj) ); // H_ERR(cudaFree(src_degree) ); cout<<"GPU "<<i<<" part "<<j<<"\n"; } count[i] = thd_count; cout<<"gpu binary count="<<count[i]<<"\n"; H_ERR(cudaFree(dev_adj) ); H_ERR(cudaFree(dev_head) ); // H_ERR(cudaFree(dev_degree) ); H_ERR(cudaFree(dev_begin) ); H_ERR(cudaFree(block_offset) ); H_ERR(cudaFree(dev_count) ); return NULL; }
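// A minimal standalone sketch (not from the original project) of the three-phase
// prefix scan used above: phase 1 scans each 256-element block in shared memory,
// phase 2 scans the 256 block totals, phase 3 adds each block's offset back in.
// The fixed 256-block / 256-thread layout is assumed, as in the code above;
// kernel and variable names here are illustrative only.
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

__global__ void scan_block(unsigned long long* data, unsigned long long* block_total) {
    __shared__ unsigned long long tile[256];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    tile[threadIdx.x] = data[tid];
    __syncthreads();
    unsigned long long val = 0;
    for (int i = 0; i <= threadIdx.x; i++) val += tile[i];   // inclusive scan within the block
    if (threadIdx.x == blockDim.x - 1) block_total[blockIdx.x] = val;
    data[tid] = val;
}

__global__ void scan_block_totals(unsigned long long* block_total) {
    __shared__ unsigned long long tile[256];
    tile[threadIdx.x] = block_total[threadIdx.x];
    __syncthreads();
    unsigned long long val = 0;
    for (int i = 0; i < threadIdx.x; i++) val += tile[i];    // exclusive scan of the block totals
    block_total[threadIdx.x] = val;
}

__global__ void add_block_offsets(unsigned long long* data, const unsigned long long* block_total) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    data[tid] += block_total[blockIdx.x];
}

int main() {
    const int blocks = 256, threads = 256, n = blocks * threads;
    std::vector<unsigned long long> h(n, 1ULL);              // all ones: prefix at index i should be i+1
    unsigned long long *d_data, *d_total;
    cudaMalloc(&d_data, n * sizeof(unsigned long long));
    cudaMalloc(&d_total, blocks * sizeof(unsigned long long));
    cudaMemcpy(d_data, h.data(), n * sizeof(unsigned long long), cudaMemcpyHostToDevice);
    scan_block<<<blocks, threads>>>(d_data, d_total);
    scan_block_totals<<<1, threads>>>(d_total);
    add_block_offsets<<<blocks, threads>>>(d_data, d_total);
    cudaMemcpy(h.data(), d_data, n * sizeof(unsigned long long), cudaMemcpyDeviceToHost);
    printf("last prefix = %llu (expected %d)\n", h.back(), n);
    cudaFree(d_data);
    cudaFree(d_total);
    return 0;
}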
ef5e46e2062be97958260916009bb388418d8010.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void stretch_kernel(int acc, int samps, float tsamp, float *d_input, float *d_output, float t_zero, float multiplier, float tsamp_inverse) { int t = blockIdx.x * blockDim.x + threadIdx.x; float p_time = t * ( t_zero + ( multiplier * ( t - 1.0f ) ) ); int stretch_index = __float2int_rz(p_time * tsamp_inverse); if (stretch_index >= 0 && stretch_index < samps) d_output[stretch_index] = d_input[t]; }
ef5e46e2062be97958260916009bb388418d8010.cu
#include "includes.h" __global__ void stretch_kernel(int acc, int samps, float tsamp, float *d_input, float *d_output, float t_zero, float multiplier, float tsamp_inverse) { int t = blockIdx.x * blockDim.x + threadIdx.x; float p_time = t * ( t_zero + ( multiplier * ( t - 1.0f ) ) ); int stretch_index = __float2int_rz(p_time * tsamp_inverse); if (stretch_index >= 0 && stretch_index < samps) d_output[stretch_index] = d_input[t]; }
34691fad7da9aded5851944cd478562a6d869122.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include "Kernel_Starter_SP.h" #include "hip/hip_runtime.h" /////////////////////////////////////////////////////////////////////////////// // On G80-class hardware 24-bit multiplication takes 4 clocks per warp // (the same as for floating point multiplication and addition), // whereas full 32-bit multiplication takes 16 clocks per warp. // So if integer multiplication operands are guaranteed to fit into 24 bits // (always lie withtin [-8M, 8M - 1] range in signed case), // explicit 24-bit multiplication is preferred for performance. /////////////////////////////////////////////////////////////////////////////// #define IMUL(a, b) __mul24(a, b) /////////////////////////////////////////////////////////////////////////////// // Calculate scalar products of VectorN vectors of ElementN elements on GPU // Parameters restrictions: // 1) ElementN is strongly preferred to be a multiple of warp size to // meet alignment constraints of memory coalescing. // 2) ACCUM_N must be a power of two. /////////////////////////////////////////////////////////////////////////////// #define ACCUM_N 1024 __global__ void scalarProdGPU(float *d_C, float *d_A, float *d_B, int vectorN, int elementN) { //Accumulators cache __shared__ float accumResult[ACCUM_N]; //////////////////////////////////////////////////////////////////////////// // Cycle through every pair of vectors, // taking into account that vector counts can be different // from total number of thread blocks //////////////////////////////////////////////////////////////////////////// for (int vec = blockIdx.x; vec < vectorN; vec += gridDim.x) { int vectorBase = IMUL(elementN, vec); int vectorEnd = vectorBase + elementN; //////////////////////////////////////////////////////////////////////// // Each accumulator cycles through vectors with // stride equal to number of total number of accumulators ACCUM_N // At this stage ACCUM_N is only preferred be a multiple of warp size // to meet memory coalescing alignment constraints. //////////////////////////////////////////////////////////////////////// for (int iAccum = threadIdx.x; iAccum < ACCUM_N; iAccum += blockDim.x) { float sum = 0; for (int pos = vectorBase + iAccum; pos < vectorEnd; pos += ACCUM_N) sum += d_A[pos] * d_B[pos]; accumResult[iAccum] = sum; } //////////////////////////////////////////////////////////////////////// // Perform tree-like reduction of accumulators' results. 
// ACCUM_N has to be power of two at this stage //////////////////////////////////////////////////////////////////////// for (int stride = ACCUM_N / 2; stride > 0; stride >>= 1) { __syncthreads(); for (int iAccum = threadIdx.x; iAccum < stride; iAccum += blockDim.x) accumResult[iAccum] += accumResult[stride + iAccum]; } if (threadIdx.x == 0) d_C[vec] = accumResult[0]; } } extern "C" void startSPKernel(size_t threads, size_t blocks, float *d_C, float *d_A, float *d_B, int vectorN, int elementN, hipStream_t& stream) { hipLaunchKernelGGL(( scalarProdGPU), dim3(blocks), dim3(threads),0,stream, d_C, d_A, d_B, vectorN, elementN); } hipFuncAttributes getSPKernelProperties() { hipFuncAttributes attributes; hipFuncGetAttributes(&attributes, scalarProdGPU); return attributes; }
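// A small illustrative consumer (not from the original project) of the attributes
// query exported above: cudaFuncAttributes (hipFuncAttributes in the HIP half, with
// the same field names) reports per-kernel resource usage, which is useful when
// picking a block size for startSPKernel. The prototype is assumed to match
// Kernel_Starter_SP.h; the CUDA runtime spelling is shown.
#include <cuda_runtime.h>
#include <cstdio>

cudaFuncAttributes getSPKernelProperties();   // defined in the file above

int main() {
    cudaFuncAttributes attr = getSPKernelProperties();
    printf("registers per thread  : %d\n", attr.numRegs);
    printf("static shared memory  : %zu bytes\n", attr.sharedSizeBytes);
    printf("max threads per block : %d\n", attr.maxThreadsPerBlock);
    return 0;
}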
34691fad7da9aded5851944cd478562a6d869122.cu
/* * Copyright 1993-2013 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include "Kernel_Starter_SP.h" #include "cuda_runtime.h" /////////////////////////////////////////////////////////////////////////////// // On G80-class hardware 24-bit multiplication takes 4 clocks per warp // (the same as for floating point multiplication and addition), // whereas full 32-bit multiplication takes 16 clocks per warp. // So if integer multiplication operands are guaranteed to fit into 24 bits // (always lie withtin [-8M, 8M - 1] range in signed case), // explicit 24-bit multiplication is preferred for performance. /////////////////////////////////////////////////////////////////////////////// #define IMUL(a, b) __mul24(a, b) /////////////////////////////////////////////////////////////////////////////// // Calculate scalar products of VectorN vectors of ElementN elements on GPU // Parameters restrictions: // 1) ElementN is strongly preferred to be a multiple of warp size to // meet alignment constraints of memory coalescing. // 2) ACCUM_N must be a power of two. /////////////////////////////////////////////////////////////////////////////// #define ACCUM_N 1024 __global__ void scalarProdGPU(float *d_C, float *d_A, float *d_B, int vectorN, int elementN) { //Accumulators cache __shared__ float accumResult[ACCUM_N]; //////////////////////////////////////////////////////////////////////////// // Cycle through every pair of vectors, // taking into account that vector counts can be different // from total number of thread blocks //////////////////////////////////////////////////////////////////////////// for (int vec = blockIdx.x; vec < vectorN; vec += gridDim.x) { int vectorBase = IMUL(elementN, vec); int vectorEnd = vectorBase + elementN; //////////////////////////////////////////////////////////////////////// // Each accumulator cycles through vectors with // stride equal to number of total number of accumulators ACCUM_N // At this stage ACCUM_N is only preferred be a multiple of warp size // to meet memory coalescing alignment constraints. //////////////////////////////////////////////////////////////////////// for (int iAccum = threadIdx.x; iAccum < ACCUM_N; iAccum += blockDim.x) { float sum = 0; for (int pos = vectorBase + iAccum; pos < vectorEnd; pos += ACCUM_N) sum += d_A[pos] * d_B[pos]; accumResult[iAccum] = sum; } //////////////////////////////////////////////////////////////////////// // Perform tree-like reduction of accumulators' results. // ACCUM_N has to be power of two at this stage //////////////////////////////////////////////////////////////////////// for (int stride = ACCUM_N / 2; stride > 0; stride >>= 1) { __syncthreads(); for (int iAccum = threadIdx.x; iAccum < stride; iAccum += blockDim.x) accumResult[iAccum] += accumResult[stride + iAccum]; } if (threadIdx.x == 0) d_C[vec] = accumResult[0]; } } extern "C" void startSPKernel(size_t threads, size_t blocks, float *d_C, float *d_A, float *d_B, int vectorN, int elementN, cudaStream_t& stream) { scalarProdGPU<<<blocks, threads,0,stream>>>(d_C, d_A, d_B, vectorN, elementN); } cudaFuncAttributes getSPKernelProperties() { cudaFuncAttributes attributes; cudaFuncGetAttributes(&attributes, scalarProdGPU); return attributes; }
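// A minimal, illustrative driver for the scalar-product kernel above: one block per
// vector pair, ACCUM_N shared-memory partial sums, then a tree reduction. The sizes,
// the B = 1 test pattern, and the CPU check below are assumptions for demonstration,
// not values from the original project; the extern "C" prototype mirrors the one
// defined in the file above.
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

extern "C" void startSPKernel(size_t threads, size_t blocks, float *d_C, float *d_A,
                              float *d_B, int vectorN, int elementN, cudaStream_t& stream);

int main() {
    const int vectorN = 8, elementN = 4096;                   // assumed problem size
    std::vector<float> hA(vectorN * elementN), hB(vectorN * elementN, 1.0f), hC(vectorN);
    for (size_t i = 0; i < hA.size(); i++) hA[i] = 0.001f * i;

    float *dA, *dB, *dC;
    cudaMalloc(&dA, hA.size() * sizeof(float));
    cudaMalloc(&dB, hB.size() * sizeof(float));
    cudaMalloc(&dC, hC.size() * sizeof(float));
    cudaMemcpy(dA, hA.data(), hA.size() * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB.data(), hB.size() * sizeof(float), cudaMemcpyHostToDevice);

    cudaStream_t stream = 0;                                  // default stream
    startSPKernel(256, vectorN, dC, dA, dB, vectorN, elementN, stream);
    cudaMemcpy(hC.data(), dC, hC.size() * sizeof(float), cudaMemcpyDeviceToHost);

    // CPU reference for vector 0: with B = 1 the dot product is just the sum of A's first slice.
    double ref = 0.0;
    for (int i = 0; i < elementN; i++) ref += hA[i];
    printf("gpu %.3f  cpu %.3f\n", hC[0], ref);

    cudaFree(dA); cudaFree(dB); cudaFree(dC);
    return 0;
}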
86cfcd8449fbf20e68d2b10fd7a43598649ca30f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define t_max 1 #define t 1 /* (u[0][0][0][1][0]=(a*((((u[-3][0][0][0][0]+(u[0][-3][0][0][0]+u[0][0][-3][0][0]))*-2.0)+(((u[-2][0][0][0][0]+(u[0][-2][0][0][0]+u[0][0][-2][0][0]))*15.0)+((u[-1][0][0][0][0]+(u[0][-1][0][0][0]+u[0][0][-1][0][0]))*-60.0)))+((u[0][0][0][0][0]*20.0)+(((u[1][0][0][0][0]+(u[0][1][0][0][0]+u[0][0][1][0][0]))*30.0)+((u[2][0][0][0][0]+(u[0][2][0][0][0]+u[0][0][2][0][0]))*-3.0)))))) */ __global__ void upstream_5_3d(double * * u_0_1_out, double * u_0_0, double * u_0_1, double a, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c) { //double * const u__u_0[16] = { u_0_0, u_0_1 } ; int _idx0; int _idx1; int _idx10; int _idx11; int _idx12; int _idx13; int _idx14; int _idx15; int _idx2; int _idx3; int _idx4; int _idx5; int _idx6; int _idx7; int _idx8; int _idx9; int chunk_idx_x; int chunk_idx_x_max; int chunk_idx_y; int chunk_idx_y_max; int chunk_idx_z; int chunk_idx_z_max; int idx_1_2; int size_1_1; int size_1_2; //int t; int thd_idx_x; int thd_idx_y; int thd_idx_z; int thdblks_idx_x; int thdblks_idx_x_max; int thdblks_idx_y; int thdblks_idx_y_max; int thdblks_idx_z; int thdblks_idx_z_max; int tmp; /* Initializations */ size_1_1=(y_max/blockDim.y); size_1_2=(z_max/blockDim.z); idx_1_2=(blockIdx.y/size_1_2); tmp=(blockIdx.y-(idx_1_2*size_1_2)); chunk_idx_x=(c*(threadIdx.x+(blockDim.x*blockIdx.x))); chunk_idx_x_max=(chunk_idx_x+c); chunk_idx_y=(threadIdx.y+(tmp*blockDim.y)); chunk_idx_y_max=(chunk_idx_y+1); chunk_idx_z=(threadIdx.z+(idx_1_2*blockDim.z)); chunk_idx_z_max=(chunk_idx_z+1); thdblks_idx_x=(tbx*(threadIdx.x+(blockDim.x*blockIdx.x))); thdblks_idx_x_max=(thdblks_idx_x+tbx); thdblks_idx_y=(tby*(threadIdx.y+(tmp*blockDim.y))); thdblks_idx_y_max=(thdblks_idx_y+tby); thdblks_idx_z=(tbz*(threadIdx.z+(idx_1_2*blockDim.z))); thdblks_idx_z_max=(thdblks_idx_z+tbz); /* Implementation */ /* for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... } */ //for (t=1; t<=t_max; t+=1) { /* Index bounds calculations for iterators in thdblks[t=t, s=(tbx, tby, tbz)][0] */ /* Index bounds calculations for iterators in chunk[t=t, s=(c, 1, 1)][0] */ /* for POINT thd[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in chunk[t=t, s=(:, :, :)][0] parallel 1 <level 2> schedule default { ... 
} */ { /* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */ thd_idx_z=chunk_idx_z; thd_idx_y=chunk_idx_y; for (thd_idx_x=chunk_idx_x; thd_idx_x<(chunk_idx_x_max-0); thd_idx_x+=1) { /* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */ /* u[t=(t+1), s=thd[t=?, s=?][0]][0]=stencil(u[t=t, s=thd[t=?, s=?][0]][0]) */ /* _idx0 = (((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t)) */ _idx0=(((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t)); /* _idx1 = (((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+(((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+3) */ _idx1=(((_idx0-(3*x_max))-(15*t))+3); /* _idx2 = (((((((((thd_idx_z*x_max)+((5*t)*thd_idx_z))*y_max)+(((((5*t)*thd_idx_z)+thd_idx_y)+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(15*t))+3) */ _idx2=((((_idx0+(((-3*x_max)-(15*t))*y_max))-((15*t)*x_max))-(75*(t*t)))+3); /* _idx3 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+1) */ _idx3=(_idx0+1); /* _idx4 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+1)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(5*t))+3) */ _idx4=((_idx1+x_max)+(5*t)); /* _idx5 = ((((((((((((thd_idx_z+1)*x_max)+((5*t)*thd_idx_z))+(5*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(5*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(25*(t*t)))+(15*t))+3) */ _idx5=(((_idx2+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); /* _idx6 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+2) */ _idx6=(_idx3+1); /* _idx7 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+2)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(10*t))+3) */ _idx7=((_idx4+x_max)+(5*t)); /* _idx8 = ((((((((((((thd_idx_z+2)*x_max)+((5*t)*thd_idx_z))+(10*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(10*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(50*(t*t)))+(15*t))+3) */ _idx8=(((_idx5+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); /* _idx9 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+3) */ _idx9=(_idx3+2); /* _idx10 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+4) */ _idx10=(_idx3+3); /* _idx11 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+4)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(20*t))+3) */ _idx11=((_idx9+x_max)+(5*t)); /* _idx12 = 
((((((((((((thd_idx_z+4)*x_max)+((5*t)*thd_idx_z))+(20*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(20*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(100*(t*t)))+(15*t))+3) */ _idx12=(((_idx9+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); /* _idx13 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+5) */ _idx13=(_idx3+4); /* _idx14 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+5)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(25*t))+3) */ _idx14=((_idx11+x_max)+(5*t)); /* _idx15 = ((((((((((((thd_idx_z+5)*x_max)+((5*t)*thd_idx_z))+(25*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(25*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(125*(t*t)))+(15*t))+3) */ _idx15=(((_idx12+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); u_0_1[_idx9]=(a*((((u_0_0[_idx0]+(u_0_0[_idx1]+u_0_0[_idx2]))*-2.0)+(((u_0_0[_idx3]+(u_0_0[_idx4]+u_0_0[_idx5]))*15.0)+((u_0_0[_idx6]+(u_0_0[_idx7]+u_0_0[_idx8]))*-60.0)))+((u_0_0[_idx9]*20.0)+(((u_0_0[_idx10]+(u_0_0[_idx11]+u_0_0[_idx12]))*30.0)+((u_0_0[_idx13]+(u_0_0[_idx14]+u_0_0[_idx15]))*-3.0))))); } } } } __global__ void initialize(double * u_0_0, double * u_0_1, double a, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c) { double * const u__u_0[16] = { u_0_0, u_0_1 } ; int _idx0; int _idx1; int _idx10; int _idx11; int _idx12; int _idx13; int _idx14; int _idx15; int _idx2; int _idx3; int _idx4; int _idx5; int _idx6; int _idx7; int _idx8; int _idx9; int chunk_idx_x; int chunk_idx_x_max; int chunk_idx_y; int chunk_idx_y_max; int chunk_idx_z; int chunk_idx_z_max; int idx_1_2; int size_1_1; int size_1_2; //int t; int thd_idx_x; int thd_idx_y; int thd_idx_z; int thdblks_idx_x; int thdblks_idx_x_max; int thdblks_idx_y; int thdblks_idx_y_max; int thdblks_idx_z; int thdblks_idx_z_max; int tmp; /* Initializations */ size_1_1=(y_max/blockDim.y); size_1_2=(z_max/blockDim.z); idx_1_2=(blockIdx.y/size_1_2); tmp=(blockIdx.y-(idx_1_2*size_1_2)); chunk_idx_x=(c*(threadIdx.x+(blockDim.x*blockIdx.x))); chunk_idx_x_max=(chunk_idx_x+c); chunk_idx_y=(threadIdx.y+(tmp*blockDim.y)); chunk_idx_y_max=(chunk_idx_y+1); chunk_idx_z=(threadIdx.z+(idx_1_2*blockDim.z)); chunk_idx_z_max=(chunk_idx_z+1); thdblks_idx_x=(tbx*(threadIdx.x+(blockDim.x*blockIdx.x))); thdblks_idx_x_max=(thdblks_idx_x+tbx); thdblks_idx_y=(tby*(threadIdx.y+(tmp*blockDim.y))); thdblks_idx_y_max=(thdblks_idx_y+tby); thdblks_idx_z=(tbz*(threadIdx.z+(idx_1_2*blockDim.z))); thdblks_idx_z_max=(thdblks_idx_z+tbz); /* Implementation */ /* for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... } */ //for (t=1; t<=t_max; t+=1) { /* Index bounds calculations for iterators in thdblks[t=t, s=(tbx, tby, tbz)][0] */ /* Index bounds calculations for iterators in chunk[t=t, s=(c, 1, 1)][0] */ /* for POINT thd[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in chunk[t=t, s=(:, :, :)][0] parallel 1 <level 2> schedule default { ... 
} */ { /* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */ thd_idx_z=chunk_idx_z; thd_idx_y=chunk_idx_y; for (thd_idx_x=chunk_idx_x; thd_idx_x<(chunk_idx_x_max-0); thd_idx_x+=1) { /* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */ /* u[t=(t+1), s=thd[t=?, s=?][0]][0]=stencil(u[t=t, s=thd[t=?, s=?][0]][0]) */ /* _idx0 = (((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t)) */ _idx0=(((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t)); u__u_0[(t-1)][_idx0]=0.1; /* _idx1 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+1) */ _idx1=(_idx0+1); u__u_0[(t-1)][_idx1]=0.1; /* _idx2 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+2) */ _idx2=(_idx1+1); u__u_0[(t-1)][_idx2]=0.1; /* _idx3 = (((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+(((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+3) */ _idx3=(((_idx1-(3*x_max))-(15*t))+2); u__u_0[(t-1)][_idx3]=0.1; /* _idx4 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+1)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(5*t))+3) */ _idx4=((_idx3+x_max)+(5*t)); u__u_0[(t-1)][_idx4]=0.1; /* _idx5 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+2)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(10*t))+3) */ _idx5=((_idx4+x_max)+(5*t)); u__u_0[(t-1)][_idx5]=0.1; /* _idx6 = (((((((((thd_idx_z*x_max)+((5*t)*thd_idx_z))*y_max)+(((((5*t)*thd_idx_z)+thd_idx_y)+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(15*t))+3) */ _idx6=((((_idx1+(((-3*x_max)-(15*t))*y_max))-((15*t)*x_max))-(75*(t*t)))+2); u__u_0[(t-1)][_idx6]=0.1; /* _idx7 = ((((((((((((thd_idx_z+1)*x_max)+((5*t)*thd_idx_z))+(5*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(5*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(25*(t*t)))+(15*t))+3) */ _idx7=(((_idx6+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); u__u_0[(t-1)][_idx7]=0.1; /* _idx8 = ((((((((((((thd_idx_z+2)*x_max)+((5*t)*thd_idx_z))+(10*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(10*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(50*(t*t)))+(15*t))+3) */ _idx8=(((_idx7+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); u__u_0[(t-1)][_idx8]=0.1; /* _idx9 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+3) */ _idx9=(_idx1+2); u__u_0[(t-1)][_idx9]=0.1; /* _idx10 = ((((((((((((thd_idx_z+4)*x_max)+((5*t)*thd_idx_z))+(20*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(20*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(100*(t*t)))+(15*t))+3) */ _idx10=(((_idx9+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); u__u_0[(t-1)][_idx10]=0.1; /* _idx11 = 
((((((((((((thd_idx_z+5)*x_max)+((5*t)*thd_idx_z))+(25*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(25*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(125*(t*t)))+(15*t))+3) */ _idx11=(((_idx10+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); u__u_0[(t-1)][_idx11]=0.1; /* _idx12 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+4)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(20*t))+3) */ _idx12=((_idx9+x_max)+(5*t)); u__u_0[(t-1)][_idx12]=0.1; /* _idx13 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+5)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(25*t))+3) */ _idx13=((_idx12+x_max)+(5*t)); u__u_0[(t-1)][_idx13]=0.1; /* _idx14 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+4) */ _idx14=(_idx1+3); u__u_0[(t-1)][_idx14]=0.1; /* _idx15 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+5) */ _idx15=(_idx1+4); u__u_0[(t-1)][_idx15]=0.1; u__u_0[t][_idx9]=1.1; } } } }
86cfcd8449fbf20e68d2b10fd7a43598649ca30f.cu
#define t_max 1 #define t 1 /* (u[0][0][0][1][0]=(a*((((u[-3][0][0][0][0]+(u[0][-3][0][0][0]+u[0][0][-3][0][0]))*-2.0)+(((u[-2][0][0][0][0]+(u[0][-2][0][0][0]+u[0][0][-2][0][0]))*15.0)+((u[-1][0][0][0][0]+(u[0][-1][0][0][0]+u[0][0][-1][0][0]))*-60.0)))+((u[0][0][0][0][0]*20.0)+(((u[1][0][0][0][0]+(u[0][1][0][0][0]+u[0][0][1][0][0]))*30.0)+((u[2][0][0][0][0]+(u[0][2][0][0][0]+u[0][0][2][0][0]))*-3.0)))))) */ __global__ void upstream_5_3d(double * * u_0_1_out, double * u_0_0, double * u_0_1, double a, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c) { //double * const u__u_0[16] = { u_0_0, u_0_1 } ; int _idx0; int _idx1; int _idx10; int _idx11; int _idx12; int _idx13; int _idx14; int _idx15; int _idx2; int _idx3; int _idx4; int _idx5; int _idx6; int _idx7; int _idx8; int _idx9; int chunk_idx_x; int chunk_idx_x_max; int chunk_idx_y; int chunk_idx_y_max; int chunk_idx_z; int chunk_idx_z_max; int idx_1_2; int size_1_1; int size_1_2; //int t; int thd_idx_x; int thd_idx_y; int thd_idx_z; int thdblks_idx_x; int thdblks_idx_x_max; int thdblks_idx_y; int thdblks_idx_y_max; int thdblks_idx_z; int thdblks_idx_z_max; int tmp; /* Initializations */ size_1_1=(y_max/blockDim.y); size_1_2=(z_max/blockDim.z); idx_1_2=(blockIdx.y/size_1_2); tmp=(blockIdx.y-(idx_1_2*size_1_2)); chunk_idx_x=(c*(threadIdx.x+(blockDim.x*blockIdx.x))); chunk_idx_x_max=(chunk_idx_x+c); chunk_idx_y=(threadIdx.y+(tmp*blockDim.y)); chunk_idx_y_max=(chunk_idx_y+1); chunk_idx_z=(threadIdx.z+(idx_1_2*blockDim.z)); chunk_idx_z_max=(chunk_idx_z+1); thdblks_idx_x=(tbx*(threadIdx.x+(blockDim.x*blockIdx.x))); thdblks_idx_x_max=(thdblks_idx_x+tbx); thdblks_idx_y=(tby*(threadIdx.y+(tmp*blockDim.y))); thdblks_idx_y_max=(thdblks_idx_y+tby); thdblks_idx_z=(tbz*(threadIdx.z+(idx_1_2*blockDim.z))); thdblks_idx_z_max=(thdblks_idx_z+tbz); /* Implementation */ /* for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... } */ //for (t=1; t<=t_max; t+=1) { /* Index bounds calculations for iterators in thdblks[t=t, s=(tbx, tby, tbz)][0] */ /* Index bounds calculations for iterators in chunk[t=t, s=(c, 1, 1)][0] */ /* for POINT thd[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in chunk[t=t, s=(:, :, :)][0] parallel 1 <level 2> schedule default { ... 
} */ { /* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */ thd_idx_z=chunk_idx_z; thd_idx_y=chunk_idx_y; for (thd_idx_x=chunk_idx_x; thd_idx_x<(chunk_idx_x_max-0); thd_idx_x+=1) { /* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */ /* u[t=(t+1), s=thd[t=?, s=?][0]][0]=stencil(u[t=t, s=thd[t=?, s=?][0]][0]) */ /* _idx0 = (((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t)) */ _idx0=(((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t)); /* _idx1 = (((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+(((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+3) */ _idx1=(((_idx0-(3*x_max))-(15*t))+3); /* _idx2 = (((((((((thd_idx_z*x_max)+((5*t)*thd_idx_z))*y_max)+(((((5*t)*thd_idx_z)+thd_idx_y)+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(15*t))+3) */ _idx2=((((_idx0+(((-3*x_max)-(15*t))*y_max))-((15*t)*x_max))-(75*(t*t)))+3); /* _idx3 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+1) */ _idx3=(_idx0+1); /* _idx4 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+1)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(5*t))+3) */ _idx4=((_idx1+x_max)+(5*t)); /* _idx5 = ((((((((((((thd_idx_z+1)*x_max)+((5*t)*thd_idx_z))+(5*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(5*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(25*(t*t)))+(15*t))+3) */ _idx5=(((_idx2+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); /* _idx6 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+2) */ _idx6=(_idx3+1); /* _idx7 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+2)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(10*t))+3) */ _idx7=((_idx4+x_max)+(5*t)); /* _idx8 = ((((((((((((thd_idx_z+2)*x_max)+((5*t)*thd_idx_z))+(10*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(10*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(50*(t*t)))+(15*t))+3) */ _idx8=(((_idx5+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); /* _idx9 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+3) */ _idx9=(_idx3+2); /* _idx10 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+4) */ _idx10=(_idx3+3); /* _idx11 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+4)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(20*t))+3) */ _idx11=((_idx9+x_max)+(5*t)); /* _idx12 = 
((((((((((((thd_idx_z+4)*x_max)+((5*t)*thd_idx_z))+(20*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(20*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(100*(t*t)))+(15*t))+3) */ _idx12=(((_idx9+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); /* _idx13 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+5) */ _idx13=(_idx3+4); /* _idx14 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+5)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(25*t))+3) */ _idx14=((_idx11+x_max)+(5*t)); /* _idx15 = ((((((((((((thd_idx_z+5)*x_max)+((5*t)*thd_idx_z))+(25*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(25*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(125*(t*t)))+(15*t))+3) */ _idx15=(((_idx12+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); u_0_1[_idx9]=(a*((((u_0_0[_idx0]+(u_0_0[_idx1]+u_0_0[_idx2]))*-2.0)+(((u_0_0[_idx3]+(u_0_0[_idx4]+u_0_0[_idx5]))*15.0)+((u_0_0[_idx6]+(u_0_0[_idx7]+u_0_0[_idx8]))*-60.0)))+((u_0_0[_idx9]*20.0)+(((u_0_0[_idx10]+(u_0_0[_idx11]+u_0_0[_idx12]))*30.0)+((u_0_0[_idx13]+(u_0_0[_idx14]+u_0_0[_idx15]))*-3.0))))); } } } } __global__ void initialize(double * u_0_0, double * u_0_1, double a, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c) { double * const u__u_0[16] = { u_0_0, u_0_1 } ; int _idx0; int _idx1; int _idx10; int _idx11; int _idx12; int _idx13; int _idx14; int _idx15; int _idx2; int _idx3; int _idx4; int _idx5; int _idx6; int _idx7; int _idx8; int _idx9; int chunk_idx_x; int chunk_idx_x_max; int chunk_idx_y; int chunk_idx_y_max; int chunk_idx_z; int chunk_idx_z_max; int idx_1_2; int size_1_1; int size_1_2; //int t; int thd_idx_x; int thd_idx_y; int thd_idx_z; int thdblks_idx_x; int thdblks_idx_x_max; int thdblks_idx_y; int thdblks_idx_y_max; int thdblks_idx_z; int thdblks_idx_z_max; int tmp; /* Initializations */ size_1_1=(y_max/blockDim.y); size_1_2=(z_max/blockDim.z); idx_1_2=(blockIdx.y/size_1_2); tmp=(blockIdx.y-(idx_1_2*size_1_2)); chunk_idx_x=(c*(threadIdx.x+(blockDim.x*blockIdx.x))); chunk_idx_x_max=(chunk_idx_x+c); chunk_idx_y=(threadIdx.y+(tmp*blockDim.y)); chunk_idx_y_max=(chunk_idx_y+1); chunk_idx_z=(threadIdx.z+(idx_1_2*blockDim.z)); chunk_idx_z_max=(chunk_idx_z+1); thdblks_idx_x=(tbx*(threadIdx.x+(blockDim.x*blockIdx.x))); thdblks_idx_x_max=(thdblks_idx_x+tbx); thdblks_idx_y=(tby*(threadIdx.y+(tmp*blockDim.y))); thdblks_idx_y_max=(thdblks_idx_y+tby); thdblks_idx_z=(tbz*(threadIdx.z+(idx_1_2*blockDim.z))); thdblks_idx_z_max=(thdblks_idx_z+tbz); /* Implementation */ /* for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... } */ //for (t=1; t<=t_max; t+=1) { /* Index bounds calculations for iterators in thdblks[t=t, s=(tbx, tby, tbz)][0] */ /* Index bounds calculations for iterators in chunk[t=t, s=(c, 1, 1)][0] */ /* for POINT thd[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in chunk[t=t, s=(:, :, :)][0] parallel 1 <level 2> schedule default { ... 
} */ { /* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */ thd_idx_z=chunk_idx_z; thd_idx_y=chunk_idx_y; for (thd_idx_x=chunk_idx_x; thd_idx_x<(chunk_idx_x_max-0); thd_idx_x+=1) { /* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */ /* u[t=(t+1), s=thd[t=?, s=?][0]][0]=stencil(u[t=t, s=thd[t=?, s=?][0]][0]) */ /* _idx0 = (((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t)) */ _idx0=(((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t)); u__u_0[(t-1)][_idx0]=0.1; /* _idx1 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+1) */ _idx1=(_idx0+1); u__u_0[(t-1)][_idx1]=0.1; /* _idx2 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+2) */ _idx2=(_idx1+1); u__u_0[(t-1)][_idx2]=0.1; /* _idx3 = (((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+(((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+3) */ _idx3=(((_idx1-(3*x_max))-(15*t))+2); u__u_0[(t-1)][_idx3]=0.1; /* _idx4 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+1)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(5*t))+3) */ _idx4=((_idx3+x_max)+(5*t)); u__u_0[(t-1)][_idx4]=0.1; /* _idx5 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+2)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(10*t))+3) */ _idx5=((_idx4+x_max)+(5*t)); u__u_0[(t-1)][_idx5]=0.1; /* _idx6 = (((((((((thd_idx_z*x_max)+((5*t)*thd_idx_z))*y_max)+(((((5*t)*thd_idx_z)+thd_idx_y)+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(15*t))+3) */ _idx6=((((_idx1+(((-3*x_max)-(15*t))*y_max))-((15*t)*x_max))-(75*(t*t)))+2); u__u_0[(t-1)][_idx6]=0.1; /* _idx7 = ((((((((((((thd_idx_z+1)*x_max)+((5*t)*thd_idx_z))+(5*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(5*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(25*(t*t)))+(15*t))+3) */ _idx7=(((_idx6+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); u__u_0[(t-1)][_idx7]=0.1; /* _idx8 = ((((((((((((thd_idx_z+2)*x_max)+((5*t)*thd_idx_z))+(10*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(10*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(50*(t*t)))+(15*t))+3) */ _idx8=(((_idx7+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); u__u_0[(t-1)][_idx8]=0.1; /* _idx9 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+3) */ _idx9=(_idx1+2); u__u_0[(t-1)][_idx9]=0.1; /* _idx10 = ((((((((((((thd_idx_z+4)*x_max)+((5*t)*thd_idx_z))+(20*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(20*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(100*(t*t)))+(15*t))+3) */ _idx10=(((_idx9+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); u__u_0[(t-1)][_idx10]=0.1; /* _idx11 = 
((((((((((((thd_idx_z+5)*x_max)+((5*t)*thd_idx_z))+(25*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(25*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(125*(t*t)))+(15*t))+3) */ _idx11=(((_idx10+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t))); u__u_0[(t-1)][_idx11]=0.1; /* _idx12 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+4)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(20*t))+3) */ _idx12=((_idx9+x_max)+(5*t)); u__u_0[(t-1)][_idx12]=0.1; /* _idx13 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+5)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(25*t))+3) */ _idx13=((_idx12+x_max)+(5*t)); u__u_0[(t-1)][_idx13]=0.1; /* _idx14 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+4) */ _idx14=(_idx1+3); u__u_0[(t-1)][_idx14]=0.1; /* _idx15 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+5) */ _idx15=(_idx1+4); u__u_0[(t-1)][_idx15]=0.1; u__u_0[t][_idx9]=1.1; } } } }
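// The upstream_5_3d kernel above is machine-generated, and the flattened _idx
// arithmetic hides the operator it applies. The comment at the top of the file spells
// it out: along each axis, neighbours at offsets -3..+2 are combined with weights
// (-2, 15, -60, 20, 30, -3) and scaled by a. The sketch below restates that update
// with plain 3-D indexing for readability only; the row-major layout, the halo guard
// (3 points behind, 2 ahead), and the axis ordering are assumptions, and it is not
// claimed to reproduce the generated code's padding or traversal exactly.
#include <cuda_runtime.h>

__device__ __forceinline__ size_t idx3(int x, int y, int z, int nx, int ny) {
    return ((size_t)z * ny + y) * nx + x;                 // assumed row-major layout
}

__global__ void upstream_5_3d_readable(const double* u_in, double* u_out, double a,
                                       int nx, int ny, int nz) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    // Skip the halo: the stencil reaches 3 points back and 2 points forward per axis.
    if (x < 3 || y < 3 || z < 3 || x >= nx - 2 || y >= ny - 2 || z >= nz - 2) return;

    // Sum of the three axis-neighbours at signed offset d.
    auto axes = [&](int d) {
        return u_in[idx3(x + d, y, z, nx, ny)]
             + u_in[idx3(x, y + d, z, nx, ny)]
             + u_in[idx3(x, y, z + d, nx, ny)];
    };

    u_out[idx3(x, y, z, nx, ny)] =
        a * ( -2.0 * axes(-3) + 15.0 * axes(-2) - 60.0 * axes(-1)
            + 20.0 * u_in[idx3(x, y, z, nx, ny)]
            + 30.0 * axes(1)  -  3.0 * axes(2) );
}
// Example launch (assumed): dim3 block(8, 8, 8);
//                           dim3 grid((nx + 7) / 8, (ny + 7) / 8, (nz + 7) / 8);
//                           upstream_5_3d_readable<<<grid, block>>>(u_in, u_out, a, nx, ny, nz);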
ded770cf7f6dda0169677498147db6edd7d74d10.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../include/Scene.h" #include "../include/Kernel.h" Scene::Scene(){} Scene::Scene(pcl::PointCloud<pcl::PointNormal> *cloud_ptr, float d_dist, unsigned int refPointDownsampleFactor) { /* *Scene PPF Hash */ this->cloud_ptr =cloud_ptr; this->d_dist = d_dist; this->sceneStep = refPointDownsampleFactor; cout<<"GPU::scene point size:"<<this->cloud_ptr->size()<<endl; cout<<"GPU::scene d_dist:"<<this->d_dist <<endl; cout<<"GPU::scene reference step:"<<this->sceneStep<<endl; thrust::host_vector<float3> *points = new thrust::host_vector<float3>(cloud_ptr->size()); thrust::host_vector<float3> *normals = new thrust::host_vector<float3>(cloud_ptr->size()); for(int i = 0; i < cloud_ptr->size(); i++){ (*points)[i].x = (*cloud_ptr)[i].x; (*points)[i].y = (*cloud_ptr)[i].y; (*points)[i].z = (*cloud_ptr)[i].z; (*normals)[i].x = (*cloud_ptr)[i].normal_x; (*normals)[i].y = (*cloud_ptr)[i].normal_y; (*normals)[i].z = (*cloud_ptr)[i].normal_z; } HANDLE_ERROR(hipGetLastError()); HANDLE_ERROR(hipDeviceSynchronize()); this->initPPFs(points, normals, cloud_ptr->size(), d_dist, refPointDownsampleFactor); cout<< "GPU::scenePPF size: " <<scenePPFs->size()<<endl; HANDLE_ERROR(hipGetLastError()); HANDLE_ERROR(hipDeviceSynchronize()); this->scenehashKeys = new thrust::device_vector<unsigned int>(this->scenePPFs->size()); int blocks = ::min(((int)(this->scenePPFs->size()) + BLOCK_SIZE - 1) / BLOCK_SIZE, MAX_NBLOCKS); hipLaunchKernelGGL(( ppf_hash_kernel), dim3(blocks),dim3(BLOCK_SIZE), 0, 0, RAW_PTR(this->scenePPFs), RAW_PTR(this->scenehashKeys), this->scenePPFs->size()); } Scene::~Scene() { delete this->scenePoints; delete this->sceneNormals; delete this->scenePPFs; delete this->sceneAngles; delete this->scenehashKeys; } void Scene::initPPFs(thrust::host_vector<float3> *points, thrust::host_vector<float3> *normals, int n, float d_dist, unsigned int refPointDownsampleFactor){ this->n = n; this->ppfS = n * n; // check if these are used later or can be discarded after this function this->scenePoints = new thrust::device_vector<float3>(*points); this->sceneNormals = new thrust::device_vector<float3>(*normals); this->scenePPFs = new thrust::device_vector<int4>(ppfS); this->sceneAngles = new thrust::device_vector<float>(ppfS); // This will crash if n = 0; int blocks = ::min(((int)(this->n + BLOCK_SIZE) - 1) / BLOCK_SIZE, MAX_NBLOCKS); // ppfKernel computes ppfs and descritizes them, but does *not* hash them // hashing is done by ppf_hash_kernel, called only for model, not scene (model.cu:46) hipLaunchKernelGGL(( ppfKernel), dim3(blocks),dim3(BLOCK_SIZE), 0, 0, RAW_PTR(this->scenePoints), RAW_PTR(this->sceneNormals), RAW_PTR(this->scenePPFs), n, refPointDownsampleFactor, this->d_dist); hipLaunchKernelGGL(( ppfAngle), dim3(blocks),dim3(BLOCK_SIZE), 0, 0, RAW_PTR(this->scenePoints), RAW_PTR(this->sceneNormals), RAW_PTR(this->sceneAngles), n, refPointDownsampleFactor, this->d_dist); } int Scene::numPoints(){ return this->n; } int Scene::getSceneStep(){ return this->sceneStep; } thrust::device_vector<float3> *Scene::getScenePoints(){ return this->scenePoints; } thrust::device_vector<float3> *Scene::getSceneNormals(){ return this->sceneNormals; } thrust::device_vector<int4>* Scene::getScenePPFs(){ return this->scenePPFs; } thrust::device_vector<float>* Scene::getSceneAngles(){ return this->sceneAngles; } thrust::device_vector<unsigned int>* Scene::getSceneHashKeys(){ return this->scenehashKeys; }
ded770cf7f6dda0169677498147db6edd7d74d10.cu
#include "../include/Scene.h" #include "../include/Kernel.h" Scene::Scene(){} Scene::Scene(pcl::PointCloud<pcl::PointNormal> *cloud_ptr, float d_dist, unsigned int refPointDownsampleFactor) { /* *计算Scene PPF 和 Hash */ this->cloud_ptr =cloud_ptr; this->d_dist = d_dist; this->sceneStep = refPointDownsampleFactor; cout<<"GPU::scene point size:"<<this->cloud_ptr->size()<<endl; cout<<"GPU::scene d_dist:"<<this->d_dist <<endl; cout<<"GPU::scene reference step:"<<this->sceneStep<<endl; thrust::host_vector<float3> *points = new thrust::host_vector<float3>(cloud_ptr->size()); thrust::host_vector<float3> *normals = new thrust::host_vector<float3>(cloud_ptr->size()); for(int i = 0; i < cloud_ptr->size(); i++){ (*points)[i].x = (*cloud_ptr)[i].x; (*points)[i].y = (*cloud_ptr)[i].y; (*points)[i].z = (*cloud_ptr)[i].z; (*normals)[i].x = (*cloud_ptr)[i].normal_x; (*normals)[i].y = (*cloud_ptr)[i].normal_y; (*normals)[i].z = (*cloud_ptr)[i].normal_z; } HANDLE_ERROR(cudaGetLastError()); HANDLE_ERROR(cudaDeviceSynchronize()); this->initPPFs(points, normals, cloud_ptr->size(), d_dist, refPointDownsampleFactor); cout<< "GPU::scenePPF size: " <<scenePPFs->size()<<endl; HANDLE_ERROR(cudaGetLastError()); HANDLE_ERROR(cudaDeviceSynchronize()); this->scenehashKeys = new thrust::device_vector<unsigned int>(this->scenePPFs->size()); int blocks = std::min(((int)(this->scenePPFs->size()) + BLOCK_SIZE - 1) / BLOCK_SIZE, MAX_NBLOCKS); ppf_hash_kernel<<<blocks,BLOCK_SIZE>>> (RAW_PTR(this->scenePPFs), RAW_PTR(this->scenehashKeys), this->scenePPFs->size()); } Scene::~Scene() { delete this->scenePoints; delete this->sceneNormals; delete this->scenePPFs; delete this->sceneAngles; delete this->scenehashKeys; } void Scene::initPPFs(thrust::host_vector<float3> *points, thrust::host_vector<float3> *normals, int n, float d_dist, unsigned int refPointDownsampleFactor){ this->n = n; this->ppfS = n * n; // check if these are used later or can be discarded after this function this->scenePoints = new thrust::device_vector<float3>(*points); this->sceneNormals = new thrust::device_vector<float3>(*normals); this->scenePPFs = new thrust::device_vector<int4>(ppfS); this->sceneAngles = new thrust::device_vector<float>(ppfS); // This will crash if n = 0; int blocks = std::min(((int)(this->n + BLOCK_SIZE) - 1) / BLOCK_SIZE, MAX_NBLOCKS); // ppfKernel computes ppfs and descritizes them, but does *not* hash them // hashing is done by ppf_hash_kernel, called only for model, not scene (model.cu:46) ppfKernel<<<blocks,BLOCK_SIZE>>> (RAW_PTR(this->scenePoints), RAW_PTR(this->sceneNormals), RAW_PTR(this->scenePPFs), n, refPointDownsampleFactor, this->d_dist); ppfAngle<<<blocks,BLOCK_SIZE>>> (RAW_PTR(this->scenePoints), RAW_PTR(this->sceneNormals), RAW_PTR(this->sceneAngles), n, refPointDownsampleFactor, this->d_dist); } int Scene::numPoints(){ return this->n; } int Scene::getSceneStep(){ return this->sceneStep; } thrust::device_vector<float3> *Scene::getScenePoints(){ return this->scenePoints; } thrust::device_vector<float3> *Scene::getSceneNormals(){ return this->sceneNormals; } thrust::device_vector<int4>* Scene::getScenePPFs(){ return this->scenePPFs; } thrust::device_vector<float>* Scene::getSceneAngles(){ return this->sceneAngles; } thrust::device_vector<unsigned int>* Scene::getSceneHashKeys(){ return this->scenehashKeys; }
908f827ec4d31727dd276725ff7bff5f42642850.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime_api.h> __global__ void empty() { return; } int main() { dim3 gridSize = dim3(1, 1, 1); dim3 blockSize = dim3(1, 1, 1); hipLaunchKernelGGL(( empty), dim3(gridSize), dim3(blockSize), 0, 0, ); printf("Hello World\n"); return 0; }
908f827ec4d31727dd276725ff7bff5f42642850.cu
#include <stdio.h> #include <cuda_runtime_api.h> __global__ void empty() { return; } int main() { dim3 gridSize = dim3(1, 1, 1); dim3 blockSize = dim3(1, 1, 1); empty<<<gridSize, blockSize>>>(); printf("Hello World\n"); return 0; }
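// The sample above launches an empty kernel without checking whether the launch
// succeeded; kernel launches are asynchronous, so any configuration or device error
// is silently dropped. Below is a short sketch of the usual check-after-launch
// pattern, written against the CUDA runtime API (the HIP version is analogous); it
// mirrors the sample rather than modifying it.
#include <stdio.h>
#include <cuda_runtime_api.h>

__global__ void empty_checked() { }

int main() {
    empty_checked<<<dim3(1, 1, 1), dim3(1, 1, 1)>>>();

    // Catch configuration errors reported at launch time...
    cudaError_t err = cudaGetLastError();
    // ...and execution errors reported once the kernel has actually run.
    if (err == cudaSuccess) err = cudaDeviceSynchronize();

    if (err != cudaSuccess) {
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("Hello World\n");
    return 0;
}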
948eb67a6af0a809607f2a7c1592fca8a021e60c.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2017 XGBoost contributors */ #include "./host_device_vector.h" #include <thrust/fill.h> #include <xgboost/data.h> #include <algorithm> #include <cstdint> #include <mutex> #include "device_helpers_hip.cuh" namespace xgboost { // the handler to call instead of hipSetDevice; only used for testing static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT void SetCudaSetDeviceHandler(void (*handler)(int)) { cudaSetDeviceHandler = handler; } // wrapper over access with useful methods class Permissions { GPUAccess access_; explicit Permissions(GPUAccess access) : access_{access} {} public: Permissions() : access_{GPUAccess::kNone} {} explicit Permissions(bool perm) : access_(perm ? GPUAccess::kWrite : GPUAccess::kNone) {} bool CanRead() const { return access_ >= kRead; } bool CanWrite() const { return access_ == kWrite; } bool CanAccess(GPUAccess access) const { return access_ >= access; } void Grant(GPUAccess access) { access_ = ::max(access_, access); } void DenyComplementary(GPUAccess compl_access) { access_ = ::min(access_, GPUAccess::kWrite - compl_access); } Permissions Complementary() const { return Permissions(GPUAccess::kWrite - access_); } }; template <typename T> struct HostDeviceVectorImpl { struct DeviceShard { DeviceShard() : proper_size_{0}, device_{-1}, start_{0}, perm_d_{false}, cached_size_{static_cast<size_t>(~0)}, vec_{nullptr} {} void Init(HostDeviceVectorImpl<T>* vec, int device) { if (vec_ == nullptr) { vec_ = vec; } CHECK_EQ(vec, vec_); device_ = device; LazyResize(vec_->Size()); perm_d_ = vec_->perm_h_.Complementary(); } void Init(HostDeviceVectorImpl<T>* vec, const DeviceShard& other) { if (vec_ == nullptr) { vec_ = vec; } CHECK_EQ(vec, vec_); device_ = other.device_; cached_size_ = other.cached_size_; start_ = other.start_; proper_size_ = other.proper_size_; SetDevice(); data_.resize(other.data_.size()); perm_d_ = other.perm_d_; } void ScatterFrom(const T* begin) { // TODO(canonizer): avoid full copy of host data LazySyncDevice(GPUAccess::kWrite); SetDevice(); dh::safe_cuda(hipMemcpyAsync(data_.data().get(), begin + start_, data_.size() * sizeof(T), hipMemcpyDefault)); } void GatherTo(thrust::device_ptr<T> begin) { LazySyncDevice(GPUAccess::kRead); SetDevice(); dh::safe_cuda(hipMemcpyAsync(begin.get() + start_, data_.data().get(), proper_size_ * sizeof(T), hipMemcpyDefault)); } void Fill(T v) { // TODO(canonizer): avoid full copy of host data LazySyncDevice(GPUAccess::kWrite); SetDevice(); thrust::fill(data_.begin(), data_.end(), v); } void Copy(DeviceShard* other) { // TODO(canonizer): avoid full copy of host data for this (but not for other) LazySyncDevice(GPUAccess::kWrite); other->LazySyncDevice(GPUAccess::kRead); SetDevice(); dh::safe_cuda(hipMemcpyAsync(data_.data().get(), other->data_.data().get(), data_.size() * sizeof(T), hipMemcpyDefault)); } void LazySyncHost(GPUAccess access) { SetDevice(); dh::safe_cuda(hipMemcpy(vec_->data_h_.data() + start_, data_.data().get(), proper_size_ * sizeof(T), hipMemcpyDeviceToHost)); perm_d_.DenyComplementary(access); } void LazyResize(size_t new_size) { if (new_size == cached_size_) { return; } // resize is required int ndevices = vec_->distribution_.devices_.Size(); int device_index = vec_->distribution_.devices_.Index(device_); start_ = vec_->distribution_.ShardStart(new_size, device_index); proper_size_ = vec_->distribution_.ShardProperSize(new_size, device_index); // The size on this device. 
size_t size_d = vec_->distribution_.ShardSize(new_size, device_index); SetDevice(); data_.resize(size_d); cached_size_ = new_size; } void LazySyncDevice(GPUAccess access) { if (perm_d_.CanAccess(access)) { return; } if (perm_d_.CanRead()) { // deny read to the host perm_d_.Grant(access); std::lock_guard<std::mutex> lock(vec_->mutex_); vec_->perm_h_.DenyComplementary(access); return; } // data is on the host size_t size_h = vec_->data_h_.size(); LazyResize(size_h); SetDevice(); dh::safe_cuda( hipMemcpy(data_.data().get(), vec_->data_h_.data() + start_, data_.size() * sizeof(T), hipMemcpyHostToDevice)); perm_d_.Grant(access); std::lock_guard<std::mutex> lock(vec_->mutex_); vec_->perm_h_.DenyComplementary(access); vec_->size_d_ = size_h; } void SetDevice() { if (cudaSetDeviceHandler == nullptr) { dh::safe_cuda(hipSetDevice(device_)); } else { (*cudaSetDeviceHandler)(device_); } } T* Raw() { return data_.data().get(); } size_t Start() const { return start_; } size_t DataSize() const { return data_.size(); } Permissions& Perm() { return perm_d_; } Permissions const& Perm() const { return perm_d_; } private: int device_; thrust::device_vector<T> data_; // cached vector size size_t cached_size_; size_t start_; // size of the portion to copy back to the host size_t proper_size_; Permissions perm_d_; HostDeviceVectorImpl<T>* vec_; }; HostDeviceVectorImpl(size_t size, T v, GPUDistribution distribution) : distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) { if (!distribution_.IsEmpty()) { size_d_ = size; InitShards(); Fill(v); } else { data_h_.resize(size, v); } } // required, as a new std::mutex has to be created HostDeviceVectorImpl(const HostDeviceVectorImpl<T>& other) : data_h_(other.data_h_), perm_h_(other.perm_h_), size_d_(other.size_d_), distribution_(other.distribution_), mutex_() { shards_.resize(other.shards_.size()); dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Init(this, other.shards_.at(i)); }); } // Initializer can be std::vector<T> or std::initializer_list<T> template <class Initializer> HostDeviceVectorImpl(const Initializer& init, GPUDistribution distribution) : distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) { if (!distribution_.IsEmpty()) { size_d_ = init.size(); InitShards(); Copy(init); } else { data_h_ = init; } } void InitShards() { int ndevices = distribution_.devices_.Size(); shards_.resize(ndevices); dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Init(this, distribution_.devices_.DeviceId(i)); }); } size_t Size() const { return perm_h_.CanRead() ? 
data_h_.size() : size_d_; } GPUSet Devices() const { return distribution_.devices_; } const GPUDistribution& Distribution() const { return distribution_; } T* DevicePointer(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kWrite); return shards_.at(distribution_.devices_.Index(device)).Raw(); } const T* ConstDevicePointer(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_.at(distribution_.devices_.Index(device)).Raw(); } common::Span<T> DeviceSpan(int device) { GPUSet devices = distribution_.devices_; CHECK(devices.Contains(device)); LazySyncDevice(device, GPUAccess::kWrite); return {shards_.at(devices.Index(device)).Raw(), static_cast<typename common::Span<T>::index_type>(DeviceSize(device))}; } common::Span<const T> ConstDeviceSpan(int device) { GPUSet devices = distribution_.devices_; CHECK(devices.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); using SpanInd = typename common::Span<const T>::index_type; return {shards_.at(devices.Index(device)).Raw(), static_cast<SpanInd>(DeviceSize(device))}; } size_t DeviceSize(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_.at(distribution_.devices_.Index(device)).DataSize(); } size_t DeviceStart(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_.at(distribution_.devices_.Index(device)).Start(); } thrust::device_ptr<T> tbegin(int device) { // NOLINT return thrust::device_ptr<T>(DevicePointer(device)); } thrust::device_ptr<const T> tcbegin(int device) { // NOLINT return thrust::device_ptr<const T>(ConstDevicePointer(device)); } thrust::device_ptr<T> tend(int device) { // NOLINT return tbegin(device) + DeviceSize(device); } thrust::device_ptr<const T> tcend(int device) { // NOLINT return tcbegin(device) + DeviceSize(device); } void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) { CHECK_EQ(end - begin, Size()); if (perm_h_.CanWrite()) { dh::safe_cuda(hipMemcpy(data_h_.data(), begin.get(), (end - begin) * sizeof(T), hipMemcpyDeviceToHost)); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.ScatterFrom(begin.get()); }); } } void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) { CHECK_EQ(end - begin, Size()); if (perm_h_.CanWrite()) { dh::safe_cuda(hipMemcpy(begin.get(), data_h_.data(), data_h_.size() * sizeof(T), hipMemcpyHostToDevice)); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.GatherTo(begin); }); } } void Fill(T v) { // NOLINT if (perm_h_.CanWrite()) { std::fill(data_h_.begin(), data_h_.end(), v); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Fill(v); }); } } void Copy(HostDeviceVectorImpl<T>* other) { CHECK_EQ(Size(), other->Size()); // Data is on host. 
if (perm_h_.CanWrite() && other->perm_h_.CanWrite()) { std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin()); return; } // Data is on device; if (distribution_ != other->distribution_) { distribution_ = GPUDistribution(); Reshard(other->Distribution()); size_d_ = other->size_d_; } dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Copy(&other->shards_.at(i)); }); } void Copy(const std::vector<T>& other) { CHECK_EQ(Size(), other.size()); if (perm_h_.CanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.ScatterFrom(other.data()); }); } } void Copy(std::initializer_list<T> other) { CHECK_EQ(Size(), other.size()); if (perm_h_.CanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.ScatterFrom(other.begin()); }); } } std::vector<T>& HostVector() { LazySyncHost(GPUAccess::kWrite); return data_h_; } const std::vector<T>& ConstHostVector() { LazySyncHost(GPUAccess::kRead); return data_h_; } void Reshard(const GPUDistribution& distribution) { if (distribution_ == distribution) { return; } CHECK(distribution_.IsEmpty() || distribution.IsEmpty()); if (distribution.IsEmpty()) { LazySyncHost(GPUAccess::kWrite); } distribution_ = distribution; InitShards(); } void Reshard(GPUSet new_devices) { if (distribution_.Devices() == new_devices) { return; } Reshard(GPUDistribution::Block(new_devices)); } void Resize(size_t new_size, T v) { if (new_size == Size()) { return; } if (distribution_.IsFixedSize()) { CHECK_EQ(new_size, distribution_.offsets_.back()); } if (Size() == 0 && !distribution_.IsEmpty()) { // fast on-device resize perm_h_ = Permissions(false); size_d_ = new_size; InitShards(); Fill(v); } else { // resize on host LazySyncHost(GPUAccess::kWrite); data_h_.resize(new_size, v); } } void LazySyncHost(GPUAccess access) { if (perm_h_.CanAccess(access)) { return; } if (perm_h_.CanRead()) { // data is present, just need to deny access to the device dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Perm().DenyComplementary(access); }); perm_h_.Grant(access); return; } if (data_h_.size() != size_d_) { data_h_.resize(size_d_); } dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.LazySyncHost(access); }); perm_h_.Grant(access); } void LazySyncDevice(int device, GPUAccess access) { GPUSet devices = distribution_.Devices(); CHECK(devices.Contains(device)); shards_.at(devices.Index(device)).LazySyncDevice(access); } bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); } bool DeviceCanAccess(int device, GPUAccess access) { GPUSet devices = distribution_.Devices(); if (!devices.Contains(device)) { return false; } return shards_.at(devices.Index(device)).Perm().CanAccess(access); } private: std::vector<T> data_h_; Permissions perm_h_; // the total size of the data stored on the devices size_t size_d_; GPUDistribution distribution_; // protects size_d_ and perm_h_ when updated from multiple threads std::mutex mutex_; std::vector<DeviceShard> shards_; }; template <typename T> HostDeviceVector<T>::HostDeviceVector (size_t size, T v, GPUDistribution distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(size, v, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector (std::initializer_list<T> init, GPUDistribution distribution) : impl_(nullptr) { impl_ = new 
HostDeviceVectorImpl<T>(init, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector (const std::vector<T>& init, GPUDistribution distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(*other.impl_); } template <typename T> HostDeviceVector<T>& HostDeviceVector<T>::operator= (const HostDeviceVector<T>& other) { if (this == &other) { return *this; } delete impl_; impl_ = new HostDeviceVectorImpl<T>(*other.impl_); return *this; } template <typename T> HostDeviceVector<T>::~HostDeviceVector() { delete impl_; impl_ = nullptr; } template <typename T> size_t HostDeviceVector<T>::Size() const { return impl_->Size(); } template <typename T> GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); } template <typename T> const GPUDistribution& HostDeviceVector<T>::Distribution() const { return impl_->Distribution(); } template <typename T> T* HostDeviceVector<T>::DevicePointer(int device) { return impl_->DevicePointer(device); } template <typename T> const T* HostDeviceVector<T>::ConstDevicePointer(int device) const { return impl_->ConstDevicePointer(device); } template <typename T> common::Span<T> HostDeviceVector<T>::DeviceSpan(int device) { return impl_->DeviceSpan(device); } template <typename T> common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan(int device) const { return impl_->ConstDeviceSpan(device); } template <typename T> size_t HostDeviceVector<T>::DeviceStart(int device) const { return impl_->DeviceStart(device); } template <typename T> size_t HostDeviceVector<T>::DeviceSize(int device) const { return impl_->DeviceSize(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT return impl_->tbegin(device); } template <typename T> thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin(int device) const { // NOLINT return impl_->tcbegin(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT return impl_->tend(device); } template <typename T> thrust::device_ptr<const T> HostDeviceVector<T>::tcend(int device) const { // NOLINT return impl_->tcend(device); } template <typename T> void HostDeviceVector<T>::ScatterFrom (thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) { impl_->ScatterFrom(begin, end); } template <typename T> void HostDeviceVector<T>::GatherTo (thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const { impl_->GatherTo(begin, end); } template <typename T> void HostDeviceVector<T>::Fill(T v) { impl_->Fill(v); } template <typename T> void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) { impl_->Copy(other.impl_); } template <typename T> void HostDeviceVector<T>::Copy(const std::vector<T>& other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Copy(std::initializer_list<T> other) { impl_->Copy(other); } template <typename T> std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); } template <typename T> const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const { return impl_->ConstHostVector(); } template <typename T> bool HostDeviceVector<T>::HostCanAccess(GPUAccess access) const { return impl_->HostCanAccess(access); } template <typename T> bool HostDeviceVector<T>::DeviceCanAccess(int device, GPUAccess access) const { return impl_->DeviceCanAccess(device, access); 
} template <typename T> void HostDeviceVector<T>::Reshard(GPUSet new_devices) const { impl_->Reshard(new_devices); } template <typename T> void HostDeviceVector<T>::Reshard(const GPUDistribution& distribution) const { impl_->Reshard(distribution); } template <typename T> void HostDeviceVector<T>::Resize(size_t new_size, T v) { impl_->Resize(new_size, v); } // explicit instantiations are required, as HostDeviceVector isn't header-only template class HostDeviceVector<bst_float>; template class HostDeviceVector<GradientPair>; template class HostDeviceVector<int>; template class HostDeviceVector<Entry>; template class HostDeviceVector<size_t>; } // namespace xgboost
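// A minimal, self-contained sketch of the access-permission lattice that the
// LazySyncHost / LazySyncDevice logic above relies on. It assumes GPUAccess is
// ordered kNone < kRead < kWrite (as the arithmetic in Permissions suggests);
// the names Access and Perm below are illustrative stand-ins, not part of the
// original header.
#include <algorithm>
#include <cassert>

enum Access { kNone = 0, kRead = 1, kWrite = 2 };

struct Perm {
  Access a{kNone};
  bool CanRead() const { return a >= kRead; }
  bool CanWrite() const { return a == kWrite; }
  void Grant(Access acc) { a = std::max(a, acc); }
  // Denying the complement of `acc`: if the other side was granted kWrite,
  // this side drops to kNone; if it was granted kRead, this side keeps kRead.
  void DenyComplementary(Access acc) { a = std::min(a, static_cast<Access>(kWrite - acc)); }
};

int main() {
  Perm host, device;
  host.Grant(kWrite);                // host mutates the data
  device.DenyComplementary(kWrite);  // the device copy is now stale
  assert(host.CanWrite() && !device.CanRead());
  device.Grant(kRead);               // lazy host-to-device copy grants device read
  host.DenyComplementary(kRead);     // host keeps read access but loses exclusive write
  assert(host.CanRead() && !host.CanWrite() && device.CanRead());
  return 0;
}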
948eb67a6af0a809607f2a7c1592fca8a021e60c.cu
/*! * Copyright 2017 XGBoost contributors */ #include "./host_device_vector.h" #include <thrust/fill.h> #include <xgboost/data.h> #include <algorithm> #include <cstdint> #include <mutex> #include "./device_helpers.cuh" namespace xgboost { // the handler to call instead of cudaSetDevice; only used for testing static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT void SetCudaSetDeviceHandler(void (*handler)(int)) { cudaSetDeviceHandler = handler; } // wrapper over access with useful methods class Permissions { GPUAccess access_; explicit Permissions(GPUAccess access) : access_{access} {} public: Permissions() : access_{GPUAccess::kNone} {} explicit Permissions(bool perm) : access_(perm ? GPUAccess::kWrite : GPUAccess::kNone) {} bool CanRead() const { return access_ >= kRead; } bool CanWrite() const { return access_ == kWrite; } bool CanAccess(GPUAccess access) const { return access_ >= access; } void Grant(GPUAccess access) { access_ = std::max(access_, access); } void DenyComplementary(GPUAccess compl_access) { access_ = std::min(access_, GPUAccess::kWrite - compl_access); } Permissions Complementary() const { return Permissions(GPUAccess::kWrite - access_); } }; template <typename T> struct HostDeviceVectorImpl { struct DeviceShard { DeviceShard() : proper_size_{0}, device_{-1}, start_{0}, perm_d_{false}, cached_size_{static_cast<size_t>(~0)}, vec_{nullptr} {} void Init(HostDeviceVectorImpl<T>* vec, int device) { if (vec_ == nullptr) { vec_ = vec; } CHECK_EQ(vec, vec_); device_ = device; LazyResize(vec_->Size()); perm_d_ = vec_->perm_h_.Complementary(); } void Init(HostDeviceVectorImpl<T>* vec, const DeviceShard& other) { if (vec_ == nullptr) { vec_ = vec; } CHECK_EQ(vec, vec_); device_ = other.device_; cached_size_ = other.cached_size_; start_ = other.start_; proper_size_ = other.proper_size_; SetDevice(); data_.resize(other.data_.size()); perm_d_ = other.perm_d_; } void ScatterFrom(const T* begin) { // TODO(canonizer): avoid full copy of host data LazySyncDevice(GPUAccess::kWrite); SetDevice(); dh::safe_cuda(cudaMemcpyAsync(data_.data().get(), begin + start_, data_.size() * sizeof(T), cudaMemcpyDefault)); } void GatherTo(thrust::device_ptr<T> begin) { LazySyncDevice(GPUAccess::kRead); SetDevice(); dh::safe_cuda(cudaMemcpyAsync(begin.get() + start_, data_.data().get(), proper_size_ * sizeof(T), cudaMemcpyDefault)); } void Fill(T v) { // TODO(canonizer): avoid full copy of host data LazySyncDevice(GPUAccess::kWrite); SetDevice(); thrust::fill(data_.begin(), data_.end(), v); } void Copy(DeviceShard* other) { // TODO(canonizer): avoid full copy of host data for this (but not for other) LazySyncDevice(GPUAccess::kWrite); other->LazySyncDevice(GPUAccess::kRead); SetDevice(); dh::safe_cuda(cudaMemcpyAsync(data_.data().get(), other->data_.data().get(), data_.size() * sizeof(T), cudaMemcpyDefault)); } void LazySyncHost(GPUAccess access) { SetDevice(); dh::safe_cuda(cudaMemcpy(vec_->data_h_.data() + start_, data_.data().get(), proper_size_ * sizeof(T), cudaMemcpyDeviceToHost)); perm_d_.DenyComplementary(access); } void LazyResize(size_t new_size) { if (new_size == cached_size_) { return; } // resize is required int ndevices = vec_->distribution_.devices_.Size(); int device_index = vec_->distribution_.devices_.Index(device_); start_ = vec_->distribution_.ShardStart(new_size, device_index); proper_size_ = vec_->distribution_.ShardProperSize(new_size, device_index); // The size on this device. 
size_t size_d = vec_->distribution_.ShardSize(new_size, device_index); SetDevice(); data_.resize(size_d); cached_size_ = new_size; } void LazySyncDevice(GPUAccess access) { if (perm_d_.CanAccess(access)) { return; } if (perm_d_.CanRead()) { // deny read to the host perm_d_.Grant(access); std::lock_guard<std::mutex> lock(vec_->mutex_); vec_->perm_h_.DenyComplementary(access); return; } // data is on the host size_t size_h = vec_->data_h_.size(); LazyResize(size_h); SetDevice(); dh::safe_cuda( cudaMemcpy(data_.data().get(), vec_->data_h_.data() + start_, data_.size() * sizeof(T), cudaMemcpyHostToDevice)); perm_d_.Grant(access); std::lock_guard<std::mutex> lock(vec_->mutex_); vec_->perm_h_.DenyComplementary(access); vec_->size_d_ = size_h; } void SetDevice() { if (cudaSetDeviceHandler == nullptr) { dh::safe_cuda(cudaSetDevice(device_)); } else { (*cudaSetDeviceHandler)(device_); } } T* Raw() { return data_.data().get(); } size_t Start() const { return start_; } size_t DataSize() const { return data_.size(); } Permissions& Perm() { return perm_d_; } Permissions const& Perm() const { return perm_d_; } private: int device_; thrust::device_vector<T> data_; // cached vector size size_t cached_size_; size_t start_; // size of the portion to copy back to the host size_t proper_size_; Permissions perm_d_; HostDeviceVectorImpl<T>* vec_; }; HostDeviceVectorImpl(size_t size, T v, GPUDistribution distribution) : distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) { if (!distribution_.IsEmpty()) { size_d_ = size; InitShards(); Fill(v); } else { data_h_.resize(size, v); } } // required, as a new std::mutex has to be created HostDeviceVectorImpl(const HostDeviceVectorImpl<T>& other) : data_h_(other.data_h_), perm_h_(other.perm_h_), size_d_(other.size_d_), distribution_(other.distribution_), mutex_() { shards_.resize(other.shards_.size()); dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Init(this, other.shards_.at(i)); }); } // Initializer can be std::vector<T> or std::initializer_list<T> template <class Initializer> HostDeviceVectorImpl(const Initializer& init, GPUDistribution distribution) : distribution_(distribution), perm_h_(distribution.IsEmpty()), size_d_(0) { if (!distribution_.IsEmpty()) { size_d_ = init.size(); InitShards(); Copy(init); } else { data_h_ = init; } } void InitShards() { int ndevices = distribution_.devices_.Size(); shards_.resize(ndevices); dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Init(this, distribution_.devices_.DeviceId(i)); }); } size_t Size() const { return perm_h_.CanRead() ? 
data_h_.size() : size_d_; } GPUSet Devices() const { return distribution_.devices_; } const GPUDistribution& Distribution() const { return distribution_; } T* DevicePointer(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kWrite); return shards_.at(distribution_.devices_.Index(device)).Raw(); } const T* ConstDevicePointer(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_.at(distribution_.devices_.Index(device)).Raw(); } common::Span<T> DeviceSpan(int device) { GPUSet devices = distribution_.devices_; CHECK(devices.Contains(device)); LazySyncDevice(device, GPUAccess::kWrite); return {shards_.at(devices.Index(device)).Raw(), static_cast<typename common::Span<T>::index_type>(DeviceSize(device))}; } common::Span<const T> ConstDeviceSpan(int device) { GPUSet devices = distribution_.devices_; CHECK(devices.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); using SpanInd = typename common::Span<const T>::index_type; return {shards_.at(devices.Index(device)).Raw(), static_cast<SpanInd>(DeviceSize(device))}; } size_t DeviceSize(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_.at(distribution_.devices_.Index(device)).DataSize(); } size_t DeviceStart(int device) { CHECK(distribution_.devices_.Contains(device)); LazySyncDevice(device, GPUAccess::kRead); return shards_.at(distribution_.devices_.Index(device)).Start(); } thrust::device_ptr<T> tbegin(int device) { // NOLINT return thrust::device_ptr<T>(DevicePointer(device)); } thrust::device_ptr<const T> tcbegin(int device) { // NOLINT return thrust::device_ptr<const T>(ConstDevicePointer(device)); } thrust::device_ptr<T> tend(int device) { // NOLINT return tbegin(device) + DeviceSize(device); } thrust::device_ptr<const T> tcend(int device) { // NOLINT return tcbegin(device) + DeviceSize(device); } void ScatterFrom(thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) { CHECK_EQ(end - begin, Size()); if (perm_h_.CanWrite()) { dh::safe_cuda(cudaMemcpy(data_h_.data(), begin.get(), (end - begin) * sizeof(T), cudaMemcpyDeviceToHost)); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.ScatterFrom(begin.get()); }); } } void GatherTo(thrust::device_ptr<T> begin, thrust::device_ptr<T> end) { CHECK_EQ(end - begin, Size()); if (perm_h_.CanWrite()) { dh::safe_cuda(cudaMemcpy(begin.get(), data_h_.data(), data_h_.size() * sizeof(T), cudaMemcpyHostToDevice)); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.GatherTo(begin); }); } } void Fill(T v) { // NOLINT if (perm_h_.CanWrite()) { std::fill(data_h_.begin(), data_h_.end(), v); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Fill(v); }); } } void Copy(HostDeviceVectorImpl<T>* other) { CHECK_EQ(Size(), other->Size()); // Data is on host. 
if (perm_h_.CanWrite() && other->perm_h_.CanWrite()) { std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin()); return; } // Data is on device; if (distribution_ != other->distribution_) { distribution_ = GPUDistribution(); Reshard(other->Distribution()); size_d_ = other->size_d_; } dh::ExecuteIndexShards(&shards_, [&](int i, DeviceShard& shard) { shard.Copy(&other->shards_.at(i)); }); } void Copy(const std::vector<T>& other) { CHECK_EQ(Size(), other.size()); if (perm_h_.CanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.ScatterFrom(other.data()); }); } } void Copy(std::initializer_list<T> other) { CHECK_EQ(Size(), other.size()); if (perm_h_.CanWrite()) { std::copy(other.begin(), other.end(), data_h_.begin()); } else { dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.ScatterFrom(other.begin()); }); } } std::vector<T>& HostVector() { LazySyncHost(GPUAccess::kWrite); return data_h_; } const std::vector<T>& ConstHostVector() { LazySyncHost(GPUAccess::kRead); return data_h_; } void Reshard(const GPUDistribution& distribution) { if (distribution_ == distribution) { return; } CHECK(distribution_.IsEmpty() || distribution.IsEmpty()); if (distribution.IsEmpty()) { LazySyncHost(GPUAccess::kWrite); } distribution_ = distribution; InitShards(); } void Reshard(GPUSet new_devices) { if (distribution_.Devices() == new_devices) { return; } Reshard(GPUDistribution::Block(new_devices)); } void Resize(size_t new_size, T v) { if (new_size == Size()) { return; } if (distribution_.IsFixedSize()) { CHECK_EQ(new_size, distribution_.offsets_.back()); } if (Size() == 0 && !distribution_.IsEmpty()) { // fast on-device resize perm_h_ = Permissions(false); size_d_ = new_size; InitShards(); Fill(v); } else { // resize on host LazySyncHost(GPUAccess::kWrite); data_h_.resize(new_size, v); } } void LazySyncHost(GPUAccess access) { if (perm_h_.CanAccess(access)) { return; } if (perm_h_.CanRead()) { // data is present, just need to deny access to the device dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.Perm().DenyComplementary(access); }); perm_h_.Grant(access); return; } if (data_h_.size() != size_d_) { data_h_.resize(size_d_); } dh::ExecuteIndexShards(&shards_, [&](int idx, DeviceShard& shard) { shard.LazySyncHost(access); }); perm_h_.Grant(access); } void LazySyncDevice(int device, GPUAccess access) { GPUSet devices = distribution_.Devices(); CHECK(devices.Contains(device)); shards_.at(devices.Index(device)).LazySyncDevice(access); } bool HostCanAccess(GPUAccess access) { return perm_h_.CanAccess(access); } bool DeviceCanAccess(int device, GPUAccess access) { GPUSet devices = distribution_.Devices(); if (!devices.Contains(device)) { return false; } return shards_.at(devices.Index(device)).Perm().CanAccess(access); } private: std::vector<T> data_h_; Permissions perm_h_; // the total size of the data stored on the devices size_t size_d_; GPUDistribution distribution_; // protects size_d_ and perm_h_ when updated from multiple threads std::mutex mutex_; std::vector<DeviceShard> shards_; }; template <typename T> HostDeviceVector<T>::HostDeviceVector (size_t size, T v, GPUDistribution distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(size, v, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector (std::initializer_list<T> init, GPUDistribution distribution) : impl_(nullptr) { impl_ = new 
HostDeviceVectorImpl<T>(init, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector (const std::vector<T>& init, GPUDistribution distribution) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(init, distribution); } template <typename T> HostDeviceVector<T>::HostDeviceVector(const HostDeviceVector<T>& other) : impl_(nullptr) { impl_ = new HostDeviceVectorImpl<T>(*other.impl_); } template <typename T> HostDeviceVector<T>& HostDeviceVector<T>::operator= (const HostDeviceVector<T>& other) { if (this == &other) { return *this; } delete impl_; impl_ = new HostDeviceVectorImpl<T>(*other.impl_); return *this; } template <typename T> HostDeviceVector<T>::~HostDeviceVector() { delete impl_; impl_ = nullptr; } template <typename T> size_t HostDeviceVector<T>::Size() const { return impl_->Size(); } template <typename T> GPUSet HostDeviceVector<T>::Devices() const { return impl_->Devices(); } template <typename T> const GPUDistribution& HostDeviceVector<T>::Distribution() const { return impl_->Distribution(); } template <typename T> T* HostDeviceVector<T>::DevicePointer(int device) { return impl_->DevicePointer(device); } template <typename T> const T* HostDeviceVector<T>::ConstDevicePointer(int device) const { return impl_->ConstDevicePointer(device); } template <typename T> common::Span<T> HostDeviceVector<T>::DeviceSpan(int device) { return impl_->DeviceSpan(device); } template <typename T> common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan(int device) const { return impl_->ConstDeviceSpan(device); } template <typename T> size_t HostDeviceVector<T>::DeviceStart(int device) const { return impl_->DeviceStart(device); } template <typename T> size_t HostDeviceVector<T>::DeviceSize(int device) const { return impl_->DeviceSize(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tbegin(int device) { // NOLINT return impl_->tbegin(device); } template <typename T> thrust::device_ptr<const T> HostDeviceVector<T>::tcbegin(int device) const { // NOLINT return impl_->tcbegin(device); } template <typename T> thrust::device_ptr<T> HostDeviceVector<T>::tend(int device) { // NOLINT return impl_->tend(device); } template <typename T> thrust::device_ptr<const T> HostDeviceVector<T>::tcend(int device) const { // NOLINT return impl_->tcend(device); } template <typename T> void HostDeviceVector<T>::ScatterFrom (thrust::device_ptr<const T> begin, thrust::device_ptr<const T> end) { impl_->ScatterFrom(begin, end); } template <typename T> void HostDeviceVector<T>::GatherTo (thrust::device_ptr<T> begin, thrust::device_ptr<T> end) const { impl_->GatherTo(begin, end); } template <typename T> void HostDeviceVector<T>::Fill(T v) { impl_->Fill(v); } template <typename T> void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) { impl_->Copy(other.impl_); } template <typename T> void HostDeviceVector<T>::Copy(const std::vector<T>& other) { impl_->Copy(other); } template <typename T> void HostDeviceVector<T>::Copy(std::initializer_list<T> other) { impl_->Copy(other); } template <typename T> std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); } template <typename T> const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const { return impl_->ConstHostVector(); } template <typename T> bool HostDeviceVector<T>::HostCanAccess(GPUAccess access) const { return impl_->HostCanAccess(access); } template <typename T> bool HostDeviceVector<T>::DeviceCanAccess(int device, GPUAccess access) const { return impl_->DeviceCanAccess(device, access); 
} template <typename T> void HostDeviceVector<T>::Reshard(GPUSet new_devices) const { impl_->Reshard(new_devices); } template <typename T> void HostDeviceVector<T>::Reshard(const GPUDistribution& distribution) const { impl_->Reshard(distribution); } template <typename T> void HostDeviceVector<T>::Resize(size_t new_size, T v) { impl_->Resize(new_size, v); } // explicit instantiations are required, as HostDeviceVector isn't header-only template class HostDeviceVector<bst_float>; template class HostDeviceVector<GradientPair>; template class HostDeviceVector<int>; template class HostDeviceVector<Entry>; template class HostDeviceVector<size_t>; } // namespace xgboost
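// A short usage sketch of the HostDeviceVector API instantiated above, showing
// the intended host-write -> device-read -> host-read round trip. The
// default-argument constructor, the GPUSet::Range call, and the header path are
// assumptions based on the xgboost sources this file belongs to; adjust them to
// your checkout.
#include "./host_device_vector.h"

void HostDeviceVectorRoundTrip() {
  xgboost::HostDeviceVector<float> vec;
  vec.Resize(1024, 0.0f);                     // host-side resize; host holds write access
  vec.HostVector()[0] = 3.14f;                // mutate on the host
  vec.Reshard(xgboost::GPUSet::Range(0, 1));  // block-distribute over device 0
  auto d_span = vec.ConstDeviceSpan(0);       // lazy H2D copy; device gains read access
  (void)d_span;
  const auto& h = vec.ConstHostVector();      // host read stays valid (read/read sharing)
  (void)h;
}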
21172d942433938003103afbe11d111d24da2369.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file np_repeat_op.cu * \brief GPU Implementation of numpy-compatible repeat operator */ #include <hipcub/hipcub.hpp> #include "./np_repeat_op-inl.h" namespace mxnet { namespace op { NNVM_REGISTER_OP(_npi_repeats) .set_attr<FCompute>("FCompute<gpu>", NumpyRepeatsOpForward<gpu>); } // namespace op } // namespace mxnet
21172d942433938003103afbe11d111d24da2369.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file np_repeat_op.cu * \brief GPU Implementation of numpy-compatible repeat operator */ #include <cub/cub.cuh> #include "./np_repeat_op-inl.h" namespace mxnet { namespace op { NNVM_REGISTER_OP(_npi_repeats) .set_attr<FCompute>("FCompute<gpu>", NumpyRepeatsOpForward<gpu>); } // namespace op } // namespace mxnet
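// The .hip and .cu variants of np_repeat_op above differ only in the cub include
// (<hipcub/hipcub.hpp> vs <cub/cub.cuh>). A thin portability header such as the
// sketch below keeps a single source compiling for both toolchains; the header
// name and the pcub alias are hypothetical, and it assumes hipcub mirrors cub's
// namespace layout (which it is designed to do).
#ifndef PORTABLE_CUB_H_
#define PORTABLE_CUB_H_
#if defined(__HIP_PLATFORM_AMD__) || defined(__HIP_PLATFORM_HCC__)
#include <hipcub/hipcub.hpp>
namespace pcub = hipcub;
#else
#include <cub/cub.cuh>
namespace pcub = cub;
#endif
#endif  // PORTABLE_CUB_H_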
6e88024152ef63c50e4fb14a45c3220813f770d2.hip
// !!! This is a file automatically generated by hipify!!! #include "stdio.h" #include "hip/hip_runtime.h" #include "stdlib.h" const int NT = 16; const int N = NT * 16; __global__ void add(int* a, int* b, int* c) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } } int main() { int* host_a; int* host_b; int* host_c; int* dev_a; int* dev_b; int* dev_c; // allocate memory on host hipHostMalloc((void**)&host_a, N * sizeof(int), hipHostMallocDefault); hipHostMalloc((void**)&host_b, N * sizeof(int), hipHostMallocDefault); hipHostMalloc((void**)&host_c, N * sizeof(int), hipHostMallocDefault); // allocate memory on device hipMalloc((void**)&dev_a, N * sizeof(int)); hipMalloc((void**)&dev_b, N * sizeof(int)); hipMalloc((void**)&dev_c, N * sizeof(int)); // fill the arrays 'a' and 'b' on the CPU for (int i = 0; i < N; i++) { host_a[i] = -i + 1; host_b[i] = i * i; } // copy data to device hipMemcpy((void*)dev_a, (void*)host_a, sizeof(int)*N, hipMemcpyHostToDevice); hipMemcpy((void*)dev_b, (void*)host_b, sizeof(int)*N, hipMemcpyHostToDevice); hipEvent_t start; hipEvent_t stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); hipLaunchKernelGGL(( add), dim3((N + (NT - 1))/NT),dim3(NT), 0, 0, dev_a, dev_b, dev_c); hipEventRecord(stop); hipEventSynchronize(stop); float time = 0; hipEventElapsedTime(&time, start, stop); printf("GPU compute time: %f\n", time); hipEventRecord(stop); hipMemcpy((void*)host_c, (void*)dev_c, sizeof(int)*N, hipMemcpyDeviceToHost); // display the results for (int i=0; i<N; i++) { printf("%d ", host_c[i]); } hipHostFree(host_a); hipHostFree(host_b); hipHostFree(host_c); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); return 0; }
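// The hipified example above ignores every runtime return code. A minimal
// error-checking pattern for the same calls, using only standard HIP runtime
// functions (hipGetErrorString, hipGetLastError, hipDeviceSynchronize); the
// HIP_CHECK name is just a local macro introduced here.
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

#define HIP_CHECK(call)                                                     \
  do {                                                                      \
    hipError_t err_ = (call);                                               \
    if (err_ != hipSuccess) {                                               \
      fprintf(stderr, "HIP error %s at %s:%d\n",                            \
              hipGetErrorString(err_), __FILE__, __LINE__);                 \
      exit(EXIT_FAILURE);                                                   \
    }                                                                       \
  } while (0)

// Applied to the example above:
//   HIP_CHECK(hipMalloc((void**)&dev_a, N * sizeof(int)));
//   hipLaunchKernelGGL(add, dim3((N + NT - 1) / NT), dim3(NT), 0, 0, dev_a, dev_b, dev_c);
//   HIP_CHECK(hipGetLastError());       // catches launch-configuration errors
//   HIP_CHECK(hipDeviceSynchronize());  // surfaces faults from the kernel itself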
6e88024152ef63c50e4fb14a45c3220813f770d2.cu
#include "stdio.h" #include "cuda.h" #include "stdlib.h" const int NT = 16; const int N = NT * 16; __global__ void add(int* a, int* b, int* c) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } } int main() { int* host_a; int* host_b; int* host_c; int* dev_a; int* dev_b; int* dev_c; // allocate memory on host cudaHostAlloc((void**)&host_a, N * sizeof(int), cudaHostAllocDefault); cudaHostAlloc((void**)&host_b, N * sizeof(int), cudaHostAllocDefault); cudaHostAlloc((void**)&host_c, N * sizeof(int), cudaHostAllocDefault); // allocate memory on device cudaMalloc((void**)&dev_a, N * sizeof(int)); cudaMalloc((void**)&dev_b, N * sizeof(int)); cudaMalloc((void**)&dev_c, N * sizeof(int)); // fill the arrays 'a' and 'b' on the CPU for (int i = 0; i < N; i++) { host_a[i] = -i + 1; host_b[i] = i * i; } // copy data to device cudaMemcpy((void*)dev_a, (void*)host_a, sizeof(int)*N, cudaMemcpyHostToDevice); cudaMemcpy((void*)dev_b, (void*)host_b, sizeof(int)*N, cudaMemcpyHostToDevice); cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); add<<<(N + (NT - 1))/NT,NT>>>(dev_a, dev_b, dev_c); cudaEventRecord(stop); cudaEventSynchronize(stop); float time = 0; cudaEventElapsedTime(&time, start, stop); printf("GPU compute time: %f\n", time); cudaEventRecord(stop); cudaMemcpy((void*)host_c, (void*)dev_c, sizeof(int)*N, cudaMemcpyDeviceToHost); // display the results for (int i=0; i<N; i++) { printf("%d ", host_c[i]); } cudaFreeHost(host_a); cudaFreeHost(host_b); cudaFreeHost(host_c); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); return 0; }
dcf1f961f568db4dbd1f15204e9eb67cc03cb229.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "helpers.cuh" //Check whether RAM is full void isMemoryFull(void *ptr){ if (ptr==NULL){ fprintf(stderr, "Memory Full.\nYour array is too large. Please try a smaller array.\n"); exit(EXIT_FAILURE); } } //check whether file access is ok void isFileOK(FILE *fp){ if (fp==NULL){ perror("A file access error occurred\n"); exit(EXIT_FAILURE); } } static const char *getcudaError(hipError_t error){ switch (error){ case hipSuccess: return "hipSuccess"; case hipErrorMissingConfiguration: return "hipErrorMissingConfiguration"; case hipErrorMemoryAllocation: return "hipErrorMemoryAllocation"; case hipErrorInitializationError: return "hipErrorInitializationError"; case hipErrorLaunchFailure: return "hipErrorLaunchFailure"; case hipErrorPriorLaunchFailure: return "hipErrorPriorLaunchFailure"; case hipErrorLaunchTimeOut: return "hipErrorLaunchTimeOut"; case hipErrorLaunchOutOfResources: return "hipErrorLaunchOutOfResources"; case hipErrorInvalidDeviceFunction: return "hipErrorInvalidDeviceFunction"; case hipErrorInvalidConfiguration: return "hipErrorInvalidConfiguration"; case hipErrorInvalidDevice: return "hipErrorInvalidDevice"; case hipErrorInvalidValue: return "hipErrorInvalidValue"; case hipErrorInvalidPitchValue: return "hipErrorInvalidPitchValue"; case hipErrorInvalidSymbol: return "hipErrorInvalidSymbol"; case hipErrorMapFailed: return "hipErrorMapFailed"; case hipErrorUnmapFailed: return "hipErrorUnmapFailed"; case hipErrorInvalidHostPointer: return "hipErrorInvalidHostPointer"; case hipErrorInvalidDevicePointer: return "hipErrorInvalidDevicePointer"; case hipErrorInvalidTexture: return "hipErrorInvalidTexture"; case hipErrorInvalidTextureBinding: return "hipErrorInvalidTextureBinding"; case hipErrorInvalidChannelDescriptor: return "hipErrorInvalidChannelDescriptor"; case hipErrorInvalidMemcpyDirection: return "hipErrorInvalidMemcpyDirection"; case hipErrorAddressOfConstant: return "hipErrorAddressOfConstant"; case hipErrorTextureFetchFailed: return "hipErrorTextureFetchFailed"; case hipErrorTextureNotBound: return "hipErrorTextureNotBound"; case hipErrorSynchronizationError: return "hipErrorSynchronizationError"; case hipErrorInvalidFilterSetting: return "hipErrorInvalidFilterSetting"; case hipErrorInvalidNormSetting: return "hipErrorInvalidNormSetting"; case hipErrorMixedDeviceExecution: return "hipErrorMixedDeviceExecution"; case hipErrorDeinitialized: return "hipErrorDeinitialized"; case hipErrorUnknown: return "hipErrorUnknown"; case hipErrorNotYetImplemented: return "hipErrorNotYetImplemented"; case hipErrorMemoryValueTooLarge: return "hipErrorMemoryValueTooLarge"; case hipErrorInvalidResourceHandle: return "hipErrorInvalidResourceHandle"; case hipErrorNotReady: return "hipErrorNotReady"; case hipErrorInsufficientDriver: return "hipErrorInsufficientDriver"; case hipErrorSetOnActiveProcess: return "hipErrorSetOnActiveProcess"; case hipErrorInvalidSurface: return "hipErrorInvalidSurface"; case hipErrorNoDevice: return "hipErrorNoDevice"; case hipErrorECCNotCorrectable: return "hipErrorECCNotCorrectable"; case hipErrorSharedObjectSymbolNotFound: return "hipErrorSharedObjectSymbolNotFound"; case hipErrorSharedObjectInitFailed: return "hipErrorSharedObjectInitFailed"; case hipErrorUnsupportedLimit: return "hipErrorUnsupportedLimit"; case hipErrorDuplicateVariableName: return "hipErrorDuplicateVariableName"; case hipErrorDuplicateTextureName: return "hipErrorDuplicateTextureName"; case 
hipErrorDuplicateSurfaceName: return "hipErrorDuplicateSurfaceName"; case hipErrorDevicesUnavailable: return "hipErrorDevicesUnavailable"; case hipErrorInvalidImage: return "hipErrorInvalidImage"; case hipErrorNoBinaryForGpu: return "hipErrorNoBinaryForGpu"; case hipErrorIncompatibleDriverContext: return "hipErrorIncompatibleDriverContext"; case hipErrorPeerAccessAlreadyEnabled: return "hipErrorPeerAccessAlreadyEnabled"; case hipErrorPeerAccessNotEnabled: return "hipErrorPeerAccessNotEnabled"; case hipErrorDeviceAlreadyInUse: return "hipErrorDeviceAlreadyInUse"; case hipErrorProfilerDisabled: return "hipErrorProfilerDisabled"; case hipErrorProfilerNotInitialized: return "hipErrorProfilerNotInitialized"; case hipErrorProfilerAlreadyStarted: return "hipErrorProfilerAlreadyStarted"; case hipErrorProfilerAlreadyStopped: return "hipErrorProfilerAlreadyStopped"; /* Since CUDA 4.0*/ case hipErrorAssert: return "hipErrorAssert"; case hipErrorTooManyPeers: return "hipErrorTooManyPeers"; case hipErrorHostMemoryAlreadyRegistered: return "hipErrorHostMemoryAlreadyRegistered"; case hipErrorHostMemoryNotRegistered: return "hipErrorHostMemoryNotRegistered"; /* Since CUDA 5.0 */ case hipErrorOperatingSystem: return "hipErrorOperatingSystem"; case hipErrorPeerAccessUnsupported: return "hipErrorPeerAccessUnsupported"; case hipErrorLaunchMaxDepthExceeded: return "hipErrorLaunchMaxDepthExceeded"; case hipErrorLaunchFileScopedTex: return "hipErrorLaunchFileScopedTex"; case hipErrorLaunchFileScopedSurf: return "hipErrorLaunchFileScopedSurf"; case hipErrorSyncDepthExceeded: return "hipErrorSyncDepthExceeded"; case hipErrorLaunchPendingCountExceeded: return "hipErrorLaunchPendingCountExceeded"; case hipErrorNotPermitted: return "hipErrorNotPermitted"; case hipErrorNotSupported: return "hipErrorNotSupported"; /* Since CUDA 6.0 */ case hipErrorHardwareStackError: return "hipErrorHardwareStackError"; case hipErrorIllegalInstruction: return "hipErrorIllegalInstruction"; case hipErrorMisalignedAddress: return "hipErrorMisalignedAddress"; case hipErrorInvalidAddressSpace: return "hipErrorInvalidAddressSpace"; case hipErrorInvalidPc: return "hipErrorInvalidPc"; case hipErrorIllegalAddress: return "hipErrorIllegalAddress"; /* Since CUDA 6.5*/ case hipErrorInvalidKernelFile: return "hipErrorInvalidKernelFile"; case hipErrorInvalidGraphicsContext: return "hipErrorInvalidGraphicsContext"; case hipErrorStartupFailure: return "hipErrorStartupFailure"; case hipErrorApiFailureBase: return "hipErrorApiFailureBase"; } return "<unknown>"; } //check whether cuda errors void checkCudaError(hipError_t status){ if (status!=hipSuccess){ fprintf(stderr,"Some Error occured in CUDA.\n:%s \nError Code : %d\n",getcudaError(status),status); exit(EXIT_FAILURE); } }
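// The hand-written error-name switch above has to be kept in sync with the HIP
// headers by hand. The runtime already provides the same mapping, so an
// equivalent checker reduces to the sketch below (same contract as checkCudaError
// in this file); checkHipError is just an illustrative name.
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

void checkHipError(hipError_t status) {
  if (status != hipSuccess) {
    fprintf(stderr, "Some error occurred in HIP.\n:%s \nError Code : %d\n",
            hipGetErrorName(status), static_cast<int>(status));
    exit(EXIT_FAILURE);
  }
}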
dcf1f961f568db4dbd1f15204e9eb67cc03cb229.cu
#include <stdio.h> #include "helpers.cuh" //Check whether RAM is full void isMemoryFull(void *ptr){ if (ptr==NULL){ fprintf(stderr, "Memory Full.\nYour array is too large. Please try a smaller array.\n"); exit(EXIT_FAILURE); } } //check whether file access is ok void isFileOK(FILE *fp){ if (fp==NULL){ perror("A file access error occurred\n"); exit(EXIT_FAILURE); } } static const char *getcudaError(cudaError_t error){ switch (error){ case cudaSuccess: return "cudaSuccess"; case cudaErrorMissingConfiguration: return "cudaErrorMissingConfiguration"; case cudaErrorMemoryAllocation: return "cudaErrorMemoryAllocation"; case cudaErrorInitializationError: return "cudaErrorInitializationError"; case cudaErrorLaunchFailure: return "cudaErrorLaunchFailure"; case cudaErrorPriorLaunchFailure: return "cudaErrorPriorLaunchFailure"; case cudaErrorLaunchTimeout: return "cudaErrorLaunchTimeout"; case cudaErrorLaunchOutOfResources: return "cudaErrorLaunchOutOfResources"; case cudaErrorInvalidDeviceFunction: return "cudaErrorInvalidDeviceFunction"; case cudaErrorInvalidConfiguration: return "cudaErrorInvalidConfiguration"; case cudaErrorInvalidDevice: return "cudaErrorInvalidDevice"; case cudaErrorInvalidValue: return "cudaErrorInvalidValue"; case cudaErrorInvalidPitchValue: return "cudaErrorInvalidPitchValue"; case cudaErrorInvalidSymbol: return "cudaErrorInvalidSymbol"; case cudaErrorMapBufferObjectFailed: return "cudaErrorMapBufferObjectFailed"; case cudaErrorUnmapBufferObjectFailed: return "cudaErrorUnmapBufferObjectFailed"; case cudaErrorInvalidHostPointer: return "cudaErrorInvalidHostPointer"; case cudaErrorInvalidDevicePointer: return "cudaErrorInvalidDevicePointer"; case cudaErrorInvalidTexture: return "cudaErrorInvalidTexture"; case cudaErrorInvalidTextureBinding: return "cudaErrorInvalidTextureBinding"; case cudaErrorInvalidChannelDescriptor: return "cudaErrorInvalidChannelDescriptor"; case cudaErrorInvalidMemcpyDirection: return "cudaErrorInvalidMemcpyDirection"; case cudaErrorAddressOfConstant: return "cudaErrorAddressOfConstant"; case cudaErrorTextureFetchFailed: return "cudaErrorTextureFetchFailed"; case cudaErrorTextureNotBound: return "cudaErrorTextureNotBound"; case cudaErrorSynchronizationError: return "cudaErrorSynchronizationError"; case cudaErrorInvalidFilterSetting: return "cudaErrorInvalidFilterSetting"; case cudaErrorInvalidNormSetting: return "cudaErrorInvalidNormSetting"; case cudaErrorMixedDeviceExecution: return "cudaErrorMixedDeviceExecution"; case cudaErrorCudartUnloading: return "cudaErrorCudartUnloading"; case cudaErrorUnknown: return "cudaErrorUnknown"; case cudaErrorNotYetImplemented: return "cudaErrorNotYetImplemented"; case cudaErrorMemoryValueTooLarge: return "cudaErrorMemoryValueTooLarge"; case cudaErrorInvalidResourceHandle: return "cudaErrorInvalidResourceHandle"; case cudaErrorNotReady: return "cudaErrorNotReady"; case cudaErrorInsufficientDriver: return "cudaErrorInsufficientDriver"; case cudaErrorSetOnActiveProcess: return "cudaErrorSetOnActiveProcess"; case cudaErrorInvalidSurface: return "cudaErrorInvalidSurface"; case cudaErrorNoDevice: return "cudaErrorNoDevice"; case cudaErrorECCUncorrectable: return "cudaErrorECCUncorrectable"; case cudaErrorSharedObjectSymbolNotFound: return "cudaErrorSharedObjectSymbolNotFound"; case cudaErrorSharedObjectInitFailed: return "cudaErrorSharedObjectInitFailed"; case cudaErrorUnsupportedLimit: return "cudaErrorUnsupportedLimit"; case cudaErrorDuplicateVariableName: return "cudaErrorDuplicateVariableName"; case 
cudaErrorDuplicateTextureName: return "cudaErrorDuplicateTextureName"; case cudaErrorDuplicateSurfaceName: return "cudaErrorDuplicateSurfaceName"; case cudaErrorDevicesUnavailable: return "cudaErrorDevicesUnavailable"; case cudaErrorInvalidKernelImage: return "cudaErrorInvalidKernelImage"; case cudaErrorNoKernelImageForDevice: return "cudaErrorNoKernelImageForDevice"; case cudaErrorIncompatibleDriverContext: return "cudaErrorIncompatibleDriverContext"; case cudaErrorPeerAccessAlreadyEnabled: return "cudaErrorPeerAccessAlreadyEnabled"; case cudaErrorPeerAccessNotEnabled: return "cudaErrorPeerAccessNotEnabled"; case cudaErrorDeviceAlreadyInUse: return "cudaErrorDeviceAlreadyInUse"; case cudaErrorProfilerDisabled: return "cudaErrorProfilerDisabled"; case cudaErrorProfilerNotInitialized: return "cudaErrorProfilerNotInitialized"; case cudaErrorProfilerAlreadyStarted: return "cudaErrorProfilerAlreadyStarted"; case cudaErrorProfilerAlreadyStopped: return "cudaErrorProfilerAlreadyStopped"; /* Since CUDA 4.0*/ case cudaErrorAssert: return "cudaErrorAssert"; case cudaErrorTooManyPeers: return "cudaErrorTooManyPeers"; case cudaErrorHostMemoryAlreadyRegistered: return "cudaErrorHostMemoryAlreadyRegistered"; case cudaErrorHostMemoryNotRegistered: return "cudaErrorHostMemoryNotRegistered"; /* Since CUDA 5.0 */ case cudaErrorOperatingSystem: return "cudaErrorOperatingSystem"; case cudaErrorPeerAccessUnsupported: return "cudaErrorPeerAccessUnsupported"; case cudaErrorLaunchMaxDepthExceeded: return "cudaErrorLaunchMaxDepthExceeded"; case cudaErrorLaunchFileScopedTex: return "cudaErrorLaunchFileScopedTex"; case cudaErrorLaunchFileScopedSurf: return "cudaErrorLaunchFileScopedSurf"; case cudaErrorSyncDepthExceeded: return "cudaErrorSyncDepthExceeded"; case cudaErrorLaunchPendingCountExceeded: return "cudaErrorLaunchPendingCountExceeded"; case cudaErrorNotPermitted: return "cudaErrorNotPermitted"; case cudaErrorNotSupported: return "cudaErrorNotSupported"; /* Since CUDA 6.0 */ case cudaErrorHardwareStackError: return "cudaErrorHardwareStackError"; case cudaErrorIllegalInstruction: return "cudaErrorIllegalInstruction"; case cudaErrorMisalignedAddress: return "cudaErrorMisalignedAddress"; case cudaErrorInvalidAddressSpace: return "cudaErrorInvalidAddressSpace"; case cudaErrorInvalidPc: return "cudaErrorInvalidPc"; case cudaErrorIllegalAddress: return "cudaErrorIllegalAddress"; /* Since CUDA 6.5*/ case cudaErrorInvalidPtx: return "cudaErrorInvalidPtx"; case cudaErrorInvalidGraphicsContext: return "cudaErrorInvalidGraphicsContext"; case cudaErrorStartupFailure: return "cudaErrorStartupFailure"; case cudaErrorApiFailureBase: return "cudaErrorApiFailureBase"; } return "<unknown>"; } //check whether cuda errors void checkCudaError(cudaError_t status){ if (status!=cudaSuccess){ fprintf(stderr,"Some Error occured in CUDA.\n:%s \nError Code : %d\n",getcudaError(status),status); exit(EXIT_FAILURE); } }
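// A short usage sketch for the checkCudaError helper defined above: wrap every
// runtime call, and pair each kernel launch with cudaGetLastError and
// cudaDeviceSynchronize so asynchronous failures are reported with a readable
// name. someKernel and the buffer size are placeholders, not part of the file.
#include <cuda_runtime.h>
#include "helpers.cuh"

__global__ void someKernel(float* data) { data[threadIdx.x] *= 2.0f; }

void CheckedLaunchExample() {
  float* d = nullptr;
  checkCudaError(cudaMalloc((void**)&d, 256 * sizeof(float)));
  checkCudaError(cudaMemset(d, 0, 256 * sizeof(float)));
  someKernel<<<1, 256>>>(d);
  checkCudaError(cudaGetLastError());       // launch-configuration errors
  checkCudaError(cudaDeviceSynchronize());  // faults raised while the kernel runs
  checkCudaError(cudaFree(d));
}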
59d39100ce747de988b95c669af00d428592494a.hip
// !!! This is a file automatically generated by hipify!!! /* * @brief: this file contains the definition of svm predictor class * Created on: May 24, 2012 * Author: Zeyi Wen * Copyright @DBGroup University of Melbourne */ #include "../svm-shared/gpu_global_utility.h" #include "svmPredictor.h" #include "../svm-shared/storageManager.h" #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <hip/hip_runtime_api.h> /* * @brief: read kernel values based on support vectors */ void CSVMPredictor::ReadKVbasedOnSV(float_point *pfSVsKernelValues, int *pnSVSampleId, int nNumofSVs, int nNumofTestSamples) { FILE *pFile = fopen(HESSIAN_FILE, "rb"); float_point *pfSVHessianSubRow = new float_point[nNumofTestSamples]; float_point *pfHessianFullRow = new float_point[m_pHessianReader->m_nTotalNumofInstance]; memset(pfSVHessianSubRow, 0, sizeof(float_point) * nNumofTestSamples); for(int i = 0; i < nNumofSVs; i++) { //read part of the Hessian Row if(m_pHessianReader->m_nNumofCachedHessianRow > pnSVSampleId[i]) { //if the hessian row is in host memory long long nIndexofFirstElement; //only one if-statement holds, as testing samples are continuously allocated in RAM if(m_pHessianReader->m_nRowStartPos1 != -1) { nIndexofFirstElement = (long long)pnSVSampleId[i] * m_pHessianReader->m_nTotalNumofInstance + m_pHessianReader->m_nRowStartPos1; } if(m_pHessianReader->m_nRowStartPos2 != -1) { nIndexofFirstElement = (long long)pnSVSampleId[i] * m_pHessianReader->m_nTotalNumofInstance + m_pHessianReader->m_nRowStartPos2; } //copy the memory memcpy(pfSVHessianSubRow, m_pHessianReader->m_pfHessianRowsInHostMem + nIndexofFirstElement, nNumofTestSamples * sizeof(float_point)); } else//the hessian row is in SSD { int nStartPos; if(m_pHessianReader->m_nRowStartPos1 != -1) { nStartPos = m_pHessianReader->m_nRowStartPos1; } else if(m_pHessianReader->m_nRowStartPos2 != -1) { nStartPos = m_pHessianReader->m_nRowStartPos2; } else { assert(0); } m_pHessianReader->ReadRow(pnSVSampleId[i], pfSVHessianSubRow); //int nIndexInSSD = pnSVSampleId[i] - m_pHessianOps->m_nNumofCachedHessianRow; //m_pHessianOps->ReadHessianFullRow(pFile, nIndexInSSD, 1, pfHessianFullRow); //memcpy(pfSVHessianSubRow, pfHessianFullRow + nStartPos, nNumofTestSamples * sizeof(float_point)); } for(int j = 0; j < nNumofTestSamples; j++) { //store kernel values in a matrix with the form that row is testing samples, column is SVs. 
pfSVsKernelValues[j * (long long)nNumofSVs + i] = pfSVHessianSubRow[j]; } } fclose(pFile); delete[] pfSVHessianSubRow; delete[] pfHessianFullRow; } /* * @brief: read kernel values based on testing examples */ void CSVMPredictor::ReadKVbasedOnTest(float_point *pfSVsKernelValues, int *pnSVSampleId, int nNumofSVs, int nNumofTestSamples) { FILE *pFile = fopen(HESSIAN_FILE, "rb"); float_point *pfSVHessianSubRow = new float_point[nNumofSVs]; memset(pfSVHessianSubRow, 0, sizeof(float_point) * nNumofSVs); float_point *pfHessianRow = new float_point[m_pHessianReader->m_nTotalNumofInstance]; int nTestStartId = m_nTestStart; assert(nTestStartId >= 0); int nTestEndId = nTestStartId + nNumofTestSamples - 1;//include the last sample for(int i = nTestStartId; i <= nTestEndId; i++) { //read part of the Hessian Row //if the hessian row is in host memory if(m_pHessianReader->m_nNumofCachedHessianRow > i) { for(int j = 0; j < nNumofSVs; j++) { pfSVHessianSubRow[j] = m_pHessianReader->m_pfHessianRowsInHostMem[i * (long long)m_pHessianReader->m_nTotalNumofInstance + pnSVSampleId[j]]; } } else//the hessian row is in SSD { m_pHessianReader->ReadRow(i, pfHessianRow); for(int j = 0; j < nNumofSVs; j++) { pfSVHessianSubRow[j] = pfHessianRow[pnSVSampleId[j]]; } } for(int j = 0; j < nNumofSVs; j++) { //store kernel values in a matrix with the form that row is testing samples, column is SVs. pfSVsKernelValues[(i - nTestStartId) * (long long)nNumofSVs + j] = pfSVHessianSubRow[j]; } } if(pFile != NULL) fclose(pFile); delete[] pfSVHessianSubRow; delete[] pfHessianRow; } /** * @brief: read kernel values from precomputed results */ void CSVMPredictor::ReadFromHessian(float_point *pfSVsKernelValues, int *pnSVSampleId, int nNumofSVs, int *pnTestSampleId, int nNumofTestSamples) { //get Hessian rows of support vectors m_pHessianReader->AllocateBuffer(1); if(nNumofSVs >= nNumofTestSamples) { m_pHessianReader->SetInvolveData(-1, -1, 0, m_pHessianReader->m_nTotalNumofInstance - 1); ReadKVbasedOnTest(pfSVsKernelValues, pnSVSampleId, nNumofSVs, nNumofTestSamples); } else { m_pHessianReader->SetInvolveData(-1, -1, pnTestSampleId[0], pnTestSampleId[nNumofTestSamples - 1]); ReadKVbasedOnSV(pfSVsKernelValues, pnSVSampleId, nNumofSVs, nNumofTestSamples); } m_pHessianReader->ReleaseBuffer(); } /** * @brief: allocate memory for kernel values involved in the prediction */ float_point *CSVMPredictor::AllocateKVMem(int nNumofSVs, const int &nNumofTestSamples) { //store sub Hessian Matrix float_point *pfSVsKernelValues = new float_point[nNumofTestSamples * nNumofSVs]; memset(pfSVsKernelValues, 0, sizeof(float_point) * nNumofTestSamples * nNumofSVs); return pfSVsKernelValues; } /** * @return the number of support vectors in the model */ int CSVMPredictor::GetNumSV(svm_model *pModel) { return (pModel->nSV[0] + pModel->nSV[1]); } /** * @brief: predict the label helper function */ float_point* CSVMPredictor::PredictLabel(svm_model *pModel, int nNumofTestSamples, float_point *pfSVsKernelValues) { //get infomation from SVM model int nNumofSVs = GetNumSV(pModel); float_point fBias = *(pModel->rho); float_point **pyfSVsYiAlpha = pModel->sv_coef; float_point *pfSVsYiAlpha = pyfSVsYiAlpha[0]; int *pnSVsLabel = pModel->label; float_point *pfYiAlphaofSVs; /*compute y_i*alpha_i*K(i, z) by GPU, where i is id of support vector. * pfDevSVYiAlphaHessian stores in the order of T1 sv1 sv2 ... T2 sv1 sv2 ... T3 sv1 sv2 ... 
*/ float_point *pfDevSVYiAlphaHessian; float_point *pfDevSVsYiAlpha; int *pnDevSVsLabel; //if the memory is not enough for the storage when classifying all testing samples at once, divide it into multiple parts StorageManager *manager = StorageManager::getManager(); int nMaxNumofFloatPoint = manager->GetFreeGPUMem(); int nNumofPart = Ceil(nNumofSVs * nNumofTestSamples, nMaxNumofFloatPoint); // cout << "cache size is: " << nMaxNumofFloatPoint << " v.s.. " << nNumofSVs * nNumofTestSamples << endl; // cout << "perform classification in " << nNumofPart << " time(s)" << endl; //allocate memory for storing classification result float_point *pfClassificaitonResult = new float_point[nNumofTestSamples]; //initialise the size of each part int *pSizeofPart = new int[nNumofPart]; int nAverageSize = nNumofTestSamples / nNumofPart; for(int i = 0; i < nNumofPart; i++) { if(i != nNumofPart - 1) { pSizeofPart[i] = nAverageSize; } else { pSizeofPart[i] = nNumofTestSamples - nAverageSize * i; } } //perform classification for each part for(int i = 0; i < nNumofPart; i++) { checkCudaErrors(hipMalloc((void**)&pfDevSVYiAlphaHessian, sizeof(float_point) * nNumofSVs * pSizeofPart[i])); checkCudaErrors(hipMalloc((void**)&pfDevSVsYiAlpha, sizeof(float_point) * nNumofSVs)); checkCudaErrors(hipMalloc((void**)&pnDevSVsLabel, sizeof(int) * nNumofSVs)); checkCudaErrors(hipMemset(pfDevSVYiAlphaHessian, 0, sizeof(float_point) * nNumofSVs * pSizeofPart[i])); checkCudaErrors(hipMemset(pfDevSVsYiAlpha, 0, sizeof(float_point) * nNumofSVs)); checkCudaErrors(hipMemset(pnDevSVsLabel, 0, sizeof(int) * nNumofSVs)); checkCudaErrors(hipMemcpy(pfDevSVYiAlphaHessian, pfSVsKernelValues + i * nAverageSize * nNumofSVs, sizeof(float_point) * nNumofSVs * pSizeofPart[i], hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(pfDevSVsYiAlpha, pfSVsYiAlpha, sizeof(float_point) * nNumofSVs, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(pnDevSVsLabel, pnSVsLabel, sizeof(int) * nNumofSVs, hipMemcpyHostToDevice)); //compute y_i*alpha_i*K(i, z) int nVecMatxMulGridDimY = pSizeofPart[i]; int nVecMatxMulGridDimX = Ceil(nNumofSVs, BLOCK_SIZE); dim3 vecMatxMulGridDim(nVecMatxMulGridDimX, nVecMatxMulGridDimY); hipLaunchKernelGGL(( VectorMatrixMul), dim3(vecMatxMulGridDim), dim3(BLOCK_SIZE), 0, 0, pfDevSVsYiAlpha, pfDevSVYiAlphaHessian, pSizeofPart[i], nNumofSVs); //perform classification ComputeClassLabel(pSizeofPart[i], pfDevSVYiAlphaHessian, nNumofSVs, fBias, pfClassificaitonResult + i * nAverageSize); if(pfClassificaitonResult == NULL) { cerr << "error in computeSVYiAlphaHessianSum" << endl; exit(-1); } //free memory checkCudaErrors(hipFree(pfDevSVYiAlphaHessian)); pfDevSVYiAlphaHessian = NULL; checkCudaErrors(hipFree(pfDevSVsYiAlpha)); checkCudaErrors(hipFree(pnDevSVsLabel)); } return pfClassificaitonResult; } /* * @brief: predict class labels using precomputed kernel valules */ float_point* CSVMPredictor::Predict(svm_model *pModel, int *pnTestSampleId, const int &nNumofTestSamples) { float_point *pfReturn = NULL; if(pModel == NULL) { cerr << "error in Predict function: invalid input params" << endl; return pfReturn; } //get infomation from SVM model int nNumofSVs = GetNumSV(pModel); int *pnSVSampleId = pModel->pnIndexofSV; //store sub Hessian Matrix float_point *pfSVsKernelValues = AllocateKVMem(nNumofSVs, nNumofTestSamples); //get Hessian rows of support vectors ReadFromHessian(pfSVsKernelValues, pnSVSampleId, nNumofSVs, pnTestSampleId, nNumofTestSamples); pfReturn = PredictLabel(pModel, nNumofTestSamples, pfSVsKernelValues); delete[] 
pfSVsKernelValues; return pfReturn; } double k_function(const svm_node *x, const svm_node *y, const SVMParam &param) { switch(param.kernel_type) { case RBF: { double sum = 0; while(x->index != -1 && y->index !=-1) { if(x->index == y->index) { double d = x->value - y->value; sum += d*d; ++x; ++y; } else { if(x->index > y->index) { sum += y->value * y->value; ++y; } else { sum += x->value * x->value; ++x; } } } while(x->index != -1) { sum += x->value * x->value; ++x; } while(y->index != -1) { sum += y->value * y->value; ++y; } return exp(-param.gamma*sum); } case PRECOMPUTED: //x: test (validation), y: SV return x[(int)(y->value)].value; default: return 0; // Unreachable } } /** * @brief: compute kernel values on-the-fly */ void CSVMPredictor::ComputeOnTheFly(float_point *pfSVsKernelValues, svm_model *model, svm_node **pInstance, int numInstance) { int nr_class = model->nr_class; int l = model->l; //store kernel values in a matrix with the form that row is testing samples, column is SVs. for(int j = 0; j < numInstance; j++) { for(int i=0;i<l;i++) { pfSVsKernelValues[j*l+i] = k_function(pInstance[j], model->SV[i], model->param); } } } /** * @brief: predict labels with computing kernel values on-the-fly */ float_point* CSVMPredictor::Predict(svm_model *pModel, svm_node **pInstance, int numInstance) { float_point *pfReturn = NULL; if(pModel == NULL) { cerr << "error in Predict function: invalid input params" << endl; return pfReturn; } //get infomation from SVM model int nNumofSVs = GetNumSV(pModel); //store sub Hessian Matrix float_point *pfSVsKernelValues = AllocateKVMem(nNumofSVs, numInstance); //get Hessian rows of support vectors ComputeOnTheFly(pfSVsKernelValues, pModel, pInstance, numInstance); pfReturn = PredictLabel(pModel, numInstance, pfSVsKernelValues); delete[] pfSVsKernelValues; return pfReturn; } /* * @brief: compute/predict the labels of testing samples * @output: a set of class labels, associated to testing samples */ float_point* CSVMPredictor::ComputeClassLabel(int nNumofTestSamples, float_point *pfDevSVYiAlphaHessian, const int &nNumofSVs, float_point fBias, float_point *pfFinalResult) { float_point *pfReturn = NULL; if(nNumofTestSamples <= 0 || pfDevSVYiAlphaHessian == NULL || nNumofSVs <= 0) { cerr << "error in computeSVYiAlphaHessianSum: invalid input params" << endl; return pfReturn; } //compute the size of current processing testing samples long long lMega = 1024 * 1024; long long cacheSizeInByte = (CACHE_SIZE * lMega * 4); long long nMaxSizeofProcessingSample = (cacheSizeInByte / (sizeof(float_point) * nNumofSVs)); //reduce by half nMaxSizeofProcessingSample = nMaxSizeofProcessingSample / 2; //if the number of samples in small if(nMaxSizeofProcessingSample > nNumofTestSamples) { nMaxSizeofProcessingSample = nNumofTestSamples; } //compute grid size, and block size for partial sum int nPartialGridDimX = Ceil(nNumofSVs, BLOCK_SIZE); int nPartialGridDimY = nMaxSizeofProcessingSample; dim3 dimPartialSumGrid(nPartialGridDimX, nPartialGridDimY); dim3 dimPartialSumBlock(BLOCK_SIZE); //compute grid size, and block size for global sum and class label computing int nGlobalGridDimX = 1; int nGlobalGridDimY = nMaxSizeofProcessingSample; dim3 dimGlobalSumGrid(nGlobalGridDimX, nGlobalGridDimY); //can use 1D grid dim3 dimGlobalSumBlock(nPartialGridDimX); //memory for computing partial sum by GPU float_point* pfDevPartialSum; // cout << "dimx=" << nPartialGridDimX << "; dimy=" << nPartialGridDimY << endl; checkCudaErrors(hipMalloc((void**)&pfDevPartialSum, sizeof(float_point) * 
nPartialGridDimX * nPartialGridDimY)); checkCudaErrors(hipMemset(pfDevPartialSum, 0, sizeof(float_point) * nPartialGridDimX * nPartialGridDimY)); //memory for computing global sum by GPU float_point *pfDevClassificationResult; checkCudaErrors(hipMalloc((void**)&pfDevClassificationResult, sizeof(float_point) * nGlobalGridDimY)); checkCudaErrors(hipMemset(pfDevClassificationResult, 0, sizeof(float_point) * nGlobalGridDimY)); //reduce step size of partial sum, and global sum int nPartialReduceStepSize = 0; nPartialReduceStepSize = (int)pow(2, (ceil(log2((float)BLOCK_SIZE))-1)); int nGlobalReduceStepSize = 0; nGlobalReduceStepSize = (int) pow(2, ceil(log2((float) nPartialGridDimX)) - 1); for(int nStartPosofTestSample = 0; nStartPosofTestSample < nNumofTestSamples; nStartPosofTestSample += nMaxSizeofProcessingSample) { if(nStartPosofTestSample + nMaxSizeofProcessingSample > nNumofTestSamples) { //the last part of the testing samples nMaxSizeofProcessingSample = nNumofTestSamples - nStartPosofTestSample; nPartialGridDimY = nMaxSizeofProcessingSample; dimPartialSumGrid = dim3(nPartialGridDimX, nPartialGridDimY); nGlobalGridDimY = nMaxSizeofProcessingSample; dimGlobalSumGrid = dim3(nGlobalGridDimX, nGlobalGridDimY); checkCudaErrors(hipFree(pfDevPartialSum)); checkCudaErrors(hipMalloc((void**)&pfDevPartialSum, sizeof(float_point) * nPartialGridDimX * nPartialGridDimY)); checkCudaErrors(hipMemset(pfDevPartialSum, 0, sizeof(float_point) * nPartialGridDimX * nPartialGridDimY)); checkCudaErrors(hipFree(pfDevClassificationResult)); checkCudaErrors(hipMalloc((void**)&pfDevClassificationResult, sizeof(float_point) * nGlobalGridDimY)); checkCudaErrors(hipMemset(pfDevClassificationResult, 0, sizeof(float_point) * nGlobalGridDimY)); } /********* compute partial sum **********/ hipLaunchKernelGGL(( ComputeKernelPartialSum), dim3(dimPartialSumGrid), dim3(dimPartialSumBlock), BLOCK_SIZE * sizeof(float_point), 0, pfDevSVYiAlphaHessian, nNumofSVs, pfDevPartialSum, nPartialReduceStepSize); hipDeviceSynchronize(); hipError_t error = hipGetLastError(); if(error != hipSuccess) { cerr << "cuda error in computeSVYiAlphaHessianSum: failed at ComputePartialSum: " << hipGetErrorString(error) << endl; exit(-1); } /********** compute global sum and class label *********/ //compute global sum hipLaunchKernelGGL(( ComputeKernelGlobalSum), dim3(dimGlobalSumGrid), dim3(dimGlobalSumBlock), nPartialGridDimX * sizeof(float_point), 0, pfDevClassificationResult, fBias, pfDevPartialSum, nGlobalReduceStepSize); hipDeviceSynchronize(); error = hipGetLastError(); if(error != hipSuccess) { cerr << "cuda error in computeSVYiAlphaHessianSum: failed at ComputeGlobalSum: " << hipGetErrorString(error) << endl; exit(-1); } //copy classification result back checkCudaErrors(hipMemcpy(pfFinalResult + nStartPosofTestSample, pfDevClassificationResult, nMaxSizeofProcessingSample * sizeof(float_point), hipMemcpyDeviceToHost)); } checkCudaErrors(hipFree(pfDevPartialSum)); checkCudaErrors(hipFree(pfDevClassificationResult)); pfReturn = pfFinalResult; return pfReturn; } /* * @brief: set data involved in prediction */ bool CSVMPredictor::SetInvolvePredictionData(int nStart1, int nEnd1) { bool bReturn = false; m_nTestStart = nStart1; bReturn = m_pHessianReader->SetInvolveData(-1, -1, 0, m_pHessianReader->m_nTotalNumofInstance - 1); return bReturn; }
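// A tiny worked example of the RBF branch of k_function above, using the same
// sparse libsvm-style termination convention (index == -1 ends a vector). The
// brace-initialised field order (index, then value) and a default-constructed
// SVMParam are assumptions; the numeric values are made up for illustration.
#include <cmath>
#include <cstdio>

void TinyRbfKernelExample() {
  svm_node x[]  = {{1, 0.5}, {3, 2.0}, {-1, 0.0}};  // sparse test instance
  svm_node sv[] = {{1, 0.5}, {2, 1.0}, {-1, 0.0}};  // sparse support vector
  SVMParam param;
  param.kernel_type = RBF;
  param.gamma = 0.25;
  // ||x - sv||^2 = (0.5 - 0.5)^2 + 1.0^2 + 2.0^2 = 5, so K = exp(-0.25 * 5)
  double k = k_function(x, sv, param);
  printf("K(x, sv) = %f (expected %f)\n", k, exp(-1.25));
}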
59d39100ce747de988b95c669af00d428592494a.cu
/* * @brief: this file contains the definition of svm predictor class * Created on: May 24, 2012 * Author: Zeyi Wen * Copyright @DBGroup University of Melbourne */ #include "../svm-shared/gpu_global_utility.h" #include "svmPredictor.h" #include "../svm-shared/storageManager.h" #include <cuda.h> #include <helper_cuda.h> #include <cuda_runtime_api.h> /* * @brief: read kernel values based on support vectors */ void CSVMPredictor::ReadKVbasedOnSV(float_point *pfSVsKernelValues, int *pnSVSampleId, int nNumofSVs, int nNumofTestSamples) { FILE *pFile = fopen(HESSIAN_FILE, "rb"); float_point *pfSVHessianSubRow = new float_point[nNumofTestSamples]; float_point *pfHessianFullRow = new float_point[m_pHessianReader->m_nTotalNumofInstance]; memset(pfSVHessianSubRow, 0, sizeof(float_point) * nNumofTestSamples); for(int i = 0; i < nNumofSVs; i++) { //read part of the Hessian Row if(m_pHessianReader->m_nNumofCachedHessianRow > pnSVSampleId[i]) { //if the hessian row is in host memory long long nIndexofFirstElement; //only one if-statement holds, as testing samples are continuously allocated in RAM if(m_pHessianReader->m_nRowStartPos1 != -1) { nIndexofFirstElement = (long long)pnSVSampleId[i] * m_pHessianReader->m_nTotalNumofInstance + m_pHessianReader->m_nRowStartPos1; } if(m_pHessianReader->m_nRowStartPos2 != -1) { nIndexofFirstElement = (long long)pnSVSampleId[i] * m_pHessianReader->m_nTotalNumofInstance + m_pHessianReader->m_nRowStartPos2; } //copy the memory memcpy(pfSVHessianSubRow, m_pHessianReader->m_pfHessianRowsInHostMem + nIndexofFirstElement, nNumofTestSamples * sizeof(float_point)); } else//the hessian row is in SSD { int nStartPos; if(m_pHessianReader->m_nRowStartPos1 != -1) { nStartPos = m_pHessianReader->m_nRowStartPos1; } else if(m_pHessianReader->m_nRowStartPos2 != -1) { nStartPos = m_pHessianReader->m_nRowStartPos2; } else { assert(0); } m_pHessianReader->ReadRow(pnSVSampleId[i], pfSVHessianSubRow); //int nIndexInSSD = pnSVSampleId[i] - m_pHessianOps->m_nNumofCachedHessianRow; //m_pHessianOps->ReadHessianFullRow(pFile, nIndexInSSD, 1, pfHessianFullRow); //memcpy(pfSVHessianSubRow, pfHessianFullRow + nStartPos, nNumofTestSamples * sizeof(float_point)); } for(int j = 0; j < nNumofTestSamples; j++) { //store kernel values in a matrix with the form that row is testing samples, column is SVs. 
pfSVsKernelValues[j * (long long)nNumofSVs + i] = pfSVHessianSubRow[j]; } } fclose(pFile); delete[] pfSVHessianSubRow; delete[] pfHessianFullRow; } /* * @brief: read kernel values based on testing examples */ void CSVMPredictor::ReadKVbasedOnTest(float_point *pfSVsKernelValues, int *pnSVSampleId, int nNumofSVs, int nNumofTestSamples) { FILE *pFile = fopen(HESSIAN_FILE, "rb"); float_point *pfSVHessianSubRow = new float_point[nNumofSVs]; memset(pfSVHessianSubRow, 0, sizeof(float_point) * nNumofSVs); float_point *pfHessianRow = new float_point[m_pHessianReader->m_nTotalNumofInstance]; int nTestStartId = m_nTestStart; assert(nTestStartId >= 0); int nTestEndId = nTestStartId + nNumofTestSamples - 1;//include the last sample for(int i = nTestStartId; i <= nTestEndId; i++) { //read part of the Hessian Row //if the hessian row is in host memory if(m_pHessianReader->m_nNumofCachedHessianRow > i) { for(int j = 0; j < nNumofSVs; j++) { pfSVHessianSubRow[j] = m_pHessianReader->m_pfHessianRowsInHostMem[i * (long long)m_pHessianReader->m_nTotalNumofInstance + pnSVSampleId[j]]; } } else//the hessian row is in SSD { m_pHessianReader->ReadRow(i, pfHessianRow); for(int j = 0; j < nNumofSVs; j++) { pfSVHessianSubRow[j] = pfHessianRow[pnSVSampleId[j]]; } } for(int j = 0; j < nNumofSVs; j++) { //store kernel values in a matrix with the form that row is testing samples, column is SVs. pfSVsKernelValues[(i - nTestStartId) * (long long)nNumofSVs + j] = pfSVHessianSubRow[j]; } } if(pFile != NULL) fclose(pFile); delete[] pfSVHessianSubRow; delete[] pfHessianRow; } /** * @brief: read kernel values from precomputed results */ void CSVMPredictor::ReadFromHessian(float_point *pfSVsKernelValues, int *pnSVSampleId, int nNumofSVs, int *pnTestSampleId, int nNumofTestSamples) { //get Hessian rows of support vectors m_pHessianReader->AllocateBuffer(1); if(nNumofSVs >= nNumofTestSamples) { m_pHessianReader->SetInvolveData(-1, -1, 0, m_pHessianReader->m_nTotalNumofInstance - 1); ReadKVbasedOnTest(pfSVsKernelValues, pnSVSampleId, nNumofSVs, nNumofTestSamples); } else { m_pHessianReader->SetInvolveData(-1, -1, pnTestSampleId[0], pnTestSampleId[nNumofTestSamples - 1]); ReadKVbasedOnSV(pfSVsKernelValues, pnSVSampleId, nNumofSVs, nNumofTestSamples); } m_pHessianReader->ReleaseBuffer(); } /** * @brief: allocate memory for kernel values involved in the prediction */ float_point *CSVMPredictor::AllocateKVMem(int nNumofSVs, const int &nNumofTestSamples) { //store sub Hessian Matrix float_point *pfSVsKernelValues = new float_point[nNumofTestSamples * nNumofSVs]; memset(pfSVsKernelValues, 0, sizeof(float_point) * nNumofTestSamples * nNumofSVs); return pfSVsKernelValues; } /** * @return the number of support vectors in the model */ int CSVMPredictor::GetNumSV(svm_model *pModel) { return (pModel->nSV[0] + pModel->nSV[1]); } /** * @brief: predict the label helper function */ float_point* CSVMPredictor::PredictLabel(svm_model *pModel, int nNumofTestSamples, float_point *pfSVsKernelValues) { //get infomation from SVM model int nNumofSVs = GetNumSV(pModel); float_point fBias = *(pModel->rho); float_point **pyfSVsYiAlpha = pModel->sv_coef; float_point *pfSVsYiAlpha = pyfSVsYiAlpha[0]; int *pnSVsLabel = pModel->label; float_point *pfYiAlphaofSVs; /*compute y_i*alpha_i*K(i, z) by GPU, where i is id of support vector. * pfDevSVYiAlphaHessian stores in the order of T1 sv1 sv2 ... T2 sv1 sv2 ... T3 sv1 sv2 ... 
*/ float_point *pfDevSVYiAlphaHessian; float_point *pfDevSVsYiAlpha; int *pnDevSVsLabel; //if the memory is not enough for the storage when classifying all testing samples at once, divide it into multiple parts StorageManager *manager = StorageManager::getManager(); int nMaxNumofFloatPoint = manager->GetFreeGPUMem(); int nNumofPart = Ceil(nNumofSVs * nNumofTestSamples, nMaxNumofFloatPoint); // cout << "cache size is: " << nMaxNumofFloatPoint << " v.s.. " << nNumofSVs * nNumofTestSamples << endl; // cout << "perform classification in " << nNumofPart << " time(s)" << endl; //allocate memory for storing classification result float_point *pfClassificaitonResult = new float_point[nNumofTestSamples]; //initialise the size of each part int *pSizeofPart = new int[nNumofPart]; int nAverageSize = nNumofTestSamples / nNumofPart; for(int i = 0; i < nNumofPart; i++) { if(i != nNumofPart - 1) { pSizeofPart[i] = nAverageSize; } else { pSizeofPart[i] = nNumofTestSamples - nAverageSize * i; } } //perform classification for each part for(int i = 0; i < nNumofPart; i++) { checkCudaErrors(cudaMalloc((void**)&pfDevSVYiAlphaHessian, sizeof(float_point) * nNumofSVs * pSizeofPart[i])); checkCudaErrors(cudaMalloc((void**)&pfDevSVsYiAlpha, sizeof(float_point) * nNumofSVs)); checkCudaErrors(cudaMalloc((void**)&pnDevSVsLabel, sizeof(int) * nNumofSVs)); checkCudaErrors(cudaMemset(pfDevSVYiAlphaHessian, 0, sizeof(float_point) * nNumofSVs * pSizeofPart[i])); checkCudaErrors(cudaMemset(pfDevSVsYiAlpha, 0, sizeof(float_point) * nNumofSVs)); checkCudaErrors(cudaMemset(pnDevSVsLabel, 0, sizeof(int) * nNumofSVs)); checkCudaErrors(cudaMemcpy(pfDevSVYiAlphaHessian, pfSVsKernelValues + i * nAverageSize * nNumofSVs, sizeof(float_point) * nNumofSVs * pSizeofPart[i], cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(pfDevSVsYiAlpha, pfSVsYiAlpha, sizeof(float_point) * nNumofSVs, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(pnDevSVsLabel, pnSVsLabel, sizeof(int) * nNumofSVs, cudaMemcpyHostToDevice)); //compute y_i*alpha_i*K(i, z) int nVecMatxMulGridDimY = pSizeofPart[i]; int nVecMatxMulGridDimX = Ceil(nNumofSVs, BLOCK_SIZE); dim3 vecMatxMulGridDim(nVecMatxMulGridDimX, nVecMatxMulGridDimY); VectorMatrixMul<<<vecMatxMulGridDim, BLOCK_SIZE>>>(pfDevSVsYiAlpha, pfDevSVYiAlphaHessian, pSizeofPart[i], nNumofSVs); //perform classification ComputeClassLabel(pSizeofPart[i], pfDevSVYiAlphaHessian, nNumofSVs, fBias, pfClassificaitonResult + i * nAverageSize); if(pfClassificaitonResult == NULL) { cerr << "error in computeSVYiAlphaHessianSum" << endl; exit(-1); } //free memory checkCudaErrors(cudaFree(pfDevSVYiAlphaHessian)); pfDevSVYiAlphaHessian = NULL; checkCudaErrors(cudaFree(pfDevSVsYiAlpha)); checkCudaErrors(cudaFree(pnDevSVsLabel)); } return pfClassificaitonResult; } /* * @brief: predict class labels using precomputed kernel valules */ float_point* CSVMPredictor::Predict(svm_model *pModel, int *pnTestSampleId, const int &nNumofTestSamples) { float_point *pfReturn = NULL; if(pModel == NULL) { cerr << "error in Predict function: invalid input params" << endl; return pfReturn; } //get infomation from SVM model int nNumofSVs = GetNumSV(pModel); int *pnSVSampleId = pModel->pnIndexofSV; //store sub Hessian Matrix float_point *pfSVsKernelValues = AllocateKVMem(nNumofSVs, nNumofTestSamples); //get Hessian rows of support vectors ReadFromHessian(pfSVsKernelValues, pnSVSampleId, nNumofSVs, pnTestSampleId, nNumofTestSamples); pfReturn = PredictLabel(pModel, nNumofTestSamples, pfSVsKernelValues); delete[] pfSVsKernelValues; return 
pfReturn; } double k_function(const svm_node *x, const svm_node *y, const SVMParam &param) { switch(param.kernel_type) { case RBF: { double sum = 0; while(x->index != -1 && y->index !=-1) { if(x->index == y->index) { double d = x->value - y->value; sum += d*d; ++x; ++y; } else { if(x->index > y->index) { sum += y->value * y->value; ++y; } else { sum += x->value * x->value; ++x; } } } while(x->index != -1) { sum += x->value * x->value; ++x; } while(y->index != -1) { sum += y->value * y->value; ++y; } return exp(-param.gamma*sum); } case PRECOMPUTED: //x: test (validation), y: SV return x[(int)(y->value)].value; default: return 0; // Unreachable } } /** * @brief: compute kernel values on-the-fly */ void CSVMPredictor::ComputeOnTheFly(float_point *pfSVsKernelValues, svm_model *model, svm_node **pInstance, int numInstance) { int nr_class = model->nr_class; int l = model->l; //store kernel values in a matrix with the form that row is testing samples, column is SVs. for(int j = 0; j < numInstance; j++) { for(int i=0;i<l;i++) { pfSVsKernelValues[j*l+i] = k_function(pInstance[j], model->SV[i], model->param); } } } /** * @brief: predict labels with computing kernel values on-the-fly */ float_point* CSVMPredictor::Predict(svm_model *pModel, svm_node **pInstance, int numInstance) { float_point *pfReturn = NULL; if(pModel == NULL) { cerr << "error in Predict function: invalid input params" << endl; return pfReturn; } //get infomation from SVM model int nNumofSVs = GetNumSV(pModel); //store sub Hessian Matrix float_point *pfSVsKernelValues = AllocateKVMem(nNumofSVs, numInstance); //get Hessian rows of support vectors ComputeOnTheFly(pfSVsKernelValues, pModel, pInstance, numInstance); pfReturn = PredictLabel(pModel, numInstance, pfSVsKernelValues); delete[] pfSVsKernelValues; return pfReturn; } /* * @brief: compute/predict the labels of testing samples * @output: a set of class labels, associated to testing samples */ float_point* CSVMPredictor::ComputeClassLabel(int nNumofTestSamples, float_point *pfDevSVYiAlphaHessian, const int &nNumofSVs, float_point fBias, float_point *pfFinalResult) { float_point *pfReturn = NULL; if(nNumofTestSamples <= 0 || pfDevSVYiAlphaHessian == NULL || nNumofSVs <= 0) { cerr << "error in computeSVYiAlphaHessianSum: invalid input params" << endl; return pfReturn; } //compute the size of current processing testing samples long long lMega = 1024 * 1024; long long cacheSizeInByte = (CACHE_SIZE * lMega * 4); long long nMaxSizeofProcessingSample = (cacheSizeInByte / (sizeof(float_point) * nNumofSVs)); //reduce by half nMaxSizeofProcessingSample = nMaxSizeofProcessingSample / 2; //if the number of samples in small if(nMaxSizeofProcessingSample > nNumofTestSamples) { nMaxSizeofProcessingSample = nNumofTestSamples; } //compute grid size, and block size for partial sum int nPartialGridDimX = Ceil(nNumofSVs, BLOCK_SIZE); int nPartialGridDimY = nMaxSizeofProcessingSample; dim3 dimPartialSumGrid(nPartialGridDimX, nPartialGridDimY); dim3 dimPartialSumBlock(BLOCK_SIZE); //compute grid size, and block size for global sum and class label computing int nGlobalGridDimX = 1; int nGlobalGridDimY = nMaxSizeofProcessingSample; dim3 dimGlobalSumGrid(nGlobalGridDimX, nGlobalGridDimY); //can use 1D grid dim3 dimGlobalSumBlock(nPartialGridDimX); //memory for computing partial sum by GPU float_point* pfDevPartialSum; // cout << "dimx=" << nPartialGridDimX << "; dimy=" << nPartialGridDimY << endl; checkCudaErrors(cudaMalloc((void**)&pfDevPartialSum, sizeof(float_point) * nPartialGridDimX * 
nPartialGridDimY)); checkCudaErrors(cudaMemset(pfDevPartialSum, 0, sizeof(float_point) * nPartialGridDimX * nPartialGridDimY)); //memory for computing global sum by GPU float_point *pfDevClassificationResult; checkCudaErrors(cudaMalloc((void**)&pfDevClassificationResult, sizeof(float_point) * nGlobalGridDimY)); checkCudaErrors(cudaMemset(pfDevClassificationResult, 0, sizeof(float_point) * nGlobalGridDimY)); //reduce step size of partial sum, and global sum int nPartialReduceStepSize = 0; nPartialReduceStepSize = (int)pow(2, (ceil(log2((float)BLOCK_SIZE))-1)); int nGlobalReduceStepSize = 0; nGlobalReduceStepSize = (int) pow(2, ceil(log2((float) nPartialGridDimX)) - 1); for(int nStartPosofTestSample = 0; nStartPosofTestSample < nNumofTestSamples; nStartPosofTestSample += nMaxSizeofProcessingSample) { if(nStartPosofTestSample + nMaxSizeofProcessingSample > nNumofTestSamples) { //the last part of the testing samples nMaxSizeofProcessingSample = nNumofTestSamples - nStartPosofTestSample; nPartialGridDimY = nMaxSizeofProcessingSample; dimPartialSumGrid = dim3(nPartialGridDimX, nPartialGridDimY); nGlobalGridDimY = nMaxSizeofProcessingSample; dimGlobalSumGrid = dim3(nGlobalGridDimX, nGlobalGridDimY); checkCudaErrors(cudaFree(pfDevPartialSum)); checkCudaErrors(cudaMalloc((void**)&pfDevPartialSum, sizeof(float_point) * nPartialGridDimX * nPartialGridDimY)); checkCudaErrors(cudaMemset(pfDevPartialSum, 0, sizeof(float_point) * nPartialGridDimX * nPartialGridDimY)); checkCudaErrors(cudaFree(pfDevClassificationResult)); checkCudaErrors(cudaMalloc((void**)&pfDevClassificationResult, sizeof(float_point) * nGlobalGridDimY)); checkCudaErrors(cudaMemset(pfDevClassificationResult, 0, sizeof(float_point) * nGlobalGridDimY)); } /********* compute partial sum **********/ ComputeKernelPartialSum<<<dimPartialSumGrid, dimPartialSumBlock, BLOCK_SIZE * sizeof(float_point)>>> (pfDevSVYiAlphaHessian, nNumofSVs, pfDevPartialSum, nPartialReduceStepSize); cudaDeviceSynchronize(); cudaError_t error = cudaGetLastError(); if(error != cudaSuccess) { cerr << "cuda error in computeSVYiAlphaHessianSum: failed at ComputePartialSum: " << cudaGetErrorString(error) << endl; exit(-1); } /********** compute global sum and class label *********/ //compute global sum ComputeKernelGlobalSum<<<dimGlobalSumGrid, dimGlobalSumBlock, nPartialGridDimX * sizeof(float_point)>>> (pfDevClassificationResult, fBias, pfDevPartialSum, nGlobalReduceStepSize); cudaDeviceSynchronize(); error = cudaGetLastError(); if(error != cudaSuccess) { cerr << "cuda error in computeSVYiAlphaHessianSum: failed at ComputeGlobalSum: " << cudaGetErrorString(error) << endl; exit(-1); } //copy classification result back checkCudaErrors(cudaMemcpy(pfFinalResult + nStartPosofTestSample, pfDevClassificationResult, nMaxSizeofProcessingSample * sizeof(float_point), cudaMemcpyDeviceToHost)); } checkCudaErrors(cudaFree(pfDevPartialSum)); checkCudaErrors(cudaFree(pfDevClassificationResult)); pfReturn = pfFinalResult; return pfReturn; } /* * @brief: set data involved in prediction */ bool CSVMPredictor::SetInvolvePredictionData(int nStart1, int nEnd1) { bool bReturn = false; m_nTestStart = nStart1; bReturn = m_pHessianReader->SetInvolveData(-1, -1, 0, m_pHessianReader->m_nTotalNumofInstance - 1); return bReturn; }
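For a sense of scale, the chunking in ComputeClassLabel above can be made concrete with assumed numbers (none of them fixed by this file): with CACHE_SIZE = 100, nNumofSVs = 4096 and a 4-byte float_point, cacheSizeInByte = 100 * 1024 * 1024 * 4 = 419,430,400 bytes, so nMaxSizeofProcessingSample = 419,430,400 / (4 * 4096) = 25,600, which the halving step reduces to 12,800 test samples per pass; the value is clamped to nNumofTestSamples when the test set is smaller than one pass, and the final loop iteration processes whatever remainder is left.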
4a9c7aa28e7a7d4196b68c55e4702a7840b0c7a2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../SoftBodyGrid3D.h"

namespace ar3d {

    __global__ void GridComptueInitialVelocityKernel(dim3 size,
        const Vector3X referencePositions, Vector3X velocities,
        const real3 linearVelocity, const real3 angularVelocity, const real3 centerOfMass)
    {
        CUMAT_KERNEL_1D_LOOP(i, size)
            real3 pos = referencePositions.getRawCoeff(i);
            real3 vel = SoftBodySimulation3D::computeInitialVelocity(pos, centerOfMass, linearVelocity, angularVelocity);
            velocities.setRawCoeff(i, vel);
        CUMAT_KERNEL_1D_LOOP_END
    }

    void SoftBodyGrid3D::computeInitialVelocity(const Input & input, const Settings & settings, Vector3X & velocities)
    {
        cuMat::Context& ctx = cuMat::Context::current();
        cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig1D(input.numActiveNodes_, GridComptueInitialVelocityKernel);
        hipLaunchKernelGGL(( GridComptueInitialVelocityKernel) , dim3(cfg.block_count), dim3(cfg.thread_per_block), 0, ctx.stream() , cfg.virtual_size,
            input.referencePositions_, velocities,
            settings.initialLinearVelocity_, settings.initialAngularVelocity_, input.centerOfMass_);
        CUMAT_CHECK_ERROR();
    }

}
4a9c7aa28e7a7d4196b68c55e4702a7840b0c7a2.cu
#include "../SoftBodyGrid3D.h" namespace ar3d { __global__ void GridComptueInitialVelocityKernel(dim3 size, const Vector3X referencePositions, Vector3X velocities, const real3 linearVelocity, const real3 angularVelocity, const real3 centerOfMass) { CUMAT_KERNEL_1D_LOOP(i, size) real3 pos = referencePositions.getRawCoeff(i); real3 vel = SoftBodySimulation3D::computeInitialVelocity(pos, centerOfMass, linearVelocity, angularVelocity); velocities.setRawCoeff(i, vel); CUMAT_KERNEL_1D_LOOP_END } void SoftBodyGrid3D::computeInitialVelocity(const Input & input, const Settings & settings, Vector3X & velocities) { cuMat::Context& ctx = cuMat::Context::current(); cuMat::KernelLaunchConfig cfg = ctx.createLaunchConfig1D(input.numActiveNodes_, GridComptueInitialVelocityKernel); GridComptueInitialVelocityKernel <<<cfg.block_count, cfg.thread_per_block, 0, ctx.stream() >>> (cfg.virtual_size, input.referencePositions_, velocities, settings.initialLinearVelocity_, settings.initialAngularVelocity_, input.centerOfMass_); CUMAT_CHECK_ERROR(); } }
fbb64cd7e53447f1945d688415a3fa74817c774b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void gpuFunc_copiarLayer(float *layer, float *layer_copy)
{
    /* Formula to compute the position */
    int hilosporbloque = blockDim.x * blockDim.y;
    int numhiloenbloque = threadIdx.x + blockDim.x * threadIdx.y;
    int numbloqueengrid = blockIdx.x + gridDim.x * blockIdx.y;
    int globalId = numbloqueengrid * hilosporbloque + numhiloenbloque;
    /*int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
    int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;*/

    layer_copy[globalId]=layer[globalId];
}

__global__ void gpuFunc_actualiza(float *layer, int posicion, float energia,int layer_size)
{
    /* Formula to compute the position */
    /*int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
    int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;*/
    int hilosporbloque = blockDim.x * blockDim.y;
    int numhiloenbloque = threadIdx.x + blockDim.x * threadIdx.y;
    int numbloqueengrid = blockIdx.x + gridDim.x * blockIdx.y;
    int globalId = numbloqueengrid * hilosporbloque + numhiloenbloque;

    /* Update function */
    int distancia = posicion - globalId;
    if ( distancia < 0 ) distancia = - distancia;

    /* 2. The impact point is at distance 1 */
    distancia = distancia + 1;

    /* 3. Square root of the distance */
    //float atenuacion = (float)distancia*distancia;
    //float atenuacion = (float)distancia / PI;
    float atenuacion = sqrtf( (float)distancia );

    /* 4. Compute the attenuated energy */
    float energia_k = energia / atenuacion;

    /* 5. Do not add if the absolute value is below the threshold */
    if(globalId > 0 && globalId < layer_size){
        if ( energia_k >= 0.001f || energia_k <= -0.001f )
            layer[globalId] = layer[globalId] + energia_k;
    }
}

__global__ void gpuFunc_extremos(float *layer, float *layer_copy, int layer_size)
{
    /* Formula to compute the position */
    /*int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
    int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;*/
    int hilosporbloque = blockDim.x * blockDim.y;
    int numhiloenbloque = threadIdx.x + blockDim.x * threadIdx.y;
    int numbloqueengrid = blockIdx.x + gridDim.x * blockIdx.y;
    int globalId = numbloqueengrid * hilosporbloque + numhiloenbloque;

    if(globalId > 0 && globalId < layer_size-1){
        layer[globalId] = ( layer_copy[globalId-1] + layer_copy[globalId] + layer_copy[globalId+1] ) / 3;
    }
}

__global__ void gpuFunc_maximos(float *layer, int *posiciones, float *maximos, int layer_size, int i)
{
    /* Formula to compute the position */
    int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
    int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;

    if(globalId > 0 && globalId < layer_size){
        if ( layer[globalId] > layer[globalId-1] && layer[globalId] > layer[globalId+1] ) {
            if ( layer[globalId] > maximos[i] ) {
                maximos[i] = layer[globalId];
                posiciones[i] = globalId;
            }
        }
    }
}
fbb64cd7e53447f1945d688415a3fa74817c774b.cu
__global__ void gpuFunc_copiarLayer(float *layer, float *layer_copy)
{
    /* Formula to compute the position */
    int hilosporbloque = blockDim.x * blockDim.y;
    int numhiloenbloque = threadIdx.x + blockDim.x * threadIdx.y;
    int numbloqueengrid = blockIdx.x + gridDim.x * blockIdx.y;
    int globalId = numbloqueengrid * hilosporbloque + numhiloenbloque;
    /*int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
    int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;*/

    layer_copy[globalId]=layer[globalId];
}

__global__ void gpuFunc_actualiza(float *layer, int posicion, float energia,int layer_size)
{
    /* Formula to compute the position */
    /*int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
    int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;*/
    int hilosporbloque = blockDim.x * blockDim.y;
    int numhiloenbloque = threadIdx.x + blockDim.x * threadIdx.y;
    int numbloqueengrid = blockIdx.x + gridDim.x * blockIdx.y;
    int globalId = numbloqueengrid * hilosporbloque + numhiloenbloque;

    /* Update function */
    int distancia = posicion - globalId;
    if ( distancia < 0 ) distancia = - distancia;

    /* 2. The impact point is at distance 1 */
    distancia = distancia + 1;

    /* 3. Square root of the distance */
    //float atenuacion = (float)distancia*distancia;
    //float atenuacion = (float)distancia / PI;
    float atenuacion = sqrtf( (float)distancia );

    /* 4. Compute the attenuated energy */
    float energia_k = energia / atenuacion;

    /* 5. Do not add if the absolute value is below the threshold */
    if(globalId > 0 && globalId < layer_size){
        if ( energia_k >= 0.001f || energia_k <= -0.001f )
            layer[globalId] = layer[globalId] + energia_k;
    }
}

__global__ void gpuFunc_extremos(float *layer, float *layer_copy, int layer_size)
{
    /* Formula to compute the position */
    /*int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
    int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;*/
    int hilosporbloque = blockDim.x * blockDim.y;
    int numhiloenbloque = threadIdx.x + blockDim.x * threadIdx.y;
    int numbloqueengrid = blockIdx.x + gridDim.x * blockIdx.y;
    int globalId = numbloqueengrid * hilosporbloque + numhiloenbloque;

    if(globalId > 0 && globalId < layer_size-1){
        layer[globalId] = ( layer_copy[globalId-1] + layer_copy[globalId] + layer_copy[globalId+1] ) / 3;
    }
}

__global__ void gpuFunc_maximos(float *layer, int *posiciones, float *maximos, int layer_size, int i)
{
    /* Formula to compute the position */
    int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
    int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;

    if(globalId > 0 && globalId < layer_size){
        if ( layer[globalId] > layer[globalId-1] && layer[globalId] > layer[globalId+1] ) {
            if ( layer[globalId] > maximos[i] ) {
                maximos[i] = layer[globalId];
                posiciones[i] = globalId;
            }
        }
    }
}
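The four kernels above flatten a 2D block inside a 2D grid into a single globalId and rely on the host to launch exactly enough threads, since gpuFunc_copiarLayer has no bounds check. A hypothetical driver (not part of this file, and only meaningful when compiled together with the kernels above) makes the mapping concrete: with blockDim = (16,16) and gridDim = (4,2), a thread with threadIdx = (3,5) in blockIdx = (2,1) gets globalId = (2 + 4*1) * 256 + (3 + 16*5) = 1619, and the 2048 launched threads cover 2048 layer elements.

#include <cuda_runtime.h>

__global__ void gpuFunc_copiarLayer(float *layer, float *layer_copy);  // defined in the file above

int main()
{
    const int n = 16 * 16 * 4 * 2;          // one layer element per launched thread (2048)
    float *d_layer = NULL, *d_layer_copy = NULL;
    cudaMalloc(&d_layer, n * sizeof(float));
    cudaMalloc(&d_layer_copy, n * sizeof(float));

    // 4x2 blocks of 16x16 threads; every thread copies exactly one element
    gpuFunc_copiarLayer<<<dim3(4, 2), dim3(16, 16)>>>(d_layer, d_layer_copy);
    cudaDeviceSynchronize();

    cudaFree(d_layer);
    cudaFree(d_layer_copy);
    return 0;
}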
3b19c9bfdbe74d98134c6b4deac412aecf5b427c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include </usr/local/cuda/include/cuda_runtime.h>

#include "../glclasses/glut.h"
#include "../glclasses/pixmap.h"

// *************************** CUDA kernel ***********************************************

__global__ void cuda_kernel( int scaleX, int scaleY, int width, int height, int stride, GLubyte *devPixmap )
{
    // first load up the bounds, and make sure we are within it
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    if (x >= width) return;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (y >= height) return;

    // then fill in the pixel values
    devPixmap[(stride * y + x)*4] = 0xFF;
    devPixmap[(stride * y + x)*4 +1] = (y - scaleY) * (x - scaleX);
    devPixmap[(stride * y + x)*4 +2] = 0;
    devPixmap[(stride * y + x)*4 +3] = 0xFF;
}

// *************************** MyGlut class ***********************************************

class MyGlut : public Glut
{
public:
    MyGlut() : scaleX(0), scaleY(0) {}
    ~MyGlut() { hipFree( devPixmap_ ); }

    virtual void init( int *argc, char **argv, int width, int height )
    {
        Glut::init( argc, argv, width, height );
        pixmap_.init( width, height );
        hipMalloc( (void**)&devPixmap_, pixmap_.bytes() );
    }

    virtual void display_event( void )
    {
        glDrawPixels( pixmap_.width(), pixmap_.height(), GL_RGBA, GL_UNSIGNED_BYTE, pixmap_.pixels() );
    }

    virtual void mouse_event( int button, int state, int x, int y )
    {
        if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN) {
            lastX = x;
            lastY = y;
        }
    }

    virtual void mouse_active_motion_event( int x, int y )
    {
        int deltaX = x - lastX;
        int deltaY = y - lastY;
        scaleX += deltaX;
        scaleY += deltaY;
        do_pixels();
        lastX = x;
        lastY = y;
        glutPostRedisplay();
    }

    virtual void reshape_event( int width, int height )
    {
        pixmap_.resize( width, height, CLEAR | COPY );
        hipFree( (void*)devPixmap_ );
        hipMalloc( (void**)&devPixmap_, pixmap_.bytes() );
        do_pixels();
        Glut::reshape_event( width, height );
    }

    void do_pixels( void )
    {
        dim3 grids((pixmap_.width()+15)/16,(pixmap_.height()+15)/16);
        dim3 threads(16,16);
        hipLaunchKernelGGL(( cuda_kernel), dim3(grids),dim3(threads), 0, 0, scaleX, scaleY, pixmap_.width(), pixmap_.height(), pixmap_.stride(), devPixmap_ );
        hipMemcpy( pixmap_.pixels(), devPixmap_, pixmap_.bytes(), hipMemcpyDeviceToHost );
    }

private:
    Pixmap pixmap_;
    GLubyte *devPixmap_;
    int scaleX, scaleY;
    int lastX, lastY;  // used for moving mouse
};

int main( int argc, char **argv )
{
    MyGlut glut;
    glut.init( &argc, argv, 1024, 768 );
    glut.do_pixels();

    int exitCode = 0;
    try {
        glut.loop();
    } catch (int e) {
        exitCode = e;
    }
    return exitCode;
}
3b19c9bfdbe74d98134c6b4deac412aecf5b427c.cu
#include </usr/local/cuda/include/cuda_runtime.h>

#include "../glclasses/glut.h"
#include "../glclasses/pixmap.h"

// *************************** CUDA kernel ***********************************************

__global__ void cuda_kernel( int scaleX, int scaleY, int width, int height, int stride, GLubyte *devPixmap )
{
    // first load up the bounds, and make sure we are within it
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    if (x >= width) return;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (y >= height) return;

    // then fill in the pixel values
    devPixmap[(stride * y + x)*4] = 0xFF;
    devPixmap[(stride * y + x)*4 +1] = (y - scaleY) * (x - scaleX);
    devPixmap[(stride * y + x)*4 +2] = 0;
    devPixmap[(stride * y + x)*4 +3] = 0xFF;
}

// *************************** MyGlut class ***********************************************

class MyGlut : public Glut
{
public:
    MyGlut() : scaleX(0), scaleY(0) {}
    ~MyGlut() { cudaFree( devPixmap_ ); }

    virtual void init( int *argc, char **argv, int width, int height )
    {
        Glut::init( argc, argv, width, height );
        pixmap_.init( width, height );
        cudaMalloc( (void**)&devPixmap_, pixmap_.bytes() );
    }

    virtual void display_event( void )
    {
        glDrawPixels( pixmap_.width(), pixmap_.height(), GL_RGBA, GL_UNSIGNED_BYTE, pixmap_.pixels() );
    }

    virtual void mouse_event( int button, int state, int x, int y )
    {
        if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN) {
            lastX = x;
            lastY = y;
        }
    }

    virtual void mouse_active_motion_event( int x, int y )
    {
        int deltaX = x - lastX;
        int deltaY = y - lastY;
        scaleX += deltaX;
        scaleY += deltaY;
        do_pixels();
        lastX = x;
        lastY = y;
        glutPostRedisplay();
    }

    virtual void reshape_event( int width, int height )
    {
        pixmap_.resize( width, height, CLEAR | COPY );
        cudaFree( (void*)devPixmap_ );
        cudaMalloc( (void**)&devPixmap_, pixmap_.bytes() );
        do_pixels();
        Glut::reshape_event( width, height );
    }

    void do_pixels( void )
    {
        dim3 grids((pixmap_.width()+15)/16,(pixmap_.height()+15)/16);
        dim3 threads(16,16);
        cuda_kernel<<<grids,threads>>>( scaleX, scaleY, pixmap_.width(), pixmap_.height(), pixmap_.stride(), devPixmap_ );
        cudaMemcpy( pixmap_.pixels(), devPixmap_, pixmap_.bytes(), cudaMemcpyDeviceToHost );
    }

private:
    Pixmap pixmap_;
    GLubyte *devPixmap_;
    int scaleX, scaleY;
    int lastX, lastY;  // used for moving mouse
};

int main( int argc, char **argv )
{
    MyGlut glut;
    glut.init( &argc, argv, 1024, 768 );
    glut.do_pixels();

    int exitCode = 0;
    try {
        glut.loop();
    } catch (int e) {
        exitCode = e;
    }
    return exitCode;
}
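do_pixels() sizes its grid with the ceiling-division idiom (pixmap_.width()+15)/16, so partially filled 16x16 tiles still get a block and the kernel's early returns discard the out-of-range threads. A minimal sketch of that idiom follows; divUp is a hypothetical helper, not part of the original class.

// Ceiling division for launch-grid sizing.
inline unsigned int divUp(unsigned int n, unsigned int blockEdge)
{
    return (n + blockEdge - 1) / blockEdge;   // e.g. divUp(1024,16) == 64, divUp(1030,16) == 65
}

// Equivalent to the hand-written (pixmap_.width()+15)/16 above:
//   dim3 grids(divUp(width, 16), divUp(height, 16));
//   dim3 threads(16, 16);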
8f3da28d641f54c039d9608b84441e8aa6a43955.hip
// !!! This is a file automatically generated by hipify!!! #include "SceECM.h" #include "SceCells.h" // Because of forward declaration //# define debugModeECM // bending stiffness is given inside the code. It should be given as in input from a txt file. //isInitPhase bool variable is not active anymore. //Right now it is assumed that ECM stiffness is the same everywhere. __constant__ double sceInterCell_ECM[5]; //__constant__ double wLCPara_ECM[4]; __constant__ double restLenECMAdhSpringGPU ; __constant__ double maxLenECMAdhSpringGPU ; __constant__ double kAdhECMGPU ; __constant__ double stiffnessECMBasalGPU ; __constant__ double stiffnessECMBCGPU ; __constant__ double stiffnessECMPeripGPU ; __constant__ double lknotECMBasalGPU ; __constant__ double lknotECMBCGPU ; __constant__ double lknotECMPeripGPU ; const double smallNumber=.000001 ; namespace patch{ template <typename T> std::string to_string (const T& n) { std:: ostringstream stm ; stm << n ; return stm.str() ; } } __device__ void DefineECMStiffnessAndLknot ( EType nodeType, double & stiffness, double & sponLen) { if (nodeType==excm) { stiffness=stiffnessECMBasalGPU ; sponLen=lknotECMBasalGPU ; } if (nodeType==perip) { stiffness=stiffnessECMPeripGPU ; sponLen=lknotECMPeripGPU ; } if (nodeType==bc2) { stiffness=stiffnessECMBCGPU; sponLen=lknotECMBCGPU ; } } __device__ double calMorse_ECM(const double& linkLength ) { double forceValue=0.0 ; if (linkLength > sceInterCell_ECM[4]) { forceValue = 0; } else { forceValue = -sceInterCell_ECM[0] / sceInterCell_ECM[2] * exp(-linkLength / sceInterCell_ECM[2]) + sceInterCell_ECM[1] / sceInterCell_ECM[3] * exp(-linkLength / sceInterCell_ECM[3]); // if (forceValue > 0) { // forceValue = 0; // } } return (forceValue) ; } __device__ double calMorse_ECM_mitotic(const double& linkLength, double scaling ) { double forceValue=0.0 ; if (linkLength > sceInterCell_ECM[4]) { forceValue = 0; } else { forceValue = (1.0+(3.0-1.0)*scaling)*(-sceInterCell_ECM[0] / sceInterCell_ECM[2] * exp(-linkLength / sceInterCell_ECM[2]) + sceInterCell_ECM[1] / sceInterCell_ECM[3] * exp(-linkLength / sceInterCell_ECM[3])); // forceValue = -(1.0+(2.0-1.0)*scaling)*sceInterCell_ECM[0] / sceInterCell_ECM[2] // * exp(-linkLength / sceInterCell_ECM[2]) // + (1.0+(2.0-1.0)*scaling)*sceInterCell_ECM[1] / sceInterCell_ECM[3] // * exp(-linkLength / sceInterCell_ECM[3]); // if (forceValue > 0) { // forceValue = 0; // } } return (forceValue) ; } __device__ double calMorseEnergy_ECM(const double& linkLength ) { double energyValue=0.0 ; if (linkLength > sceInterCell_ECM[4]) { energyValue = 0; } else { energyValue = sceInterCell_ECM[0]* exp(-linkLength / sceInterCell_ECM[2]) - sceInterCell_ECM[1]* exp(-linkLength / sceInterCell_ECM[3]); } return (energyValue) ; } /* __device__ double calWLC_ECM(const double& linkLength ) { double x=linkLength/wLCPara_ECM[0] ; return (wLCPara_ECM[1]*( 6*x+ ( x*x*(3.0-2*x))/( (1-x)*(1-x) ) ) -wLCPara_ECM[2]/pow(linkLength,wLCPara_ECM[3]) ) ; } */ __device__ bool IsValidAdhPair(const double& dist ) { if (dist > restLenECMAdhSpringGPU && dist < maxLenECMAdhSpringGPU){ return true ; } else { return false ; } } __device__ bool IsValidAdhPairForNotInitPhase(const double& dist ) { if (dist > restLenECMAdhSpringGPU){ return true ; } else { return false ; } } __device__ double CalAdhECM(const double& dist ) { return (kAdhECMGPU*(dist-restLenECMAdhSpringGPU)); // in the function IsValid pair, distance already checked to be greater than neutral length } __device__ double CalAdhEnergy(const double& dist ) { return 
(0.5*kAdhECMGPU*(dist-restLenECMAdhSpringGPU)*(dist-restLenECMAdhSpringGPU)); // in the function IsValid pair, distance already checked to be greater than neutral length } EType SceECM:: ConvertStringToEType(string eNodeRead) { if (eNodeRead=="perip") { return perip ; } else if (eNodeRead=="bc2") { return bc2 ; } else if (eNodeRead=="excm") { return excm ; } else { cout << "Error in defining type of external nodes" << endl ; return excm ;// To just return something to avoid compiler complain } } SceECM::SceECM() { isECMNeighborSet=false ; eCMRemoved=false ; isECMNeighborResetPostDivision = false; } void SceECM::Initialize(uint maxAllNodePerCellECM, uint maxMembrNodePerCellECM, uint maxTotalNodesECM, int freqPlotData, string uniqueSymbol) { maxAllNodePerCell=maxAllNodePerCellECM ; maxMembrNodePerCell= maxMembrNodePerCellECM ; // maxTotalNodes=maxTotalNodesECM ; //Ali this->freqPlotData=freqPlotData ; this->uniqueSymbol=uniqueSymbol ; std::fstream readCoord_ECM ; std::fstream readInput_ECM ; int numberNodes_ECM ; double tmpPosX_ECM,tmpPosY_ECM ; vector<double> posXIni_ECM,posYIni_ECM ; vector <EType> eNodeVec ; int resumeSimulation = globalConfigVars.getConfigValue( "ResumeSimulation").toInt(); if (resumeSimulation==0) { cout << " In the ECM module, I am in start mode" << endl ; readCoord_ECM.open("./resources/coordinate_ECM21.txt") ; } else if(resumeSimulation==1) { cout << " In the ECM module, I am in resume mode" << endl ; std::string secondInputFileName = "./resources/DataFileECM_" + uniqueSymbol + "Resume.cfg"; readCoord_ECM.open(secondInputFileName.c_str()) ; } else{ throw std::invalid_argument(" ResumeSimulation parameter in the input file must be either 1 or 0. Error from ECM module"); } if (readCoord_ECM.is_open()) { cout << "ECM coordinates file opened successfully" <<endl ; } else { cout << "ECM coordinates file is not opened successfully" << endl ; } string inputInfoText ; string eNodeRead ; readCoord_ECM>>numberNodes_ECM ; for (int i=0 ; i<numberNodes_ECM ; i++){ readCoord_ECM>>tmpPosX_ECM>>tmpPosY_ECM>>eNodeRead ; posXIni_ECM.push_back(tmpPosX_ECM) ; posYIni_ECM.push_back(tmpPosY_ECM) ; EType eNode=ConvertStringToEType(eNodeRead) ; eNodeVec.push_back(eNode) ; } readInput_ECM.open("./resources/ECM_input.txt") ; if (readInput_ECM.is_open()) { cout << "ECM Mech input opened successfully" <<endl ; } else { cout << "ECM Mech input is not opened successfully" << endl ; } readInput_ECM>> inputInfoText ; for (int i=0 ; i<5; i++) { readInput_ECM>> mechPara_ECM.sceInterCellCPU_ECM[i] ; //=39.0 ; } // readInput_ECM>>restLenECMSpring ; // readInput_ECM>>eCMLinSpringStiff ; readInput_ECM>>restLenECMAdhSpring ; readInput_ECM>>maxLenECMAdhSpring ; readInput_ECM>>kAdhECM ; //for ( int i=0 ; i<4 ; i++) { // readInput_ECM>>mechPara_ECM.wLCParaCPU_ECM[i] ; // } std::fstream secondInput_ECM ; std:: string secondInputInfo ; //dummy std::string secondInputFileName = "./resources/ECM_" + uniqueSymbol + "input.cfg"; secondInput_ECM.open(secondInputFileName.c_str()) ; //secondInput_ECM.open("./resources/ECM_N01G00_input.cfg" ) ; if (secondInput_ECM.is_open()) { cout << "Second ECM Mech input opened successfully" <<endl ; } else { cout << "Second ECM Mech input is not opened successfully" << endl ; } secondInput_ECM>>secondInputInfo ; // just for information no use in the code secondInput_ECM>>stiffnessECMBasal ; secondInput_ECM>>stiffnessECMBC ; secondInput_ECM>>stiffnessECMPerip ; secondInput_ECM>>lknotECMBasal ; secondInput_ECM>>lknotECMBC ; secondInput_ECM>>lknotECMPerip ; secondInput_ECM>>dampBasal 
; secondInput_ECM>>dampBC ; secondInput_ECM>>dampApical ; cout <<" stiffness of ECM at the basal side is="<<stiffnessECMBasal <<endl ; cout <<" stiffness of ECM at boundary is="<<stiffnessECMBC<<endl ; cout <<" stiffness of ECM peripodial side is="<<stiffnessECMPerip<<endl ; cout <<" rest len basal ECM is="<<lknotECMBasal<<endl ; cout <<" rest len boundary ECM is= "<<lknotECMBC<<endl ; cout << "rest len peripodial ECM is=" <<lknotECMPerip <<endl ; cout << "Damping for basal ECM is="<<dampBasal<<endl ; cout << "Damping for boundary ECM is= "<<dampBC<<endl ; cout << "Damping for peripodial ECM is=" <<dampApical <<endl ; cout << "number of ECM nodes is"<< numberNodes_ECM <<endl ; maxTotalNodes = numberNodes_ECM*2; cout << "max number of ECM nodes is"<< maxTotalNodes <<endl ; for (int i=0 ; i<5; i++) { cout <<"Morse parameter number"<<i<<" is " <<mechPara_ECM.sceInterCellCPU_ECM[i]<<endl ; } //cout <<"rest length of ECM spring is "<<restLenECMSpring<<endl ; // cout <<"ECM spring stiffness is "<<eCMLinSpringStiff<<endl ; cout <<"ECM Membrane neutral adhesion length is "<<restLenECMAdhSpring<<endl ; cout <<"ECM Membrane max adhesion length is "<<maxLenECMAdhSpring<<endl ; cout <<"ECM Membrane adhesion stiffness is "<<kAdhECM<<endl ; cout << "ECM only applies adhesvie force" << endl ; //for ( int i=0 ; i<4 ; i++) { // cout<<"wLC parameter "<< i << " is "<<mechPara_ECM.wLCParaCPU_ECM[i]<<endl ; ; //} hipMemcpyToSymbol(sceInterCell_ECM,mechPara_ECM.sceInterCellCPU_ECM ,5*sizeof(double)); //hipMemcpyToSymbol(wLCPara_ECM,mechPara_ECM.wLCParaCPU_ECM // ,4*sizeof(double)); hipMemcpyToSymbol(restLenECMAdhSpringGPU, &restLenECMAdhSpring,sizeof(double)); hipMemcpyToSymbol(maxLenECMAdhSpringGPU, &maxLenECMAdhSpring,sizeof(double)); hipMemcpyToSymbol(kAdhECMGPU, &kAdhECM,sizeof(double)); hipMemcpyToSymbol(stiffnessECMPeripGPU, &stiffnessECMPerip,sizeof(double)); hipMemcpyToSymbol(stiffnessECMBCGPU, &stiffnessECMBC,sizeof(double)); hipMemcpyToSymbol(stiffnessECMBasalGPU, &stiffnessECMBasal,sizeof(double)); hipMemcpyToSymbol(lknotECMPeripGPU, & lknotECMPerip,sizeof(double)); hipMemcpyToSymbol(lknotECMBCGPU, & lknotECMBC,sizeof(double)); hipMemcpyToSymbol(lknotECMBasalGPU, & lknotECMBasal,sizeof(double)); counter=100000 ; //large number lastPrintECM=1000000 ; // large number outputFrameECM=0 ; numNodesECM= numberNodes_ECM ; //(eCMMaxX-eCMMinX)/eCMMinDist ; // indexECM.resize(numNodesECM,0) ; // peripORexcm.resize(numNodesECM,perip) ; // dampCoef.resize(numNodesECM) ; // nodeECMLocX.resize(numNodesECM,0.0) ; // nodeECMLocY.resize(numNodesECM,0.0) ; // cellNeighborId.resize(numNodesECM,-1) ; // stiffLevel.resize(numNodesECM) ; // sponLen.resize(numNodesECM) ; // linSpringForceECMX.resize(numNodesECM,0.0); // linSpringForceECMY.resize(numNodesECM,0.0); // linSpringAvgTension.resize(numNodesECM,0.0); // linSpringEnergy.resize(numNodesECM,0.0); // morseEnergy.resize(numNodesECM,0.0); // adhEnergy.resize(numNodesECM,0.0); // bendSpringForceECMX.resize(numNodesECM,0.0); // bendSpringForceECMY.resize(numNodesECM,0.0); // memMorseForceECMX.resize(numNodesECM,0.0); // memMorseForceECMY.resize(numNodesECM,0.0); // fBendCenterX.resize(numNodesECM,0.0); // fBendCenterY.resize(numNodesECM,0.0); // fBendLeftX.resize(numNodesECM,0.0); // fBendLeftY.resize(numNodesECM,0.0); // fBendRightX.resize(numNodesECM,0.0); // fBendRightY.resize(numNodesECM,0.0); // totalForceECMX.resize(numNodesECM,0.0); // totalForceECMY.resize(numNodesECM,0.0); // totalExplicitForceECMX.resize(numNodesECM,0.0); // 
totalExplicitForceECMY.resize(numNodesECM,0.0); // rHSX.resize(numNodesECM,0.0); // rHSY.resize(numNodesECM,0.0); // //memNodeType.resize(maxTotalNodes,notAssigned1) ; // nodeIsActive.resize(numNodesECM,true) ; // thrust::sequence (indexECM.begin(),indexECM.begin()+numNodesECM); indexECM.resize(maxTotalNodes,0) ; peripORexcm.resize(maxTotalNodes,perip) ; dampCoef.resize(maxTotalNodes) ; nodeECMLocX.resize(maxTotalNodes,0.0) ; nodeECMLocY.resize(maxTotalNodes,0.0) ; // isActiveECM.resize(maxTotalNodes,false); cellNeighborId.resize(maxTotalNodes,-1) ; stiffLevel.resize(maxTotalNodes) ; sponLen.resize(maxTotalNodes) ; linSpringForceECMX.resize(maxTotalNodes,0.0); linSpringForceECMY.resize(maxTotalNodes,0.0); linSpringAvgTension.resize(maxTotalNodes,0.0); linSpringEnergy.resize(maxTotalNodes,0.0); morseEnergy.resize(maxTotalNodes,0.0); adhEnergy.resize(maxTotalNodes,0.0); bendSpringForceECMX.resize(maxTotalNodes,0.0); bendSpringForceECMY.resize(maxTotalNodes,0.0); memMorseForceECMX.resize(maxTotalNodes,0.0); memMorseForceECMY.resize(maxTotalNodes,0.0); fBendCenterX.resize(maxTotalNodes,0.0); fBendCenterY.resize(maxTotalNodes,0.0); fBendLeftX.resize(maxTotalNodes,0.0); fBendLeftY.resize(maxTotalNodes,0.0); fBendRightX.resize(maxTotalNodes,0.0); fBendRightY.resize(maxTotalNodes,0.0); totalForceECMX.resize(maxTotalNodes,0.0); totalForceECMY.resize(maxTotalNodes,0.0); totalExplicitForceECMX.resize(maxTotalNodes,0.0); totalExplicitForceECMY.resize(maxTotalNodes,0.0); rHSX.resize(maxTotalNodes,0.0); rHSY.resize(maxTotalNodes,0.0); //memNodeType.resize(maxTotalNodes,notAssigned1) ; nodeIsActive.resize(maxTotalNodes,true) ; thrust::sequence (indexECM.begin(),indexECM.begin()+maxTotalNodes); thrust::copy(posXIni_ECM.begin(),posXIni_ECM.end(),nodeECMLocX.begin()) ; thrust::copy(posYIni_ECM.begin(),posYIni_ECM.end(),nodeECMLocY.begin()) ; thrust::copy(eNodeVec.begin(),eNodeVec.end(),peripORexcm.begin()) ; // for (int i = 0; i < posXIni_ECM.size(); i++){ // isActiveECM[i] == true; // } AssignDampCoef() ; cout << "GPU level initial coordinates and type of external nodes are: " << endl ; for (int i=0; i<nodeECMLocX.size() ; i++) { cout<< nodeECMLocX[i]<<", "<<nodeECMLocY[i]<<", "<<peripORexcm[i] << endl; } PrintECM(0.0) ; std::string cSVFileName = "./ECMFolder/EnergyExport_" + uniqueSymbol + ".CSV"; ofstream EnergyExport ; EnergyExport.open(cSVFileName.c_str()); EnergyExport <<"Time,"<<"TotalMorseEnergyECM," << "TotalAdhEnergyECM,"<<"TotalLinSpringEnergy,"<<"TotalEnergy, " <<"TotalEnergyDerivative"<< std::endl; } //initilaization function finished void SceECM:: ApplyECMConstrain(int currentActiveCellCount, int totalNodeCountForActiveCellsECM, double curTime, double dt, double Damp_CoefCell, bool cellPolar, bool subCellPolar, bool isInitPhase, double mitoticThreshold){ bool implicit_solver_active = false ; if (eCMRemoved) { PrintECMRemoved(curTime); cout << "ECM is removed" << endl ; return ; } #ifdef debugModeECM hipEvent_t start1, start2, start3, start4, start5, start6, start7, start8, stop; float elapsedTime1, elapsedTime2, elapsedTime3, elapsedTime4, elapsedTime5, elapsedTime6, elapsedTime7 , elapsedTime8 ; hipEventCreate(&start1); hipEventCreate(&start2); hipEventCreate(&start3); hipEventCreate(&start4); hipEventCreate(&start5); hipEventCreate(&start6); hipEventCreate(&start7); hipEventCreate(&start8); hipEventCreate(&stop); hipEventRecord(start1, 0); #endif nodeCellLocXOld.resize(totalNodeCountForActiveCellsECM) ; nodeCellLocYOld.resize(totalNodeCountForActiveCellsECM) ; 
adhPairECM_Cell.resize(totalNodeCountForActiveCellsECM,-1) ; morseEnergyCell.resize(totalNodeCountForActiveCellsECM,0.0); adhEnergyCell.resize(totalNodeCountForActiveCellsECM,0.0); thrust::copy(nodesPointerECM->getInfoVecs().nodeLocX.begin(),nodesPointerECM->getInfoVecs().nodeLocX.begin()+totalNodeCountForActiveCellsECM,nodeCellLocXOld.begin()) ; thrust::copy(nodesPointerECM->getInfoVecs().nodeLocY.begin(),nodesPointerECM->getInfoVecs().nodeLocY.begin()+totalNodeCountForActiveCellsECM,nodeCellLocYOld.begin()) ; #ifdef debugModeECM hipEventRecord(start2, 0); hipEventSynchronize(start2); hipEventElapsedTime(&elapsedTime1, start1, start2); #endif // std::cout<<"ERROR 1"<<std::endl; thrust:: transform (peripORexcm.begin(), peripORexcm.begin()+numNodesECM, thrust::make_zip_iterator (thrust::make_tuple (stiffLevel.begin(),sponLen.begin())),MechProp()); // cout << " Mechanical properties after assignment is " << stiffLevel[0] << endl ; counter ++ ; //if (counter>=100 || curTime<(100*dt) || isECMNeighborSet==false) { if (curTime<(100*dt) || isECMNeighborSet==false) { isECMNeighborSet=true ; counter=0 ; FindNeighborCandidateForCellsAndECMNodes(); // int ECMAdd_indx = decideIfAddECMNode_M(numNodesECM); // if (ECMAdd_indx >= 0){ // AddECMNode(ECMAdd_indx, numNodesECM); // numNodesECM += 1; // std::cout<<"New ecm node added!"<<std::endl; // std::cout<<"Current numECMNodes : "<<numNodesECM<<std::endl; // } // FindNeighborCandidateForCellsAndECMNodes(); // for (int i = 0; i < numNodesECM; i++){ // std::cout<<cellNeighborId[i]<<std::endl; // } } // std::cout<<"ERROR 2"<<std::endl; if (cellsPointerECM->getCellInfoVecs().isPostDivision ==true || cellsPointerECM->getCellInfoVecs().isPostAddMembrNodes == true){ std::cout<<"Resetting ecm and cell neighbor info! post division!"<<std::endl; int ECMAdd_indx = decideIfAddECMNode_M(numNodesECM); // for (int i = 0; i < 5; i++){ if (ECMAdd_indx >= 0){ AddECMNode(ECMAdd_indx, numNodesECM); numNodesECM += 1; std::cout<<"New ecm node added!"<<std::endl; std::cout<<"Current numECMNodes : "<<numNodesECM<<std::endl; } // } FindNeighborCandidateForCellsAndECMNodes(); isECMNeighborResetPostDivision=true; // for (int i = 0; i < numNodesECM; i++){ // std::cout<<cellNeighborId[i]<<std::endl; // } // std::cout<<"SponLen : "<<std::endl; // for (int i = 0; i < numNodesECM; i++){ // std::cout<<sponLen[i]<<std::endl; // } } // std::cout<<"ERROR 3"<<std::endl; #ifdef debugModeECM hipEventRecord(start3, 0); hipEventSynchronize(start3); hipEventElapsedTime(&elapsedTime2, start2, start3); #endif MoveCellNodesByECMForces(totalNodeCountForActiveCellsECM,currentActiveCellCount,dt, Damp_CoefCell, mitoticThreshold) ; // std::cout<<"ERROR 4"<<std::endl; /* To reduce computational cost energyECM.totalMorseEnergyCellECM = thrust::reduce( morseEnergyCell.begin(),morseEnergyCell.begin()+totalNodeCountForActiveCellsECM,(double) 0.0, thrust::plus<double>() ); energyECM.totalAdhEnergyCellECM = thrust::reduce( adhEnergyCell.begin() ,adhEnergyCell.begin() +totalNodeCountForActiveCellsECM,(double) 0.0, thrust::plus<double>() ); */ CalLinSpringForce(); // std::cout<<"ERROR 5"<<std::endl; CalBendSpringForce(); // std::cout<<"ERROR 6"<<std::endl; #ifdef debugModeECM hipEventRecord(start4, 0); hipEventSynchronize(start4); hipEventElapsedTime(&elapsedTime3, start3, start4); #endif CalCellForcesOnECM(mitoticThreshold) ; // std::cout<<"ERROR 7"<<std::endl; //energyECM.totalLinSpringEnergyECM = 0.5 * ( thrust::reduce( linSpringEnergy.begin(),linSpringEnergy.begin()+numNodesECM,(double) 0.0, 
thrust::plus<double>() )); //to make sure it is based on the distance used for action force calculation. /* To reduce computational cost energyECM.totalMorseEnergyECMCell = thrust::reduce( morseEnergy.begin(),morseEnergy.begin()+numNodesECM,(double) 0.0, thrust::plus<double>() ); energyECM.totalAdhEnergyECMCell = thrust::reduce( adhEnergy.begin() ,adhEnergy.begin() +numNodesECM,(double) 0.0, thrust::plus<double>() ); */ if (!implicit_solver_active) { // Calculate summation of all forces and move nodes if explicit solver is going to be used CalSumForcesOnECM() ; MoveNodesBySumAllForces(dt) ; } // std::cout<<"ERROR 8"<<std::endl; // if (implicit_solver_active) { // //Calculate right hand side of implicit solver which includes explicit forces // CalSumOnlyExplicitForcesOnECM() ; // CalRHS(dt) ; // } #ifdef debugModeECM hipEventRecord(start5, 0); hipEventSynchronize(start5); hipEventElapsedTime(&elapsedTime4, start4, start5); #endif //Create tmp CPU vectors for using in implicit solver. Declariation is not acceptable to be inisde the if condition // vector <double> tmpRHSX(numNodesECM); // vector <double> tmpRHSY(numNodesECM); // tmpHostNodeECMLocX.resize(numNodesECM); // tmpHostNodeECMLocY.resize(numNodesECM); // if (implicit_solver_active) { // // Copy ECM locations from GPU to CPU if implicit solver is used // thrust::copy (rHSX.begin(), rHSX.begin()+numNodesECM, tmpRHSX.begin()); // thrust::copy (rHSY.begin(), rHSY.begin()+numNodesECM, tmpRHSY.begin()); // thrust::copy (nodeECMLocX.begin(), nodeECMLocX.begin()+numNodesECM, tmpHostNodeECMLocX.begin()); // thrust::copy (nodeECMLocY.begin(), nodeECMLocY.begin()+numNodesECM, tmpHostNodeECMLocY.begin()); // //cout << "max RHSX is " << *max_element(tmpRHSX.begin(), tmpRHSX.begin()+numNodesECM) << endl ; // //cout << "min RHSX is " << *min_element(tmpRHSX.begin(), tmpRHSX.begin()+numNodesECM) << endl ; // //cout << "max RHSY is " << *max_element(tmpRHSY.begin(), tmpRHSY.begin()+numNodesECM) << endl ; // //cout << "min RHSY is " << *min_element(tmpRHSY.begin(), tmpRHSY.begin()+numNodesECM) << endl ; // } #ifdef debugModeECM hipEventRecord(start6, 0); hipEventSynchronize(start6); hipEventElapsedTime(&elapsedTime5, start5, start6); #endif // if (implicit_solver_active) { // // setting up eqaution of motion if implicit solver is used // EquMotionCoef (dt); // } #ifdef debugModeECM hipEventRecord(start7, 0); hipEventSynchronize(start7); hipEventElapsedTime(&elapsedTime6, start6, start7); #endif // if (implicit_solver_active) { // // Fetch the implicit solver and update ECM location if implicit solver is used // tmpHostNodeECMLocX =solverPointer->SOR3DiagPeriodic(nodeIsActive,hCoefLd, hCoefD, hCoefUd,tmpRHSX,indexPrev, indexNext, tmpHostNodeECMLocX); // tmpHostNodeECMLocY =solverPointer->SOR3DiagPeriodic(nodeIsActive,hCoefLd, hCoefD, hCoefUd,tmpRHSY,indexPrev,indexNext, tmpHostNodeECMLocY); // // copy ECM node locations back from CPU to GPU if implicit solver is used // thrust::copy (tmpHostNodeECMLocX.begin(), tmpHostNodeECMLocX.begin()+numNodesECM, nodeECMLocX.begin()); // thrust::copy (tmpHostNodeECMLocY.begin(), tmpHostNodeECMLocY.begin()+numNodesECM, nodeECMLocY.begin()); // } #ifdef debugModeECM hipEventRecord(start8, 0); hipEventSynchronize(start8); hipEventElapsedTime(&elapsedTime7, start7, start8); #endif /* To reduce computational cost cout << "total Morse energy for cell-ECM is= "<< energyECM.totalMorseEnergyCellECM << endl ; cout << "total Morse energy for ECM-cell is= "<< energyECM.totalMorseEnergyECMCell << endl ; cout << "total adhesion 
energy for cell-ECM is= "<< energyECM.totalAdhEnergyCellECM << endl ; cout << "total adhesion energy for ECM-cell is= "<< energyECM.totalAdhEnergyECMCell << endl ; //assert (abs (energyECM.totalMorseEnergyCellECM-energyECM.totalMorseEnergyECMCell)<1.0) ; //assert (abs (energyECM.totalAdhEnergyCellECM- energyECM.totalAdhEnergyECMCell) <1.0) ; if ( (abs (energyECM.totalMorseEnergyCellECM-energyECM.totalMorseEnergyECMCell)>1.0) || (abs (energyECM.totalAdhEnergyCellECM- energyECM.totalAdhEnergyECMCell) >1.0) ) { cout << "Warning: Action and reaction forces in the ECM do not match each other" << endl ; } */ # ifdef debugModeECM hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime8, start8, stop); std::cout << "time 1 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime1 << endl ; std::cout << "time 2 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime2 << endl ; std::cout << "time 3 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime3 << endl ; std::cout << "time 4 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime4 << endl ; std::cout << "time 5 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime5 << endl ; std::cout << "time 6 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime6 << endl ; std::cout << "time 7 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime7 << endl ; std::cout << "time 8 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime8 << endl ; #endif //throw std::invalid_argument(" Solver called properly and I want to stop the code"); PrintECM(curTime); // std::cout<<"ERROR 9"<<std::endl; } void SceECM:: PrintECM(double curTime) { lastPrintECM=lastPrintECM+1 ; if (lastPrintECM>=freqPlotData) { outputFrameECM++ ; lastPrintECM=0 ; cout << " I am in regular print function" << endl ; // First ECM output file for paraview // std::string vtkFileName = "./ECMFolder/ECM_" + uniqueSymbol +patch::to_string(outputFrameECM-1) + ".vtk"; ofstream ECMOut; ECMOut.open(vtkFileName.c_str()); ECMOut<< "# vtk DataFile Version 3.0" << endl; ECMOut<< "Result for paraview 2d code" << endl; ECMOut << "ASCII" << endl; ECMOut << "DATASET UNSTRUCTURED_GRID" << std::endl; ECMOut << "POINTS " << nodeECMLocX.size() << " float" << std::endl; for (uint i = 0; i < nodeECMLocX.size(); i++) { ECMOut << nodeECMLocX[i] << " " << nodeECMLocY[i] << " " << 0.0 << std::endl; } ECMOut<< std::endl; ECMOut<< "CELLS " << nodeECMLocX.size()<< " " << 3 *nodeECMLocX.size()<< std::endl; for (uint i = 0; i < (nodeECMLocX.size()-1); i++) { ECMOut << 2 << " " << indexECM[i] << " " << indexECM[i+1] << std::endl; } ECMOut << 2 << " " << indexECM[nodeECMLocX.size()-1] << " "<< indexECM[0] << std::endl; //last point to the first point ECMOut << "CELL_TYPES " << nodeECMLocX.size()<< endl; for (uint i = 0; i < nodeECMLocX.size() ; i++) { ECMOut << "3" << endl; } ECMOut << "POINT_DATA "<<nodeECMLocX.size() <<endl ; ECMOut << "SCALARS Avg_Tension " << "float"<< endl; ECMOut << "LOOKUP_TABLE " << "default"<< endl; for (uint i = 0; i < nodeECMLocX.size(); i++) { ECMOut<<linSpringAvgTension[i] <<endl ; } ECMOut << "SCALARS Node_Type " << "float"<< endl; ECMOut << "LOOKUP_TABLE " << "default"<< endl; for (uint i = 0; i < nodeECMLocX.size(); i++) { ECMOut<<peripORexcm[i] <<endl ; } 
ECMOut.close(); // second output file for curvature estimation // std::string txtFileName = "./ECMFolder/ECMLocationExport_" + uniqueSymbol+ patch::to_string(outputFrameECM-1) + ".txt"; ofstream ECMLocationExport ; ECMLocationExport.open(txtFileName.c_str()); //ECMExport << "ECM pouch coordinates" << std::endl; for (uint i = 0; i < nodeECMLocX.size(); i++) { // if (peripORexcm[i]==excm) { ECMLocationExport<< nodeECMLocX[i] << " " << nodeECMLocY[i] << " " << 0.0 << " "<< peripORexcm[i]<<std::endl; // } } //ECMExport << "ECM lumen side coordinates" << std::endl; // for (uint i = 0; i < nodeECMLocX.size(); i++) { // if (peripORexcm[i]==perip) { // ECMLocationExport << nodeECMLocX[i] << " " << nodeECMLocY[i] << " " // << 0.0 << std::endl; // } // } ECMLocationExport.close(); //Third write file for ECM txtFileName = "./ECMFolder/ECMTensionExport_" + uniqueSymbol+ patch::to_string(outputFrameECM-1) + ".txt"; ofstream ECMTensionExport ; ECMTensionExport.open(txtFileName.c_str()); for (uint i = 0; i < nodeECMLocX.size(); i++) { ECMTensionExport<< linSpringAvgTension[i]<< " " << peripORexcm[i]<< std::endl; } ECMTensionExport.close(); /// //Fourth write file for ECM energyECM.totalEnergyECMOld=energyECM.totalEnergyECM ; energyECM.totalEnergyECM= energyECM.totalMorseEnergyECMCell + energyECM.totalAdhEnergyECMCell + energyECM.totalLinSpringEnergyECM ; std::string cSVFileName = "./ECMFolder/EnergyExport_" + uniqueSymbol+ ".CSV"; ofstream EnergyExport ; EnergyExport.open(cSVFileName.c_str(),ofstream::app); //EnergyExport <<"totalMorseEnergyCell " << "totalAdhEnergyCell "<< "totalMorseEnergy "<<"totalAdhEnergy "<< "totalLinSpringEnergy " << std::endl; EnergyExport <<curTime<<","<<energyECM.totalMorseEnergyECMCell << "," << energyECM.totalAdhEnergyECMCell<< "," << energyECM.totalLinSpringEnergyECM <<"," << energyECM.totalEnergyECM <<","<<energyECM.totalEnergyPrimeECM <<std::endl; } } // This is just to create a file to be able to generate the movie with consisten frames void SceECM:: PrintECMRemoved(double curTime) { lastPrintECM=lastPrintECM+1 ; if (lastPrintECM>=freqPlotData) { outputFrameECM++ ; lastPrintECM=0 ; cout << " I am in ECM removed print function" << endl ; // First ECM output file for paraview // std::string vtkFileName = "./ECMFolder/ECM_" + uniqueSymbol +patch::to_string(outputFrameECM-1) + ".vtk"; ofstream ECMOut; ECMOut.open(vtkFileName.c_str()); ECMOut<< "# vtk DataFile Version 3.0" << endl; ECMOut<< "Result for paraview 2d code" << endl; ECMOut << "ASCII" << endl; ECMOut << "DATASET UNSTRUCTURED_GRID" << std::endl; ECMOut << "POINTS " << nodeECMLocX.size() << " float" << std::endl; for (uint i = 0; i < nodeECMLocX.size(); i++) { ECMOut << -500.0 << " " << -500.0 << " " << 0.0 << std::endl; // Just out of domain } ECMOut<< std::endl; ECMOut<< "CELLS " << nodeECMLocX.size()<< " " << 3 *nodeECMLocX.size()<< std::endl; for (uint i = 0; i < (nodeECMLocX.size()-1); i++) { ECMOut << 2 << " " << indexECM[i] << " " << indexECM[i+1] << std::endl; } ECMOut << 2 << " " << indexECM[nodeECMLocX.size()-1] << " "<< indexECM[0] << std::endl; //last point to the first point ECMOut << "CELL_TYPES " << nodeECMLocX.size()<< endl; for (uint i = 0; i < nodeECMLocX.size() ; i++) { ECMOut << "3" << endl; } ECMOut << "POINT_DATA "<<nodeECMLocX.size() <<endl ; ECMOut << "SCALARS Avg_Tension " << "float"<< endl; ECMOut << "LOOKUP_TABLE " << "default"<< endl; for (uint i = 0; i < nodeECMLocX.size(); i++) { ECMOut<<linSpringAvgTension[i] <<endl ; } ECMOut << "SCALARS Node_Type " << "float"<< endl; ECMOut << 
"LOOKUP_TABLE " << "default"<< endl; for (uint i = 0; i < nodeECMLocX.size(); i++) { ECMOut<<peripORexcm[i] <<endl ; } ECMOut.close(); } } AniResumeData SceECM:: obtainResumeData() { AniResumeData aniResumeData ; thrust:: host_vector<double> hostTmpLocX; thrust:: host_vector<double> hostTmpLocY; thrust:: host_vector<EType> hostTmpType; hostTmpLocX.resize(numNodesECM) ; hostTmpLocY.resize(numNodesECM) ; hostTmpType.resize(numNodesECM) ; cout << " I am in obtainResumeData function" << endl ; thrust::copy ( thrust::make_zip_iterator( thrust::make_tuple(nodeECMLocX.begin(),nodeECMLocY.begin(),peripORexcm.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodeECMLocX.begin(),nodeECMLocY.begin(),peripORexcm.begin()))+numNodesECM, thrust::make_zip_iterator( thrust::make_tuple(hostTmpLocX.begin(),hostTmpLocY.begin(),hostTmpType.begin()))); cout << " I start passing to regular vector variables" << endl ; CVector tmp; for( int i=0 ; i<numNodesECM ; i++) { tmp=CVector (hostTmpLocX[i], hostTmpLocY[i], 0.0) ; aniResumeData.nodePosArr.push_back(tmp) ; aniResumeData.nodeECMType.push_back(hostTmpType[i]) ; } return aniResumeData ; } void SceECM::EquMotionCoef (double dt) { vector <double> sponLenHost(numNodesECM) ; vector <double> sponLenWithNext ; vector <double> sponLenWithPrev ; vector <double> distWithNext ; vector <double> distWithPrev ; vector <double> dampCoefHost ; sponLenWithNext.clear(); sponLenWithPrev.clear(); distWithNext.clear() ; distWithPrev.clear() ; hCoefLd.clear() ; hCoefUd.clear() ; hCoefD.clear() ; indexNext.clear() ; indexPrev.clear() ; dampCoefHost.clear() ; indexNext.resize(numNodesECM) ; indexPrev.resize(numNodesECM) ; dampCoefHost.resize(numNodesECM) ; thrust::copy(sponLen.begin(),sponLen.begin()+numNodesECM, sponLenHost.begin()) ; thrust::copy(dampCoef.begin(),dampCoef.begin()+numNodesECM, dampCoefHost.begin()) ; double k=stiffLevel[0] ; //Assumming ECM is homogenous in mechanical properties for ( int i=0 ; i< numNodesECM ; i++) { indexNext.at(i)=i+1 ; indexPrev.at(i)=i-1 ; if (i==numNodesECM-1){ indexNext.at(i)=0 ; } if (i==0){ indexPrev.at(i)=numNodesECM-1 ; } sponLenWithNext.push_back( 0.5*(sponLenHost[indexNext.at(i)]+sponLenHost[i]) ); sponLenWithPrev.push_back( 0.5*(sponLenHost[indexPrev.at(i)]+sponLenHost[i]) ); distWithNext.push_back(sqrt( pow(tmpHostNodeECMLocX[indexNext.at(i)]-tmpHostNodeECMLocX[i],2) + pow(tmpHostNodeECMLocY[indexNext.at(i)]-tmpHostNodeECMLocY[i],2))) ; distWithPrev.push_back(sqrt( pow(tmpHostNodeECMLocX[indexPrev.at(i)]-tmpHostNodeECMLocX[i],2) + pow(tmpHostNodeECMLocY[indexPrev.at(i)]-tmpHostNodeECMLocY[i],2))); } for ( int i=0 ; i< numNodesECM ; i++) { hCoefD.push_back (1 + k*dt/dampCoefHost.at(i)*( 2 - sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 ) - sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 ))) ; hCoefLd.push_back( k*dt/dampCoefHost.at(i)*(-1 + sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 ))) ; hCoefUd.push_back( k*dt/dampCoefHost.at(i)*(-1 + sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 ))) ; } #ifdef debugModeECM cout <<"max distance with next node is" <<*max_element ( distWithNext.begin(), distWithNext.begin()+numNodesECM) <<endl ; cout <<"min distance with next node is" << *min_element ( distWithNext.begin(), distWithNext.begin()+numNodesECM) <<endl ; cout <<"max distance with previous node is" <<*max_element ( distWithPrev.begin(), distWithPrev.begin()+numNodesECM) <<endl ; cout <<"min distance with previous node is" << *min_element ( distWithPrev.begin(), distWithPrev.begin()+numNodesECM) <<endl ; vector < 
double> hCoefDAbs; hCoefDAbs.clear() ; for ( int i=0 ; i< numNodesECM ; i++) { hCoefDAbs.push_back (abs(1 + k*dt/dampCoefHost.at(i)*( 2 - sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 ) - sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 )))) ; } cout <<"max main diag. elment is " << *max_element ( hCoefD.begin(), hCoefD.begin() +numNodesECM) <<endl ; cout <<"min main diag. element is " << *min_element ( hCoefD.begin(), hCoefD.begin() +numNodesECM) <<endl ; cout <<"min main Abs(diag.) element is " << *min_element ( hCoefDAbs.begin(), hCoefDAbs.begin() +numNodesECM) <<endl ; cout <<"max upper diag. element is " << *max_element ( hCoefUd.begin(), hCoefUd.begin()+numNodesECM) <<endl ; cout <<"min upper diag. element is " << *min_element ( hCoefUd.begin(), hCoefUd.begin()+numNodesECM) <<endl ; cout <<"max lower diag. element is " << *max_element ( hCoefLd.begin(), hCoefLd.begin()+numNodesECM) <<endl ; cout <<"min lower diag. element is " << *min_element ( hCoefLd.begin(), hCoefLd.begin()+numNodesECM) <<endl ; cout <<"stiffness, time step and first element of damping vector is " << endl ; cout << k <<","<< dt<<"," << dampCoefHost.at(0) << endl ; cout << "constants for stiffness matrix calculated " << endl ; cout << "last diagonal element is " << hCoefD.at(numNodesECM-1) << endl ; cout << " number of ECM nodes is "<< numNodesECM << endl ; # endif } void SceECM::MoveCellNodesByECMForces(int totalNodeCountForActiveCellsECM,int currentActiveCellCount, double dt, double Damp_CoefCell, double mitoticThreshold) { double* nodeECMLocXAddr= thrust::raw_pointer_cast ( &nodeECMLocX[0]) ; double* nodeECMLocYAddr= thrust::raw_pointer_cast ( &nodeECMLocY[0]) ; // bool* isActiveECM = thrust::raw_pointer_cast( // &isActiveECM[0]); EType* peripORexcmAddr= thrust::raw_pointer_cast ( &peripORexcm[0]) ; // double* nodeGrowProAddr = thrust::raw_pointer_cast( // &nodesPointerECM->getInfoVecs().nodeGrowPro[0]); double* cellGrowthProgress = thrust::raw_pointer_cast( &cellsPointerECM->getCellInfoVecs().growthProgress[0]); // move the nodes of epithelial cells //// find the closest ECM node to each each cell // int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ; thrust::counting_iterator<int> iBegin(0) ; thrust::counting_iterator<int> iBegin2(0) ; ////////////////////////////////////////// thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( make_permutation_iterator( cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(), make_transform_iterator(iBegin2, DivideFunctor2( maxAllNodePerCell))), make_transform_iterator (iBegin, DivideFunctor2(maxAllNodePerCell)), make_transform_iterator (iBegin, ModuloFunctor2(maxAllNodePerCell)), nodesPointerECM->getInfoVecs().nodeLocX.begin(), nodesPointerECM->getInfoVecs().nodeLocY.begin(), nodesPointerECM->getInfoVecs().nodeIsActive.begin(), nodesPointerECM->getInfoVecs().memNodeType1.begin() )), thrust::make_zip_iterator ( thrust:: make_tuple ( make_permutation_iterator( cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(), make_transform_iterator(iBegin2, DivideFunctor2( maxAllNodePerCell))), make_transform_iterator (iBegin, DivideFunctor2(maxAllNodePerCell)), make_transform_iterator (iBegin, ModuloFunctor2(maxAllNodePerCell)), nodesPointerECM->getInfoVecs().nodeLocX.begin(), nodesPointerECM->getInfoVecs().nodeLocY.begin(), nodesPointerECM->getInfoVecs().nodeIsActive.begin(), nodesPointerECM->getInfoVecs().memNodeType1.begin() ))+totalNodeCountForActiveCellsECM, thrust::make_zip_iterator ( thrust::make_tuple ( 
nodesPointerECM->getInfoVecs().nodeLocX.begin(), nodesPointerECM->getInfoVecs().nodeLocY.begin(), adhPairECM_Cell.begin(), morseEnergyCell.begin(), adhEnergyCell.begin())), MoveNodes2_Cell(nodeECMLocXAddr,nodeECMLocYAddr,maxMembrNodePerCell,numNodesECM,dt,Damp_CoefCell,peripORexcmAddr,currentActiveCellCount, cellGrowthProgress, mitoticThreshold));//, isActiveECM)); } void SceECM::CalLinSpringForce() { double* nodeECMLocXAddr= thrust::raw_pointer_cast ( &nodeECMLocX[0]) ; double* nodeECMLocYAddr= thrust::raw_pointer_cast ( &nodeECMLocY[0]) ; double* stiffLevelAddr=thrust::raw_pointer_cast ( &stiffLevel[0]) ; double* sponLenAddr =thrust::raw_pointer_cast ( &sponLen[0]) ; // bool* isActiveECM = thrust::raw_pointer_cast( // &isActiveECM[0]); thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( linSpringForceECMX.begin(), linSpringForceECMY.begin(), linSpringAvgTension.begin(), linSpringEnergy.begin())), LinSpringForceECM(numNodesECM,nodeECMLocXAddr,nodeECMLocYAddr,stiffLevelAddr,sponLenAddr));//, isActiveECM)); //////////////////////////////////// find the closest Cell to each ECM node /////////// /////////////////////////////////// //cout << " I am after FindCellNeighbor functor" << endl ; } void SceECM::CalBendSpringForce() { const double eCMBendStiff=6.0 ; // need to be an input double* nodeECMLocXAddr= thrust::raw_pointer_cast ( &nodeECMLocX[0]) ; double* nodeECMLocYAddr= thrust::raw_pointer_cast ( &nodeECMLocY[0]) ; // bool* isActiveECM = thrust::raw_pointer_cast( // &isActiveECM[0]); thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( fBendCenterX.begin(), fBendCenterY.begin(), fBendLeftX.begin(), fBendLeftY.begin(), fBendRightX.begin(), fBendRightY.begin())), CalBendECM(nodeECMLocXAddr,nodeECMLocYAddr,numNodesECM,eCMBendStiff));//, isActiveECM)); double* fBendLeftXAddr= thrust::raw_pointer_cast ( &fBendLeftX[0]) ; double* fBendLeftYAddr= thrust::raw_pointer_cast ( &fBendLeftY[0]) ; double* fBendRightXAddr= thrust::raw_pointer_cast ( &fBendRightX[0]) ; double* fBendRightYAddr= thrust::raw_pointer_cast ( &fBendRightY[0]) ; thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), fBendCenterX.begin(), fBendCenterY.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), fBendCenterX.begin(), fBendCenterY.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( bendSpringForceECMX.begin(), bendSpringForceECMY.begin())), SumBendForce(fBendLeftXAddr,fBendLeftYAddr,fBendRightXAddr,fBendRightYAddr,numNodesECM)); } void SceECM::CalCellForcesOnECM(double mitoticThreshold) { bool* nodeIsActiveAddr= thrust::raw_pointer_cast ( & (nodesPointerECM->getInfoVecs().nodeIsActive[0])) ; int * adhPairECM_CellAddr= thrust::raw_pointer_cast ( &adhPairECM_Cell[0]) ; //Old locations are chosen to make sure action-reaction balance of forces between ECM and cell nodes are fully satisfied. 
double* nodeCellLocXAddr= thrust::raw_pointer_cast ( &nodeCellLocXOld[0]) ; double* nodeCellLocYAddr= thrust::raw_pointer_cast ( &nodeCellLocYOld[0]) ; // bool* isActiveECM = thrust::raw_pointer_cast( // &isActiveECM[0]); double* nodeCellGrowProAddr = thrust::raw_pointer_cast( &nodesPointerECM->getInfoVecs().nodeGrowPro[0]); int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ; thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin(), cellNeighborId.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin(), cellNeighborId.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( memMorseForceECMX.begin(), memMorseForceECMY.begin(), morseEnergy.begin(), adhEnergy.begin())), MorseAndAdhForceECM(numCells,maxAllNodePerCell,maxMembrNodePerCell,nodeCellLocXAddr,nodeCellLocYAddr,nodeIsActiveAddr,adhPairECM_CellAddr/*, isActiveECM*/, nodeCellGrowProAddr, mitoticThreshold)); } void SceECM::CalSumForcesOnECM() { double dummy=0.0 ; thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( linSpringForceECMX.begin(), linSpringForceECMY.begin(), bendSpringForceECMX.begin(), bendSpringForceECMY.begin(), memMorseForceECMX.begin(), memMorseForceECMY.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( linSpringForceECMX.begin(), linSpringForceECMY.begin(), bendSpringForceECMX.begin(), bendSpringForceECMY.begin(), memMorseForceECMX.begin(), memMorseForceECMY.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( totalForceECMX.begin(), totalForceECMY.begin())), TotalECMForceCompute(dummy)); } void SceECM::CalSumOnlyExplicitForcesOnECM() { thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( bendSpringForceECMX.begin(), bendSpringForceECMY.begin(), memMorseForceECMX.begin(), memMorseForceECMY.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( bendSpringForceECMX.begin(), bendSpringForceECMY.begin(), memMorseForceECMX.begin(), memMorseForceECMY.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( totalExplicitForceECMX.begin(), totalExplicitForceECMY.begin())), TotalExplicitECMForceCompute()); } void SceECM::CalRHS(double dt) { thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( totalExplicitForceECMX.begin(), totalExplicitForceECMY.begin(), nodeECMLocX.begin(), nodeECMLocY.begin(), dampCoef.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( totalExplicitForceECMX.begin(), totalExplicitForceECMY.begin(), nodeECMLocX.begin(), nodeECMLocY.begin(), dampCoef.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( rHSX.begin(), rHSY.begin())), RHSCompute(dt)); } void SceECM::MoveNodesBySumAllForces(double dt) { // move the nodes of ECM thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( nodeECMLocX.begin(), nodeECMLocY.begin(), totalForceECMX.begin(), totalForceECMY.begin(), dampCoef.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( nodeECMLocX.begin(), nodeECMLocY.begin(), totalForceECMX.begin(), totalForceECMY.begin(), dampCoef.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( nodeECMLocX.begin(), nodeECMLocY.begin())), MoveNodesECM(dt)); } void SceECM::FindNeighborCandidateForCellsAndECMNodes() { double* nodeECMLocXAddr= thrust::raw_pointer_cast ( &nodeECMLocX[0]) ; double* nodeECMLocYAddr= thrust::raw_pointer_cast ( &nodeECMLocY[0]) ; double * basalCellLocXAddr= 
thrust::raw_pointer_cast ( & ( cellsPointerECM->getCellInfoVecs().basalLocX[0]) ) ; double * basalCellLocYAddr= thrust::raw_pointer_cast ( & ( cellsPointerECM->getCellInfoVecs().basalLocY[0]) ) ; EType* peripORexcmAddr= thrust::raw_pointer_cast ( &peripORexcm[0]) ; // bool * isActiveECMAddr = thrust::raw_pointer_cast( // &isActiveECM[0]); int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ; //// find the closest ECM node to each each cell // thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( cellsPointerECM->getCellInfoVecs().basalLocX.begin(), cellsPointerECM->getCellInfoVecs().basalLocY.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( cellsPointerECM->getCellInfoVecs().basalLocX.begin(), cellsPointerECM->getCellInfoVecs().basalLocY.begin()))+numCells, cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(), FindECMNeighborPerCell(nodeECMLocXAddr,nodeECMLocYAddr,maxTotalNodes));//, isActiveECMAddr)); thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( nodeECMLocX.begin(), nodeECMLocY.begin())), // isActiveECM.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( nodeECMLocX.begin(), nodeECMLocY.begin()))+numNodesECM, // isActiveECM.begin()))+numNodesECM, cellNeighborId.begin(), FindCellNeighborPerECMNode(basalCellLocXAddr,basalCellLocYAddr, numCells)); } void SceECM::AssignDampCoef() { thrust::transform ( peripORexcm.begin() ,peripORexcm.begin() +numNodesECM, dampCoef.begin(), AssignDamping(dampBasal,dampBC,dampApical) ); #ifdef debugModeECM for (int i=0 ; i<numNodesECM ; i++) { if (dampCoef[i] < smallNumber) { cout << "damping of element " << i << " is " << dampCoef[i] << " which is wrong" <<endl ; throw::invalid_argument ( "damping coefficients in ECM is not set correctly") ; } } #endif } int SceECM::decideIfAddECMNode_M(uint numECMNodes) { // decide if add ecm node given current active node count // uint maxECMNode = numNodesECM*3; // bool isInitPhase= nodes->isInitPhase ; // thrust::transform( // thrust::make_zip_iterator( // thrust::make_tuple(maxDistToRiVec.begin(), // maxTenIndxTypeVec.begin() // )), // thrust::make_zip_iterator( // thrust::make_tuple(maxDistToRiVec.begin(), // maxTenIndxTypeVec.begin() // )) // + curActCellCt, // isECMAddingNode, // ECMGrowFunc(numECMNodes, maxTotalNodes, maxLengthToAddECMNodes)); double max_Dist = -9999.9; int indx = -1; for (int i = 0; i < numECMNodes; i++){ if (i != numECMNodes-1){ double tmp_Dist = sqrt((nodeECMLocX[i+1] - nodeECMLocX[i])*(nodeECMLocX[i+1] - nodeECMLocX[i]) + (nodeECMLocY[i+1] - nodeECMLocY[i])*(nodeECMLocY[i+1] - nodeECMLocY[i])); if (tmp_Dist > max_Dist && tmp_Dist > 2.0*lknotECMPeripGPU){ max_Dist = tmp_Dist; indx = i; } } else{ double tmp_Dist = sqrt((nodeECMLocX[0] - nodeECMLocX[i])*(nodeECMLocX[0] - nodeECMLocX[i]) + (nodeECMLocY[0] - nodeECMLocY[i])*(nodeECMLocY[0] - nodeECMLocY[i])); if (tmp_Dist > max_Dist && tmp_Dist > 2.0*lknotECMPeripGPU){ max_Dist = tmp_Dist; indx = i; } } } return indx; } void SceECM::AddECMNode(int indx, uint numECMNodes){ std::cout<<"insertIndx = "<<indx+1<<std::endl; std::cout<<"numECMNodes = "<<numECMNodes<<std::endl; uint insertIndx = indx + 1; // newIndex double insertX, insertY; insertX = (nodeECMLocX[indx] + nodeECMLocX[indx+1])/2.0; insertY = (nodeECMLocY[indx] + nodeECMLocY[indx+1])/2.0; uint globalIndxEnd = numECMNodes; //membrane nodes are first. 
End position based on newindex uint globalIndexInsert = insertIndx; // if (insertIndx<=iDApical) { //since the current acrive membrane nodes is one more, it can not be the last ID. // iDApical=iDApical+1 ; // } // if (insertIndx<=iDBasal) { // iDBasal=iDBasal+1 ; // } for (uint i = globalIndxEnd; i >= globalIndexInsert; i--) { // isActiveECM[i] = isActiveECM[i - 1]; nodeECMLocX[i] = nodeECMLocX[i - 1]; nodeECMLocY[i] = nodeECMLocY[i - 1]; peripORexcm[i] = peripORexcm[i-1] ; sponLen[i] = sponLen[i-1]; dampCoef[i] = dampCoef[i-1]; } // isActiveECM[globalIndexInsert] = true; nodeECMLocX[globalIndexInsert] = insertX; std::cout<<"nodeECMLocX[globalIndexInsert] : "<<nodeECMLocX[globalIndexInsert]<<std::endl; nodeECMLocY[globalIndexInsert] = insertY; std::cout<<"nodeECMLocY[globalIndexInsert] : "<<nodeECMLocY[globalIndexInsert]<<std::endl; peripORexcm[globalIndexInsert] = peripORexcm[globalIndexInsert-1]; // to have the same type of the membrane node as at least one of its neighbors std::cout<<"peripORexcm[globalIndexInsert] : "<<peripORexcm[globalIndexInsert]<<std::endl; sponLen[globalIndexInsert] = sponLen[globalIndexInsert-1]; std::cout<<"sponLen[globalIndexInsert] : "<<sponLen[globalIndexInsert]<<std::endl; dampCoef[globalIndexInsert] = dampCoef[globalIndexInsert-1]; std::cout<<"dampCoef[globalIndexInsert] : "<<dampCoef[globalIndexInsert]<<std::endl; // if (_memNodeType[globalIndexInsert-1] != apical1 || _memNodeType[globalIndexInsert-1] != basal1){ // _memNodeType[globalIndexInsert] = _memNodeType[globalIndexInsert+1]; // } //return (curActCount + 1); // numECMNodes += 1; }; // void SceECM::calECMGrowSpeed_M() { // // reduce_by_key, find value of max tension and their index // thrust::counting_iterator<uint> iBegin(0); // uint maxNPerCell = allocPara_m.maxAllNodePerCell; // thrust::transform( // thrust::make_zip_iterator( // thrust::make_tuple( // ECMDistToRi.begin(), // ECMNodeType1.begin())), // thrust::make_zip_iterator( // thrust::make_tuple( // ECMbrDistToRi.begin(), // ECMNodeType1.begin())) // + numECMNodes, // thrust::make_zip_iterator( // thrust::make_tuple(maxDistToRiVec.begin(), // maxTenIndxTypeVec.begin())), // thrust::equal_to<uint>(), MaxWInfo()); // for (int i=0 ; i<cellInfoVecs.maxDistToRiVec.size() ; i++) { // cout << "the max distance in cell" << i << " is "<<cellInfoVecs.maxDistToRiVec[i] << endl ; // cout << "At index "<<cellInfoVecs.maxTenIndxVec[i]<<std::endl; // } // }
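// --- Illustrative sketch (added for clarity; not part of the original model) ---
// decideIfAddECMNode_M above scans every ECM segment (including the closing
// segment from the last node back to node 0) and returns the start index of the
// widest segment longer than twice the peripodial rest length, or -1 otherwise.
// Note that it compares against 2.0*lknotECMPeripGPU, a __constant__ device
// symbol; host code cannot read __constant__ symbols directly, so a host-side
// scan would use the host member lknotECMPerip (read from the config file in
// Initialize) instead. The helper below is a minimal, self-contained restatement
// of that scan over plain host vectors; its name and signature are illustrative
// assumptions, not part of the SceECM interface.
#include <cmath>
#include <cstddef>
#include <vector>

static int widestSegmentIndexSketch(const std::vector<double>& x,
                                    const std::vector<double>& y,
                                    double restLen) {
    int indx = -1;
    double maxDist = -1.0;
    const std::size_t n = x.size();
    for (std::size_t i = 0; i < n; ++i) {
        const std::size_t j = (i + 1) % n; // wrap the last segment back to node 0
        const double dist = std::sqrt((x[j] - x[i]) * (x[j] - x[i]) +
                                      (y[j] - y[i]) * (y[j] - y[i]));
        if (dist > maxDist && dist > 2.0 * restLen) { // widest segment above threshold
            maxDist = dist;
            indx = static_cast<int>(i);
        }
    }
    return indx;
}
// --------------------------------------------------------------------------------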
8f3da28d641f54c039d9608b84441e8aa6a43955.cu
#include "SceECM.h" #include "SceCells.h" // Because of forward declaration //# define debugModeECM // bending stiffness is given inside the code. It should be given as in input from a txt file. //isInitPhase bool variable is not active anymore. //Right now it is assumed that ECM stiffness is the same everywhere. __constant__ double sceInterCell_ECM[5]; //__constant__ double wLCPara_ECM[4]; __constant__ double restLenECMAdhSpringGPU ; __constant__ double maxLenECMAdhSpringGPU ; __constant__ double kAdhECMGPU ; __constant__ double stiffnessECMBasalGPU ; __constant__ double stiffnessECMBCGPU ; __constant__ double stiffnessECMPeripGPU ; __constant__ double lknotECMBasalGPU ; __constant__ double lknotECMBCGPU ; __constant__ double lknotECMPeripGPU ; const double smallNumber=.000001 ; namespace patch{ template <typename T> std::string to_string (const T& n) { std:: ostringstream stm ; stm << n ; return stm.str() ; } } __device__ void DefineECMStiffnessAndLknot ( EType nodeType, double & stiffness, double & sponLen) { if (nodeType==excm) { stiffness=stiffnessECMBasalGPU ; sponLen=lknotECMBasalGPU ; } if (nodeType==perip) { stiffness=stiffnessECMPeripGPU ; sponLen=lknotECMPeripGPU ; } if (nodeType==bc2) { stiffness=stiffnessECMBCGPU; sponLen=lknotECMBCGPU ; } } __device__ double calMorse_ECM(const double& linkLength ) { double forceValue=0.0 ; if (linkLength > sceInterCell_ECM[4]) { forceValue = 0; } else { forceValue = -sceInterCell_ECM[0] / sceInterCell_ECM[2] * exp(-linkLength / sceInterCell_ECM[2]) + sceInterCell_ECM[1] / sceInterCell_ECM[3] * exp(-linkLength / sceInterCell_ECM[3]); // if (forceValue > 0) { // forceValue = 0; // } } return (forceValue) ; } __device__ double calMorse_ECM_mitotic(const double& linkLength, double scaling ) { double forceValue=0.0 ; if (linkLength > sceInterCell_ECM[4]) { forceValue = 0; } else { forceValue = (1.0+(3.0-1.0)*scaling)*(-sceInterCell_ECM[0] / sceInterCell_ECM[2] * exp(-linkLength / sceInterCell_ECM[2]) + sceInterCell_ECM[1] / sceInterCell_ECM[3] * exp(-linkLength / sceInterCell_ECM[3])); // forceValue = -(1.0+(2.0-1.0)*scaling)*sceInterCell_ECM[0] / sceInterCell_ECM[2] // * exp(-linkLength / sceInterCell_ECM[2]) // + (1.0+(2.0-1.0)*scaling)*sceInterCell_ECM[1] / sceInterCell_ECM[3] // * exp(-linkLength / sceInterCell_ECM[3]); // if (forceValue > 0) { // forceValue = 0; // } } return (forceValue) ; } __device__ double calMorseEnergy_ECM(const double& linkLength ) { double energyValue=0.0 ; if (linkLength > sceInterCell_ECM[4]) { energyValue = 0; } else { energyValue = sceInterCell_ECM[0]* exp(-linkLength / sceInterCell_ECM[2]) - sceInterCell_ECM[1]* exp(-linkLength / sceInterCell_ECM[3]); } return (energyValue) ; } /* __device__ double calWLC_ECM(const double& linkLength ) { double x=linkLength/wLCPara_ECM[0] ; return (wLCPara_ECM[1]*( 6*x+ ( x*x*(3.0-2*x))/( (1-x)*(1-x) ) ) -wLCPara_ECM[2]/pow(linkLength,wLCPara_ECM[3]) ) ; } */ __device__ bool IsValidAdhPair(const double& dist ) { if (dist > restLenECMAdhSpringGPU && dist < maxLenECMAdhSpringGPU){ return true ; } else { return false ; } } __device__ bool IsValidAdhPairForNotInitPhase(const double& dist ) { if (dist > restLenECMAdhSpringGPU){ return true ; } else { return false ; } } __device__ double CalAdhECM(const double& dist ) { return (kAdhECMGPU*(dist-restLenECMAdhSpringGPU)); // in the function IsValid pair, distance already checked to be greater than neutral length } __device__ double CalAdhEnergy(const double& dist ) { return 
(0.5*kAdhECMGPU*(dist-restLenECMAdhSpringGPU)*(dist-restLenECMAdhSpringGPU)); // in the function IsValid pair, distance already checked to be greater than neutral length } EType SceECM:: ConvertStringToEType(string eNodeRead) { if (eNodeRead=="perip") { return perip ; } else if (eNodeRead=="bc2") { return bc2 ; } else if (eNodeRead=="excm") { return excm ; } else { cout << "Error in defining type of external nodes" << endl ; return excm ;// To just return something to avoid compiler complain } } SceECM::SceECM() { isECMNeighborSet=false ; eCMRemoved=false ; isECMNeighborResetPostDivision = false; } void SceECM::Initialize(uint maxAllNodePerCellECM, uint maxMembrNodePerCellECM, uint maxTotalNodesECM, int freqPlotData, string uniqueSymbol) { maxAllNodePerCell=maxAllNodePerCellECM ; maxMembrNodePerCell= maxMembrNodePerCellECM ; // maxTotalNodes=maxTotalNodesECM ; //Ali this->freqPlotData=freqPlotData ; this->uniqueSymbol=uniqueSymbol ; std::fstream readCoord_ECM ; std::fstream readInput_ECM ; int numberNodes_ECM ; double tmpPosX_ECM,tmpPosY_ECM ; vector<double> posXIni_ECM,posYIni_ECM ; vector <EType> eNodeVec ; int resumeSimulation = globalConfigVars.getConfigValue( "ResumeSimulation").toInt(); if (resumeSimulation==0) { cout << " In the ECM module, I am in start mode" << endl ; readCoord_ECM.open("./resources/coordinate_ECM21.txt") ; } else if(resumeSimulation==1) { cout << " In the ECM module, I am in resume mode" << endl ; std::string secondInputFileName = "./resources/DataFileECM_" + uniqueSymbol + "Resume.cfg"; readCoord_ECM.open(secondInputFileName.c_str()) ; } else{ throw std::invalid_argument(" ResumeSimulation parameter in the input file must be either 1 or 0. Error from ECM module"); } if (readCoord_ECM.is_open()) { cout << "ECM coordinates file opened successfully" <<endl ; } else { cout << "ECM coordinates file is not opened successfully" << endl ; } string inputInfoText ; string eNodeRead ; readCoord_ECM>>numberNodes_ECM ; for (int i=0 ; i<numberNodes_ECM ; i++){ readCoord_ECM>>tmpPosX_ECM>>tmpPosY_ECM>>eNodeRead ; posXIni_ECM.push_back(tmpPosX_ECM) ; posYIni_ECM.push_back(tmpPosY_ECM) ; EType eNode=ConvertStringToEType(eNodeRead) ; eNodeVec.push_back(eNode) ; } readInput_ECM.open("./resources/ECM_input.txt") ; if (readInput_ECM.is_open()) { cout << "ECM Mech input opened successfully" <<endl ; } else { cout << "ECM Mech input is not opened successfully" << endl ; } readInput_ECM>> inputInfoText ; for (int i=0 ; i<5; i++) { readInput_ECM>> mechPara_ECM.sceInterCellCPU_ECM[i] ; //=39.0 ; } // readInput_ECM>>restLenECMSpring ; // readInput_ECM>>eCMLinSpringStiff ; readInput_ECM>>restLenECMAdhSpring ; readInput_ECM>>maxLenECMAdhSpring ; readInput_ECM>>kAdhECM ; //for ( int i=0 ; i<4 ; i++) { // readInput_ECM>>mechPara_ECM.wLCParaCPU_ECM[i] ; // } std::fstream secondInput_ECM ; std:: string secondInputInfo ; //dummy std::string secondInputFileName = "./resources/ECM_" + uniqueSymbol + "input.cfg"; secondInput_ECM.open(secondInputFileName.c_str()) ; //secondInput_ECM.open("./resources/ECM_N01G00_input.cfg" ) ; if (secondInput_ECM.is_open()) { cout << "Second ECM Mech input opened successfully" <<endl ; } else { cout << "Second ECM Mech input is not opened successfully" << endl ; } secondInput_ECM>>secondInputInfo ; // just for information no use in the code secondInput_ECM>>stiffnessECMBasal ; secondInput_ECM>>stiffnessECMBC ; secondInput_ECM>>stiffnessECMPerip ; secondInput_ECM>>lknotECMBasal ; secondInput_ECM>>lknotECMBC ; secondInput_ECM>>lknotECMPerip ; secondInput_ECM>>dampBasal 
; secondInput_ECM>>dampBC ; secondInput_ECM>>dampApical ; cout <<" stiffness of ECM at the basal side is="<<stiffnessECMBasal <<endl ; cout <<" stiffness of ECM at boundary is="<<stiffnessECMBC<<endl ; cout <<" stiffness of ECM peripodial side is="<<stiffnessECMPerip<<endl ; cout <<" rest len basal ECM is="<<lknotECMBasal<<endl ; cout <<" rest len boundary ECM is= "<<lknotECMBC<<endl ; cout << "rest len peripodial ECM is=" <<lknotECMPerip <<endl ; cout << "Damping for basal ECM is="<<dampBasal<<endl ; cout << "Damping for boundary ECM is= "<<dampBC<<endl ; cout << "Damping for peripodial ECM is=" <<dampApical <<endl ; cout << "number of ECM nodes is"<< numberNodes_ECM <<endl ; maxTotalNodes = numberNodes_ECM*2; cout << "max number of ECM nodes is"<< maxTotalNodes <<endl ; for (int i=0 ; i<5; i++) { cout <<"Morse parameter number"<<i<<" is " <<mechPara_ECM.sceInterCellCPU_ECM[i]<<endl ; } //cout <<"rest length of ECM spring is "<<restLenECMSpring<<endl ; // cout <<"ECM spring stiffness is "<<eCMLinSpringStiff<<endl ; cout <<"ECM Membrane neutral adhesion length is "<<restLenECMAdhSpring<<endl ; cout <<"ECM Membrane max adhesion length is "<<maxLenECMAdhSpring<<endl ; cout <<"ECM Membrane adhesion stiffness is "<<kAdhECM<<endl ; cout << "ECM only applies adhesvie force" << endl ; //for ( int i=0 ; i<4 ; i++) { // cout<<"wLC parameter "<< i << " is "<<mechPara_ECM.wLCParaCPU_ECM[i]<<endl ; ; //} cudaMemcpyToSymbol(sceInterCell_ECM,mechPara_ECM.sceInterCellCPU_ECM ,5*sizeof(double)); //cudaMemcpyToSymbol(wLCPara_ECM,mechPara_ECM.wLCParaCPU_ECM // ,4*sizeof(double)); cudaMemcpyToSymbol(restLenECMAdhSpringGPU, &restLenECMAdhSpring,sizeof(double)); cudaMemcpyToSymbol(maxLenECMAdhSpringGPU, &maxLenECMAdhSpring,sizeof(double)); cudaMemcpyToSymbol(kAdhECMGPU, &kAdhECM,sizeof(double)); cudaMemcpyToSymbol(stiffnessECMPeripGPU, &stiffnessECMPerip,sizeof(double)); cudaMemcpyToSymbol(stiffnessECMBCGPU, &stiffnessECMBC,sizeof(double)); cudaMemcpyToSymbol(stiffnessECMBasalGPU, &stiffnessECMBasal,sizeof(double)); cudaMemcpyToSymbol(lknotECMPeripGPU, & lknotECMPerip,sizeof(double)); cudaMemcpyToSymbol(lknotECMBCGPU, & lknotECMBC,sizeof(double)); cudaMemcpyToSymbol(lknotECMBasalGPU, & lknotECMBasal,sizeof(double)); counter=100000 ; //large number lastPrintECM=1000000 ; // large number outputFrameECM=0 ; numNodesECM= numberNodes_ECM ; //(eCMMaxX-eCMMinX)/eCMMinDist ; // indexECM.resize(numNodesECM,0) ; // peripORexcm.resize(numNodesECM,perip) ; // dampCoef.resize(numNodesECM) ; // nodeECMLocX.resize(numNodesECM,0.0) ; // nodeECMLocY.resize(numNodesECM,0.0) ; // cellNeighborId.resize(numNodesECM,-1) ; // stiffLevel.resize(numNodesECM) ; // sponLen.resize(numNodesECM) ; // linSpringForceECMX.resize(numNodesECM,0.0); // linSpringForceECMY.resize(numNodesECM,0.0); // linSpringAvgTension.resize(numNodesECM,0.0); // linSpringEnergy.resize(numNodesECM,0.0); // morseEnergy.resize(numNodesECM,0.0); // adhEnergy.resize(numNodesECM,0.0); // bendSpringForceECMX.resize(numNodesECM,0.0); // bendSpringForceECMY.resize(numNodesECM,0.0); // memMorseForceECMX.resize(numNodesECM,0.0); // memMorseForceECMY.resize(numNodesECM,0.0); // fBendCenterX.resize(numNodesECM,0.0); // fBendCenterY.resize(numNodesECM,0.0); // fBendLeftX.resize(numNodesECM,0.0); // fBendLeftY.resize(numNodesECM,0.0); // fBendRightX.resize(numNodesECM,0.0); // fBendRightY.resize(numNodesECM,0.0); // totalForceECMX.resize(numNodesECM,0.0); // totalForceECMY.resize(numNodesECM,0.0); // totalExplicitForceECMX.resize(numNodesECM,0.0); // 
totalExplicitForceECMY.resize(numNodesECM,0.0); // rHSX.resize(numNodesECM,0.0); // rHSY.resize(numNodesECM,0.0); // //memNodeType.resize(maxTotalNodes,notAssigned1) ; // nodeIsActive.resize(numNodesECM,true) ; // thrust::sequence (indexECM.begin(),indexECM.begin()+numNodesECM); indexECM.resize(maxTotalNodes,0) ; peripORexcm.resize(maxTotalNodes,perip) ; dampCoef.resize(maxTotalNodes) ; nodeECMLocX.resize(maxTotalNodes,0.0) ; nodeECMLocY.resize(maxTotalNodes,0.0) ; // isActiveECM.resize(maxTotalNodes,false); cellNeighborId.resize(maxTotalNodes,-1) ; stiffLevel.resize(maxTotalNodes) ; sponLen.resize(maxTotalNodes) ; linSpringForceECMX.resize(maxTotalNodes,0.0); linSpringForceECMY.resize(maxTotalNodes,0.0); linSpringAvgTension.resize(maxTotalNodes,0.0); linSpringEnergy.resize(maxTotalNodes,0.0); morseEnergy.resize(maxTotalNodes,0.0); adhEnergy.resize(maxTotalNodes,0.0); bendSpringForceECMX.resize(maxTotalNodes,0.0); bendSpringForceECMY.resize(maxTotalNodes,0.0); memMorseForceECMX.resize(maxTotalNodes,0.0); memMorseForceECMY.resize(maxTotalNodes,0.0); fBendCenterX.resize(maxTotalNodes,0.0); fBendCenterY.resize(maxTotalNodes,0.0); fBendLeftX.resize(maxTotalNodes,0.0); fBendLeftY.resize(maxTotalNodes,0.0); fBendRightX.resize(maxTotalNodes,0.0); fBendRightY.resize(maxTotalNodes,0.0); totalForceECMX.resize(maxTotalNodes,0.0); totalForceECMY.resize(maxTotalNodes,0.0); totalExplicitForceECMX.resize(maxTotalNodes,0.0); totalExplicitForceECMY.resize(maxTotalNodes,0.0); rHSX.resize(maxTotalNodes,0.0); rHSY.resize(maxTotalNodes,0.0); //memNodeType.resize(maxTotalNodes,notAssigned1) ; nodeIsActive.resize(maxTotalNodes,true) ; thrust::sequence (indexECM.begin(),indexECM.begin()+maxTotalNodes); thrust::copy(posXIni_ECM.begin(),posXIni_ECM.end(),nodeECMLocX.begin()) ; thrust::copy(posYIni_ECM.begin(),posYIni_ECM.end(),nodeECMLocY.begin()) ; thrust::copy(eNodeVec.begin(),eNodeVec.end(),peripORexcm.begin()) ; // for (int i = 0; i < posXIni_ECM.size(); i++){ // isActiveECM[i] == true; // } AssignDampCoef() ; cout << "GPU level initial coordinates and type of external nodes are: " << endl ; for (int i=0; i<nodeECMLocX.size() ; i++) { cout<< nodeECMLocX[i]<<", "<<nodeECMLocY[i]<<", "<<peripORexcm[i] << endl; } PrintECM(0.0) ; std::string cSVFileName = "./ECMFolder/EnergyExport_" + uniqueSymbol + ".CSV"; ofstream EnergyExport ; EnergyExport.open(cSVFileName.c_str()); EnergyExport <<"Time,"<<"TotalMorseEnergyECM," << "TotalAdhEnergyECM,"<<"TotalLinSpringEnergy,"<<"TotalEnergy, " <<"TotalEnergyDerivative"<< std::endl; } //initilaization function finished void SceECM:: ApplyECMConstrain(int currentActiveCellCount, int totalNodeCountForActiveCellsECM, double curTime, double dt, double Damp_CoefCell, bool cellPolar, bool subCellPolar, bool isInitPhase, double mitoticThreshold){ bool implicit_solver_active = false ; if (eCMRemoved) { PrintECMRemoved(curTime); cout << "ECM is removed" << endl ; return ; } #ifdef debugModeECM cudaEvent_t start1, start2, start3, start4, start5, start6, start7, start8, stop; float elapsedTime1, elapsedTime2, elapsedTime3, elapsedTime4, elapsedTime5, elapsedTime6, elapsedTime7 , elapsedTime8 ; cudaEventCreate(&start1); cudaEventCreate(&start2); cudaEventCreate(&start3); cudaEventCreate(&start4); cudaEventCreate(&start5); cudaEventCreate(&start6); cudaEventCreate(&start7); cudaEventCreate(&start8); cudaEventCreate(&stop); cudaEventRecord(start1, 0); #endif nodeCellLocXOld.resize(totalNodeCountForActiveCellsECM) ; nodeCellLocYOld.resize(totalNodeCountForActiveCellsECM) ; 
adhPairECM_Cell.resize(totalNodeCountForActiveCellsECM,-1) ; morseEnergyCell.resize(totalNodeCountForActiveCellsECM,0.0); adhEnergyCell.resize(totalNodeCountForActiveCellsECM,0.0); thrust::copy(nodesPointerECM->getInfoVecs().nodeLocX.begin(),nodesPointerECM->getInfoVecs().nodeLocX.begin()+totalNodeCountForActiveCellsECM,nodeCellLocXOld.begin()) ; thrust::copy(nodesPointerECM->getInfoVecs().nodeLocY.begin(),nodesPointerECM->getInfoVecs().nodeLocY.begin()+totalNodeCountForActiveCellsECM,nodeCellLocYOld.begin()) ; #ifdef debugModeECM cudaEventRecord(start2, 0); cudaEventSynchronize(start2); cudaEventElapsedTime(&elapsedTime1, start1, start2); #endif // std::cout<<"ERROR 1"<<std::endl; thrust:: transform (peripORexcm.begin(), peripORexcm.begin()+numNodesECM, thrust::make_zip_iterator (thrust::make_tuple (stiffLevel.begin(),sponLen.begin())),MechProp()); // cout << " Mechanical properties after assignment is " << stiffLevel[0] << endl ; counter ++ ; //if (counter>=100 || curTime<(100*dt) || isECMNeighborSet==false) { if (curTime<(100*dt) || isECMNeighborSet==false) { isECMNeighborSet=true ; counter=0 ; FindNeighborCandidateForCellsAndECMNodes(); // int ECMAdd_indx = decideIfAddECMNode_M(numNodesECM); // if (ECMAdd_indx >= 0){ // AddECMNode(ECMAdd_indx, numNodesECM); // numNodesECM += 1; // std::cout<<"New ecm node added!"<<std::endl; // std::cout<<"Current numECMNodes : "<<numNodesECM<<std::endl; // } // FindNeighborCandidateForCellsAndECMNodes(); // for (int i = 0; i < numNodesECM; i++){ // std::cout<<cellNeighborId[i]<<std::endl; // } } // std::cout<<"ERROR 2"<<std::endl; if (cellsPointerECM->getCellInfoVecs().isPostDivision ==true || cellsPointerECM->getCellInfoVecs().isPostAddMembrNodes == true){ std::cout<<"Resetting ecm and cell neighbor info! post division!"<<std::endl; int ECMAdd_indx = decideIfAddECMNode_M(numNodesECM); // for (int i = 0; i < 5; i++){ if (ECMAdd_indx >= 0){ AddECMNode(ECMAdd_indx, numNodesECM); numNodesECM += 1; std::cout<<"New ecm node added!"<<std::endl; std::cout<<"Current numECMNodes : "<<numNodesECM<<std::endl; } // } FindNeighborCandidateForCellsAndECMNodes(); isECMNeighborResetPostDivision=true; // for (int i = 0; i < numNodesECM; i++){ // std::cout<<cellNeighborId[i]<<std::endl; // } // std::cout<<"SponLen : "<<std::endl; // for (int i = 0; i < numNodesECM; i++){ // std::cout<<sponLen[i]<<std::endl; // } } // std::cout<<"ERROR 3"<<std::endl; #ifdef debugModeECM cudaEventRecord(start3, 0); cudaEventSynchronize(start3); cudaEventElapsedTime(&elapsedTime2, start2, start3); #endif MoveCellNodesByECMForces(totalNodeCountForActiveCellsECM,currentActiveCellCount,dt, Damp_CoefCell, mitoticThreshold) ; // std::cout<<"ERROR 4"<<std::endl; /* To reduce computational cost energyECM.totalMorseEnergyCellECM = thrust::reduce( morseEnergyCell.begin(),morseEnergyCell.begin()+totalNodeCountForActiveCellsECM,(double) 0.0, thrust::plus<double>() ); energyECM.totalAdhEnergyCellECM = thrust::reduce( adhEnergyCell.begin() ,adhEnergyCell.begin() +totalNodeCountForActiveCellsECM,(double) 0.0, thrust::plus<double>() ); */ CalLinSpringForce(); // std::cout<<"ERROR 5"<<std::endl; CalBendSpringForce(); // std::cout<<"ERROR 6"<<std::endl; #ifdef debugModeECM cudaEventRecord(start4, 0); cudaEventSynchronize(start4); cudaEventElapsedTime(&elapsedTime3, start3, start4); #endif CalCellForcesOnECM(mitoticThreshold) ; // std::cout<<"ERROR 7"<<std::endl; //energyECM.totalLinSpringEnergyECM = 0.5 * ( thrust::reduce( linSpringEnergy.begin(),linSpringEnergy.begin()+numNodesECM,(double) 0.0, 
thrust::plus<double>() )); //to make sure it is based on the distance used for action force calculation. /* To reduce computational cost energyECM.totalMorseEnergyECMCell = thrust::reduce( morseEnergy.begin(),morseEnergy.begin()+numNodesECM,(double) 0.0, thrust::plus<double>() ); energyECM.totalAdhEnergyECMCell = thrust::reduce( adhEnergy.begin() ,adhEnergy.begin() +numNodesECM,(double) 0.0, thrust::plus<double>() ); */ if (!implicit_solver_active) { // Calculate summation of all forces and move nodes if explicit solver is going to be used CalSumForcesOnECM() ; MoveNodesBySumAllForces(dt) ; } // std::cout<<"ERROR 8"<<std::endl; // if (implicit_solver_active) { // //Calculate right hand side of implicit solver which includes explicit forces // CalSumOnlyExplicitForcesOnECM() ; // CalRHS(dt) ; // } #ifdef debugModeECM cudaEventRecord(start5, 0); cudaEventSynchronize(start5); cudaEventElapsedTime(&elapsedTime4, start4, start5); #endif //Create tmp CPU vectors for using in implicit solver. Declariation is not acceptable to be inisde the if condition // vector <double> tmpRHSX(numNodesECM); // vector <double> tmpRHSY(numNodesECM); // tmpHostNodeECMLocX.resize(numNodesECM); // tmpHostNodeECMLocY.resize(numNodesECM); // if (implicit_solver_active) { // // Copy ECM locations from GPU to CPU if implicit solver is used // thrust::copy (rHSX.begin(), rHSX.begin()+numNodesECM, tmpRHSX.begin()); // thrust::copy (rHSY.begin(), rHSY.begin()+numNodesECM, tmpRHSY.begin()); // thrust::copy (nodeECMLocX.begin(), nodeECMLocX.begin()+numNodesECM, tmpHostNodeECMLocX.begin()); // thrust::copy (nodeECMLocY.begin(), nodeECMLocY.begin()+numNodesECM, tmpHostNodeECMLocY.begin()); // //cout << "max RHSX is " << *max_element(tmpRHSX.begin(), tmpRHSX.begin()+numNodesECM) << endl ; // //cout << "min RHSX is " << *min_element(tmpRHSX.begin(), tmpRHSX.begin()+numNodesECM) << endl ; // //cout << "max RHSY is " << *max_element(tmpRHSY.begin(), tmpRHSY.begin()+numNodesECM) << endl ; // //cout << "min RHSY is " << *min_element(tmpRHSY.begin(), tmpRHSY.begin()+numNodesECM) << endl ; // } #ifdef debugModeECM cudaEventRecord(start6, 0); cudaEventSynchronize(start6); cudaEventElapsedTime(&elapsedTime5, start5, start6); #endif // if (implicit_solver_active) { // // setting up eqaution of motion if implicit solver is used // EquMotionCoef (dt); // } #ifdef debugModeECM cudaEventRecord(start7, 0); cudaEventSynchronize(start7); cudaEventElapsedTime(&elapsedTime6, start6, start7); #endif // if (implicit_solver_active) { // // Fetch the implicit solver and update ECM location if implicit solver is used // tmpHostNodeECMLocX =solverPointer->SOR3DiagPeriodic(nodeIsActive,hCoefLd, hCoefD, hCoefUd,tmpRHSX,indexPrev, indexNext, tmpHostNodeECMLocX); // tmpHostNodeECMLocY =solverPointer->SOR3DiagPeriodic(nodeIsActive,hCoefLd, hCoefD, hCoefUd,tmpRHSY,indexPrev,indexNext, tmpHostNodeECMLocY); // // copy ECM node locations back from CPU to GPU if implicit solver is used // thrust::copy (tmpHostNodeECMLocX.begin(), tmpHostNodeECMLocX.begin()+numNodesECM, nodeECMLocX.begin()); // thrust::copy (tmpHostNodeECMLocY.begin(), tmpHostNodeECMLocY.begin()+numNodesECM, nodeECMLocY.begin()); // } #ifdef debugModeECM cudaEventRecord(start8, 0); cudaEventSynchronize(start8); cudaEventElapsedTime(&elapsedTime7, start7, start8); #endif /* To reduce computational cost cout << "total Morse energy for cell-ECM is= "<< energyECM.totalMorseEnergyCellECM << endl ; cout << "total Morse energy for ECM-cell is= "<< energyECM.totalMorseEnergyECMCell << endl ; cout << 
"total adhesion energy for cell-ECM is= "<< energyECM.totalAdhEnergyCellECM << endl ; cout << "total adhesion energy for ECM-cell is= "<< energyECM.totalAdhEnergyECMCell << endl ; //assert (abs (energyECM.totalMorseEnergyCellECM-energyECM.totalMorseEnergyECMCell)<1.0) ; //assert (abs (energyECM.totalAdhEnergyCellECM- energyECM.totalAdhEnergyECMCell) <1.0) ; if ( (abs (energyECM.totalMorseEnergyCellECM-energyECM.totalMorseEnergyECMCell)>1.0) || (abs (energyECM.totalAdhEnergyCellECM- energyECM.totalAdhEnergyECMCell) >1.0) ) { cout << "Warning: Action and reaction forces in the ECM do not match each other" << endl ; } */ # ifdef debugModeECM cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime8, start8, stop); std::cout << "time 1 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime1 << endl ; std::cout << "time 2 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime2 << endl ; std::cout << "time 3 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime3 << endl ; std::cout << "time 4 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime4 << endl ; std::cout << "time 5 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime5 << endl ; std::cout << "time 6 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime6 << endl ; std::cout << "time 7 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime7 << endl ; std::cout << "time 8 spent in ECM module for moving the membrane node of cells and ECM nodes are: " << elapsedTime8 << endl ; #endif //throw std::invalid_argument(" Solver called properly and I want to stop the code"); PrintECM(curTime); // std::cout<<"ERROR 9"<<std::endl; } void SceECM:: PrintECM(double curTime) { lastPrintECM=lastPrintECM+1 ; if (lastPrintECM>=freqPlotData) { outputFrameECM++ ; lastPrintECM=0 ; cout << " I am in regular print function" << endl ; // First ECM output file for paraview // std::string vtkFileName = "./ECMFolder/ECM_" + uniqueSymbol +patch::to_string(outputFrameECM-1) + ".vtk"; ofstream ECMOut; ECMOut.open(vtkFileName.c_str()); ECMOut<< "# vtk DataFile Version 3.0" << endl; ECMOut<< "Result for paraview 2d code" << endl; ECMOut << "ASCII" << endl; ECMOut << "DATASET UNSTRUCTURED_GRID" << std::endl; ECMOut << "POINTS " << nodeECMLocX.size() << " float" << std::endl; for (uint i = 0; i < nodeECMLocX.size(); i++) { ECMOut << nodeECMLocX[i] << " " << nodeECMLocY[i] << " " << 0.0 << std::endl; } ECMOut<< std::endl; ECMOut<< "CELLS " << nodeECMLocX.size()<< " " << 3 *nodeECMLocX.size()<< std::endl; for (uint i = 0; i < (nodeECMLocX.size()-1); i++) { ECMOut << 2 << " " << indexECM[i] << " " << indexECM[i+1] << std::endl; } ECMOut << 2 << " " << indexECM[nodeECMLocX.size()-1] << " "<< indexECM[0] << std::endl; //last point to the first point ECMOut << "CELL_TYPES " << nodeECMLocX.size()<< endl; for (uint i = 0; i < nodeECMLocX.size() ; i++) { ECMOut << "3" << endl; } ECMOut << "POINT_DATA "<<nodeECMLocX.size() <<endl ; ECMOut << "SCALARS Avg_Tension " << "float"<< endl; ECMOut << "LOOKUP_TABLE " << "default"<< endl; for (uint i = 0; i < nodeECMLocX.size(); i++) { ECMOut<<linSpringAvgTension[i] <<endl ; } ECMOut << "SCALARS Node_Type " << "float"<< endl; ECMOut << "LOOKUP_TABLE " << "default"<< endl; for (uint i = 0; i < nodeECMLocX.size(); i++) { 
ECMOut<<peripORexcm[i] <<endl ; } ECMOut.close(); // second output file for curvature estimation // std::string txtFileName = "./ECMFolder/ECMLocationExport_" + uniqueSymbol+ patch::to_string(outputFrameECM-1) + ".txt"; ofstream ECMLocationExport ; ECMLocationExport.open(txtFileName.c_str()); //ECMExport << "ECM pouch coordinates" << std::endl; for (uint i = 0; i < nodeECMLocX.size(); i++) { // if (peripORexcm[i]==excm) { ECMLocationExport<< nodeECMLocX[i] << " " << nodeECMLocY[i] << " " << 0.0 << " "<< peripORexcm[i]<<std::endl; // } } //ECMExport << "ECM lumen side coordinates" << std::endl; // for (uint i = 0; i < nodeECMLocX.size(); i++) { // if (peripORexcm[i]==perip) { // ECMLocationExport << nodeECMLocX[i] << " " << nodeECMLocY[i] << " " // << 0.0 << std::endl; // } // } ECMLocationExport.close(); //Third write file for ECM txtFileName = "./ECMFolder/ECMTensionExport_" + uniqueSymbol+ patch::to_string(outputFrameECM-1) + ".txt"; ofstream ECMTensionExport ; ECMTensionExport.open(txtFileName.c_str()); for (uint i = 0; i < nodeECMLocX.size(); i++) { ECMTensionExport<< linSpringAvgTension[i]<< " " << peripORexcm[i]<< std::endl; } ECMTensionExport.close(); /// //Fourth write file for ECM energyECM.totalEnergyECMOld=energyECM.totalEnergyECM ; energyECM.totalEnergyECM= energyECM.totalMorseEnergyECMCell + energyECM.totalAdhEnergyECMCell + energyECM.totalLinSpringEnergyECM ; std::string cSVFileName = "./ECMFolder/EnergyExport_" + uniqueSymbol+ ".CSV"; ofstream EnergyExport ; EnergyExport.open(cSVFileName.c_str(),ofstream::app); //EnergyExport <<"totalMorseEnergyCell " << "totalAdhEnergyCell "<< "totalMorseEnergy "<<"totalAdhEnergy "<< "totalLinSpringEnergy " << std::endl; EnergyExport <<curTime<<","<<energyECM.totalMorseEnergyECMCell << "," << energyECM.totalAdhEnergyECMCell<< "," << energyECM.totalLinSpringEnergyECM <<"," << energyECM.totalEnergyECM <<","<<energyECM.totalEnergyPrimeECM <<std::endl; } } // This is just to create a file to be able to generate the movie with consisten frames void SceECM:: PrintECMRemoved(double curTime) { lastPrintECM=lastPrintECM+1 ; if (lastPrintECM>=freqPlotData) { outputFrameECM++ ; lastPrintECM=0 ; cout << " I am in ECM removed print function" << endl ; // First ECM output file for paraview // std::string vtkFileName = "./ECMFolder/ECM_" + uniqueSymbol +patch::to_string(outputFrameECM-1) + ".vtk"; ofstream ECMOut; ECMOut.open(vtkFileName.c_str()); ECMOut<< "# vtk DataFile Version 3.0" << endl; ECMOut<< "Result for paraview 2d code" << endl; ECMOut << "ASCII" << endl; ECMOut << "DATASET UNSTRUCTURED_GRID" << std::endl; ECMOut << "POINTS " << nodeECMLocX.size() << " float" << std::endl; for (uint i = 0; i < nodeECMLocX.size(); i++) { ECMOut << -500.0 << " " << -500.0 << " " << 0.0 << std::endl; // Just out of domain } ECMOut<< std::endl; ECMOut<< "CELLS " << nodeECMLocX.size()<< " " << 3 *nodeECMLocX.size()<< std::endl; for (uint i = 0; i < (nodeECMLocX.size()-1); i++) { ECMOut << 2 << " " << indexECM[i] << " " << indexECM[i+1] << std::endl; } ECMOut << 2 << " " << indexECM[nodeECMLocX.size()-1] << " "<< indexECM[0] << std::endl; //last point to the first point ECMOut << "CELL_TYPES " << nodeECMLocX.size()<< endl; for (uint i = 0; i < nodeECMLocX.size() ; i++) { ECMOut << "3" << endl; } ECMOut << "POINT_DATA "<<nodeECMLocX.size() <<endl ; ECMOut << "SCALARS Avg_Tension " << "float"<< endl; ECMOut << "LOOKUP_TABLE " << "default"<< endl; for (uint i = 0; i < nodeECMLocX.size(); i++) { ECMOut<<linSpringAvgTension[i] <<endl ; } ECMOut << "SCALARS Node_Type " << 
"float"<< endl; ECMOut << "LOOKUP_TABLE " << "default"<< endl; for (uint i = 0; i < nodeECMLocX.size(); i++) { ECMOut<<peripORexcm[i] <<endl ; } ECMOut.close(); } } AniResumeData SceECM:: obtainResumeData() { AniResumeData aniResumeData ; thrust:: host_vector<double> hostTmpLocX; thrust:: host_vector<double> hostTmpLocY; thrust:: host_vector<EType> hostTmpType; hostTmpLocX.resize(numNodesECM) ; hostTmpLocY.resize(numNodesECM) ; hostTmpType.resize(numNodesECM) ; cout << " I am in obtainResumeData function" << endl ; thrust::copy ( thrust::make_zip_iterator( thrust::make_tuple(nodeECMLocX.begin(),nodeECMLocY.begin(),peripORexcm.begin())), thrust::make_zip_iterator( thrust::make_tuple(nodeECMLocX.begin(),nodeECMLocY.begin(),peripORexcm.begin()))+numNodesECM, thrust::make_zip_iterator( thrust::make_tuple(hostTmpLocX.begin(),hostTmpLocY.begin(),hostTmpType.begin()))); cout << " I start passing to regular vector variables" << endl ; CVector tmp; for( int i=0 ; i<numNodesECM ; i++) { tmp=CVector (hostTmpLocX[i], hostTmpLocY[i], 0.0) ; aniResumeData.nodePosArr.push_back(tmp) ; aniResumeData.nodeECMType.push_back(hostTmpType[i]) ; } return aniResumeData ; } void SceECM::EquMotionCoef (double dt) { vector <double> sponLenHost(numNodesECM) ; vector <double> sponLenWithNext ; vector <double> sponLenWithPrev ; vector <double> distWithNext ; vector <double> distWithPrev ; vector <double> dampCoefHost ; sponLenWithNext.clear(); sponLenWithPrev.clear(); distWithNext.clear() ; distWithPrev.clear() ; hCoefLd.clear() ; hCoefUd.clear() ; hCoefD.clear() ; indexNext.clear() ; indexPrev.clear() ; dampCoefHost.clear() ; indexNext.resize(numNodesECM) ; indexPrev.resize(numNodesECM) ; dampCoefHost.resize(numNodesECM) ; thrust::copy(sponLen.begin(),sponLen.begin()+numNodesECM, sponLenHost.begin()) ; thrust::copy(dampCoef.begin(),dampCoef.begin()+numNodesECM, dampCoefHost.begin()) ; double k=stiffLevel[0] ; //Assumming ECM is homogenous in mechanical properties for ( int i=0 ; i< numNodesECM ; i++) { indexNext.at(i)=i+1 ; indexPrev.at(i)=i-1 ; if (i==numNodesECM-1){ indexNext.at(i)=0 ; } if (i==0){ indexPrev.at(i)=numNodesECM-1 ; } sponLenWithNext.push_back( 0.5*(sponLenHost[indexNext.at(i)]+sponLenHost[i]) ); sponLenWithPrev.push_back( 0.5*(sponLenHost[indexPrev.at(i)]+sponLenHost[i]) ); distWithNext.push_back(sqrt( pow(tmpHostNodeECMLocX[indexNext.at(i)]-tmpHostNodeECMLocX[i],2) + pow(tmpHostNodeECMLocY[indexNext.at(i)]-tmpHostNodeECMLocY[i],2))) ; distWithPrev.push_back(sqrt( pow(tmpHostNodeECMLocX[indexPrev.at(i)]-tmpHostNodeECMLocX[i],2) + pow(tmpHostNodeECMLocY[indexPrev.at(i)]-tmpHostNodeECMLocY[i],2))); } for ( int i=0 ; i< numNodesECM ; i++) { hCoefD.push_back (1 + k*dt/dampCoefHost.at(i)*( 2 - sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 ) - sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 ))) ; hCoefLd.push_back( k*dt/dampCoefHost.at(i)*(-1 + sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 ))) ; hCoefUd.push_back( k*dt/dampCoefHost.at(i)*(-1 + sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 ))) ; } #ifdef debugModeECM cout <<"max distance with next node is" <<*max_element ( distWithNext.begin(), distWithNext.begin()+numNodesECM) <<endl ; cout <<"min distance with next node is" << *min_element ( distWithNext.begin(), distWithNext.begin()+numNodesECM) <<endl ; cout <<"max distance with previous node is" <<*max_element ( distWithPrev.begin(), distWithPrev.begin()+numNodesECM) <<endl ; cout <<"min distance with previous node is" << *min_element ( distWithPrev.begin(), 
distWithPrev.begin()+numNodesECM) <<endl ; vector < double> hCoefDAbs; hCoefDAbs.clear() ; for ( int i=0 ; i< numNodesECM ; i++) { hCoefDAbs.push_back (abs(1 + k*dt/dampCoefHost.at(i)*( 2 - sponLenWithPrev.at(i)/(distWithPrev.at(i) + 0.0001 ) - sponLenWithNext.at(i)/(distWithNext.at(i) + 0.0001 )))) ; } cout <<"max main diag. elment is " << *max_element ( hCoefD.begin(), hCoefD.begin() +numNodesECM) <<endl ; cout <<"min main diag. element is " << *min_element ( hCoefD.begin(), hCoefD.begin() +numNodesECM) <<endl ; cout <<"min main Abs(diag.) element is " << *min_element ( hCoefDAbs.begin(), hCoefDAbs.begin() +numNodesECM) <<endl ; cout <<"max upper diag. element is " << *max_element ( hCoefUd.begin(), hCoefUd.begin()+numNodesECM) <<endl ; cout <<"min upper diag. element is " << *min_element ( hCoefUd.begin(), hCoefUd.begin()+numNodesECM) <<endl ; cout <<"max lower diag. element is " << *max_element ( hCoefLd.begin(), hCoefLd.begin()+numNodesECM) <<endl ; cout <<"min lower diag. element is " << *min_element ( hCoefLd.begin(), hCoefLd.begin()+numNodesECM) <<endl ; cout <<"stiffness, time step and first element of damping vector is " << endl ; cout << k <<","<< dt<<"," << dampCoefHost.at(0) << endl ; cout << "constants for stiffness matrix calculated " << endl ; cout << "last diagonal element is " << hCoefD.at(numNodesECM-1) << endl ; cout << " number of ECM nodes is "<< numNodesECM << endl ; # endif } void SceECM::MoveCellNodesByECMForces(int totalNodeCountForActiveCellsECM,int currentActiveCellCount, double dt, double Damp_CoefCell, double mitoticThreshold) { double* nodeECMLocXAddr= thrust::raw_pointer_cast ( &nodeECMLocX[0]) ; double* nodeECMLocYAddr= thrust::raw_pointer_cast ( &nodeECMLocY[0]) ; // bool* isActiveECM = thrust::raw_pointer_cast( // &isActiveECM[0]); EType* peripORexcmAddr= thrust::raw_pointer_cast ( &peripORexcm[0]) ; // double* nodeGrowProAddr = thrust::raw_pointer_cast( // &nodesPointerECM->getInfoVecs().nodeGrowPro[0]); double* cellGrowthProgress = thrust::raw_pointer_cast( &cellsPointerECM->getCellInfoVecs().growthProgress[0]); // move the nodes of epithelial cells //// find the closest ECM node to each each cell // int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ; thrust::counting_iterator<int> iBegin(0) ; thrust::counting_iterator<int> iBegin2(0) ; ////////////////////////////////////////// thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( make_permutation_iterator( cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(), make_transform_iterator(iBegin2, DivideFunctor2( maxAllNodePerCell))), make_transform_iterator (iBegin, DivideFunctor2(maxAllNodePerCell)), make_transform_iterator (iBegin, ModuloFunctor2(maxAllNodePerCell)), nodesPointerECM->getInfoVecs().nodeLocX.begin(), nodesPointerECM->getInfoVecs().nodeLocY.begin(), nodesPointerECM->getInfoVecs().nodeIsActive.begin(), nodesPointerECM->getInfoVecs().memNodeType1.begin() )), thrust::make_zip_iterator ( thrust:: make_tuple ( make_permutation_iterator( cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(), make_transform_iterator(iBegin2, DivideFunctor2( maxAllNodePerCell))), make_transform_iterator (iBegin, DivideFunctor2(maxAllNodePerCell)), make_transform_iterator (iBegin, ModuloFunctor2(maxAllNodePerCell)), nodesPointerECM->getInfoVecs().nodeLocX.begin(), nodesPointerECM->getInfoVecs().nodeLocY.begin(), nodesPointerECM->getInfoVecs().nodeIsActive.begin(), nodesPointerECM->getInfoVecs().memNodeType1.begin() ))+totalNodeCountForActiveCellsECM, thrust::make_zip_iterator 
( thrust::make_tuple ( nodesPointerECM->getInfoVecs().nodeLocX.begin(), nodesPointerECM->getInfoVecs().nodeLocY.begin(), adhPairECM_Cell.begin(), morseEnergyCell.begin(), adhEnergyCell.begin())), MoveNodes2_Cell(nodeECMLocXAddr,nodeECMLocYAddr,maxMembrNodePerCell,numNodesECM,dt,Damp_CoefCell,peripORexcmAddr,currentActiveCellCount, cellGrowthProgress, mitoticThreshold));//, isActiveECM)); } void SceECM::CalLinSpringForce() { double* nodeECMLocXAddr= thrust::raw_pointer_cast ( &nodeECMLocX[0]) ; double* nodeECMLocYAddr= thrust::raw_pointer_cast ( &nodeECMLocY[0]) ; double* stiffLevelAddr=thrust::raw_pointer_cast ( &stiffLevel[0]) ; double* sponLenAddr =thrust::raw_pointer_cast ( &sponLen[0]) ; // bool* isActiveECM = thrust::raw_pointer_cast( // &isActiveECM[0]); thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( linSpringForceECMX.begin(), linSpringForceECMY.begin(), linSpringAvgTension.begin(), linSpringEnergy.begin())), LinSpringForceECM(numNodesECM,nodeECMLocXAddr,nodeECMLocYAddr,stiffLevelAddr,sponLenAddr));//, isActiveECM)); //////////////////////////////////// find the closest Cell to each ECM node /////////// /////////////////////////////////// //cout << " I am after FindCellNeighbor functor" << endl ; } void SceECM::CalBendSpringForce() { const double eCMBendStiff=6.0 ; // need to be an input double* nodeECMLocXAddr= thrust::raw_pointer_cast ( &nodeECMLocX[0]) ; double* nodeECMLocYAddr= thrust::raw_pointer_cast ( &nodeECMLocY[0]) ; // bool* isActiveECM = thrust::raw_pointer_cast( // &isActiveECM[0]); thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( fBendCenterX.begin(), fBendCenterY.begin(), fBendLeftX.begin(), fBendLeftY.begin(), fBendRightX.begin(), fBendRightY.begin())), CalBendECM(nodeECMLocXAddr,nodeECMLocYAddr,numNodesECM,eCMBendStiff));//, isActiveECM)); double* fBendLeftXAddr= thrust::raw_pointer_cast ( &fBendLeftX[0]) ; double* fBendLeftYAddr= thrust::raw_pointer_cast ( &fBendLeftY[0]) ; double* fBendRightXAddr= thrust::raw_pointer_cast ( &fBendRightX[0]) ; double* fBendRightYAddr= thrust::raw_pointer_cast ( &fBendRightY[0]) ; thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), fBendCenterX.begin(), fBendCenterY.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), fBendCenterX.begin(), fBendCenterY.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( bendSpringForceECMX.begin(), bendSpringForceECMY.begin())), SumBendForce(fBendLeftXAddr,fBendLeftYAddr,fBendRightXAddr,fBendRightYAddr,numNodesECM)); } void SceECM::CalCellForcesOnECM(double mitoticThreshold) { bool* nodeIsActiveAddr= thrust::raw_pointer_cast ( & (nodesPointerECM->getInfoVecs().nodeIsActive[0])) ; int * adhPairECM_CellAddr= thrust::raw_pointer_cast ( &adhPairECM_Cell[0]) ; //Old locations are chosen to make sure action-reaction balance of forces between ECM and cell nodes are fully satisfied. 
double* nodeCellLocXAddr= thrust::raw_pointer_cast ( &nodeCellLocXOld[0]) ; double* nodeCellLocYAddr= thrust::raw_pointer_cast ( &nodeCellLocYOld[0]) ; // bool* isActiveECM = thrust::raw_pointer_cast( // &isActiveECM[0]); double* nodeCellGrowProAddr = thrust::raw_pointer_cast( &nodesPointerECM->getInfoVecs().nodeGrowPro[0]); int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ; thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin(), cellNeighborId.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( indexECM.begin(), nodeECMLocX.begin(), nodeECMLocY.begin(), cellNeighborId.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( memMorseForceECMX.begin(), memMorseForceECMY.begin(), morseEnergy.begin(), adhEnergy.begin())), MorseAndAdhForceECM(numCells,maxAllNodePerCell,maxMembrNodePerCell,nodeCellLocXAddr,nodeCellLocYAddr,nodeIsActiveAddr,adhPairECM_CellAddr/*, isActiveECM*/, nodeCellGrowProAddr, mitoticThreshold)); } void SceECM::CalSumForcesOnECM() { double dummy=0.0 ; thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( linSpringForceECMX.begin(), linSpringForceECMY.begin(), bendSpringForceECMX.begin(), bendSpringForceECMY.begin(), memMorseForceECMX.begin(), memMorseForceECMY.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( linSpringForceECMX.begin(), linSpringForceECMY.begin(), bendSpringForceECMX.begin(), bendSpringForceECMY.begin(), memMorseForceECMX.begin(), memMorseForceECMY.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( totalForceECMX.begin(), totalForceECMY.begin())), TotalECMForceCompute(dummy)); } void SceECM::CalSumOnlyExplicitForcesOnECM() { thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( bendSpringForceECMX.begin(), bendSpringForceECMY.begin(), memMorseForceECMX.begin(), memMorseForceECMY.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( bendSpringForceECMX.begin(), bendSpringForceECMY.begin(), memMorseForceECMX.begin(), memMorseForceECMY.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( totalExplicitForceECMX.begin(), totalExplicitForceECMY.begin())), TotalExplicitECMForceCompute()); } void SceECM::CalRHS(double dt) { thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( totalExplicitForceECMX.begin(), totalExplicitForceECMY.begin(), nodeECMLocX.begin(), nodeECMLocY.begin(), dampCoef.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( totalExplicitForceECMX.begin(), totalExplicitForceECMY.begin(), nodeECMLocX.begin(), nodeECMLocY.begin(), dampCoef.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( rHSX.begin(), rHSY.begin())), RHSCompute(dt)); } void SceECM::MoveNodesBySumAllForces(double dt) { // move the nodes of ECM thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( nodeECMLocX.begin(), nodeECMLocY.begin(), totalForceECMX.begin(), totalForceECMY.begin(), dampCoef.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( nodeECMLocX.begin(), nodeECMLocY.begin(), totalForceECMX.begin(), totalForceECMY.begin(), dampCoef.begin()))+numNodesECM, thrust::make_zip_iterator ( thrust::make_tuple ( nodeECMLocX.begin(), nodeECMLocY.begin())), MoveNodesECM(dt)); } void SceECM::FindNeighborCandidateForCellsAndECMNodes() { double* nodeECMLocXAddr= thrust::raw_pointer_cast ( &nodeECMLocX[0]) ; double* nodeECMLocYAddr= thrust::raw_pointer_cast ( &nodeECMLocY[0]) ; double * basalCellLocXAddr= 
thrust::raw_pointer_cast ( & ( cellsPointerECM->getCellInfoVecs().basalLocX[0]) ) ; double * basalCellLocYAddr= thrust::raw_pointer_cast ( & ( cellsPointerECM->getCellInfoVecs().basalLocY[0]) ) ; EType* peripORexcmAddr= thrust::raw_pointer_cast ( &peripORexcm[0]) ; // bool * isActiveECMAddr = thrust::raw_pointer_cast( // &isActiveECM[0]); int numCells = cellsPointerECM->getCellInfoVecs().basalLocX.size() ; //// find the closest ECM node to each each cell // thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( cellsPointerECM->getCellInfoVecs().basalLocX.begin(), cellsPointerECM->getCellInfoVecs().basalLocY.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( cellsPointerECM->getCellInfoVecs().basalLocX.begin(), cellsPointerECM->getCellInfoVecs().basalLocY.begin()))+numCells, cellsPointerECM->getCellInfoVecs().eCMNeighborId.begin(), FindECMNeighborPerCell(nodeECMLocXAddr,nodeECMLocYAddr,maxTotalNodes));//, isActiveECMAddr)); thrust:: transform ( thrust::make_zip_iterator ( thrust:: make_tuple ( nodeECMLocX.begin(), nodeECMLocY.begin())), // isActiveECM.begin())), thrust::make_zip_iterator ( thrust:: make_tuple ( nodeECMLocX.begin(), nodeECMLocY.begin()))+numNodesECM, // isActiveECM.begin()))+numNodesECM, cellNeighborId.begin(), FindCellNeighborPerECMNode(basalCellLocXAddr,basalCellLocYAddr, numCells)); } void SceECM::AssignDampCoef() { thrust::transform ( peripORexcm.begin() ,peripORexcm.begin() +numNodesECM, dampCoef.begin(), AssignDamping(dampBasal,dampBC,dampApical) ); #ifdef debugModeECM for (int i=0 ; i<numNodesECM ; i++) { if (dampCoef[i] < smallNumber) { cout << "damping of element " << i << " is " << dampCoef[i] << " which is wrong" <<endl ; throw::invalid_argument ( "damping coefficients in ECM is not set correctly") ; } } #endif } int SceECM::decideIfAddECMNode_M(uint numECMNodes) { // decide if add ecm node given current active node count // uint maxECMNode = numNodesECM*3; // bool isInitPhase= nodes->isInitPhase ; // thrust::transform( // thrust::make_zip_iterator( // thrust::make_tuple(maxDistToRiVec.begin(), // maxTenIndxTypeVec.begin() // )), // thrust::make_zip_iterator( // thrust::make_tuple(maxDistToRiVec.begin(), // maxTenIndxTypeVec.begin() // )) // + curActCellCt, // isECMAddingNode, // ECMGrowFunc(numECMNodes, maxTotalNodes, maxLengthToAddECMNodes)); double max_Dist = -9999.9; int indx = -1; for (int i = 0; i < numECMNodes; i++){ if (i != numECMNodes-1){ double tmp_Dist = sqrt((nodeECMLocX[i+1] - nodeECMLocX[i])*(nodeECMLocX[i+1] - nodeECMLocX[i]) + (nodeECMLocY[i+1] - nodeECMLocY[i])*(nodeECMLocY[i+1] - nodeECMLocY[i])); if (tmp_Dist > max_Dist && tmp_Dist > 2.0*lknotECMPeripGPU){ max_Dist = tmp_Dist; indx = i; } } else{ double tmp_Dist = sqrt((nodeECMLocX[0] - nodeECMLocX[i])*(nodeECMLocX[0] - nodeECMLocX[i]) + (nodeECMLocY[0] - nodeECMLocY[i])*(nodeECMLocY[0] - nodeECMLocY[i])); if (tmp_Dist > max_Dist && tmp_Dist > 2.0*lknotECMPeripGPU){ max_Dist = tmp_Dist; indx = i; } } } return indx; } void SceECM::AddECMNode(int indx, uint numECMNodes){ std::cout<<"insertIndx = "<<indx+1<<std::endl; std::cout<<"numECMNodes = "<<numECMNodes<<std::endl; uint insertIndx = indx + 1; // newIndex double insertX, insertY; insertX = (nodeECMLocX[indx] + nodeECMLocX[indx+1])/2.0; insertY = (nodeECMLocY[indx] + nodeECMLocY[indx+1])/2.0; uint globalIndxEnd = numECMNodes; //membrane nodes are first. 
End position based on newindex uint globalIndexInsert = insertIndx; // if (insertIndx<=iDApical) { //since the current acrive membrane nodes is one more, it can not be the last ID. // iDApical=iDApical+1 ; // } // if (insertIndx<=iDBasal) { // iDBasal=iDBasal+1 ; // } for (uint i = globalIndxEnd; i >= globalIndexInsert; i--) { // isActiveECM[i] = isActiveECM[i - 1]; nodeECMLocX[i] = nodeECMLocX[i - 1]; nodeECMLocY[i] = nodeECMLocY[i - 1]; peripORexcm[i] = peripORexcm[i-1] ; sponLen[i] = sponLen[i-1]; dampCoef[i] = dampCoef[i-1]; } // isActiveECM[globalIndexInsert] = true; nodeECMLocX[globalIndexInsert] = insertX; std::cout<<"nodeECMLocX[globalIndexInsert] : "<<nodeECMLocX[globalIndexInsert]<<std::endl; nodeECMLocY[globalIndexInsert] = insertY; std::cout<<"nodeECMLocY[globalIndexInsert] : "<<nodeECMLocY[globalIndexInsert]<<std::endl; peripORexcm[globalIndexInsert] = peripORexcm[globalIndexInsert-1]; // to have the same type of the membrane node as at least one of its neighbors std::cout<<"peripORexcm[globalIndexInsert] : "<<peripORexcm[globalIndexInsert]<<std::endl; sponLen[globalIndexInsert] = sponLen[globalIndexInsert-1]; std::cout<<"sponLen[globalIndexInsert] : "<<sponLen[globalIndexInsert]<<std::endl; dampCoef[globalIndexInsert] = dampCoef[globalIndexInsert-1]; std::cout<<"dampCoef[globalIndexInsert] : "<<dampCoef[globalIndexInsert]<<std::endl; // if (_memNodeType[globalIndexInsert-1] != apical1 || _memNodeType[globalIndexInsert-1] != basal1){ // _memNodeType[globalIndexInsert] = _memNodeType[globalIndexInsert+1]; // } //return (curActCount + 1); // numECMNodes += 1; }; // void SceECM::calECMGrowSpeed_M() { // // reduce_by_key, find value of max tension and their index // thrust::counting_iterator<uint> iBegin(0); // uint maxNPerCell = allocPara_m.maxAllNodePerCell; // thrust::transform( // thrust::make_zip_iterator( // thrust::make_tuple( // ECMDistToRi.begin(), // ECMNodeType1.begin())), // thrust::make_zip_iterator( // thrust::make_tuple( // ECMbrDistToRi.begin(), // ECMNodeType1.begin())) // + numECMNodes, // thrust::make_zip_iterator( // thrust::make_tuple(maxDistToRiVec.begin(), // maxTenIndxTypeVec.begin())), // thrust::equal_to<uint>(), MaxWInfo()); // for (int i=0 ; i<cellInfoVecs.maxDistToRiVec.size() ; i++) { // cout << "the max distance in cell" << i << " is "<<cellInfoVecs.maxDistToRiVec[i] << endl ; // cout << "At index "<<cellInfoVecs.maxTenIndxVec[i]<<std::endl; // } // }
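// A minimal standalone sketch (not part of the SceECM sources above) of the
// thrust::transform + zip_iterator pattern those functions rely on: several device
// vectors are read in lockstep and several outputs are written in lockstep through
// one functor. The functor name (NodeForce) and the spring constant are illustrative
// assumptions, not project code.
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/tuple.h>

struct NodeForce {
    double k;                                   // simple linear-spring stiffness (assumed)
    NodeForce(double k_) : k(k_) {}
    __host__ __device__
    thrust::tuple<double, double> operator()(const thrust::tuple<double, double>& xy) const {
        double x = thrust::get<0>(xy);
        double y = thrust::get<1>(xy);
        return thrust::make_tuple(-k * x, -k * y);   // force pulling the node toward the origin
    }
};

int main() {
    int n = 4;
    thrust::device_vector<double> locX(n, 1.0), locY(n, 2.0);
    thrust::device_vector<double> fX(n), fY(n);

    thrust::transform(
        thrust::make_zip_iterator(thrust::make_tuple(locX.begin(), locY.begin())),
        thrust::make_zip_iterator(thrust::make_tuple(locX.begin(), locY.begin())) + n,
        thrust::make_zip_iterator(thrust::make_tuple(fX.begin(), fY.begin())),
        NodeForce(0.5));

    printf("fX[0]=%f fY[0]=%f\n", (double)fX[0], (double)fY[0]);  // expect -0.5 and -1.0
    return 0;
}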
fd6f637c65ca65f0f86c90e9ba8a504df3eb1dde.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "common/errors.h" #include "common/cpu_bitmap.h" #define DIM 1000 #define SCALE_FACTOR 1.5 struct hipComplex { float r; float i; __device__ hipComplex(float a, float b) : r(a), i(b) {} __device__ float magnitude2(void) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r * a.r - i * a.i, i * a.r + r * a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r + a.r, i + a.i); } }; __device__ int julia(int x, int y) { const float scale = SCALE_FACTOR; float jx = scale * (float)(DIM / 2 - x) / (DIM / 2); float jy = scale * (float)(DIM / 2 - y) / (DIM / 2); hipComplex c(-0.8, 0.154); hipComplex a(jx, jy); int i = 0; for (i = 0; i < 200; i++) { a = a * a + c; if (a.magnitude2() > 1000) { return 0; } } return 1; } __global__ void kernel(unsigned char *ptr) { int x = blockIdx.x; int y = blockIdx.y; int offset = x + y * gridDim.x; int juliaValue = julia(x, y); ptr[offset * 4 + 0] = 255 * juliaValue; ptr[offset * 4 + 1] = 0; ptr[offset * 4 + 2] = 0; ptr[offset * 4 + 3] = 255; } int main(void) { CPUBitmap bitmap(DIM, DIM); unsigned char *dev_bitmap; HANDLE_ERROR(hipMalloc((void**)&dev_bitmap, bitmap.image_size())); dim3 grid(DIM, DIM); hipLaunchKernelGGL(( kernel), dim3(grid), dim3(1), 0, 0, dev_bitmap); HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost)); bitmap.dump_ppm("image.ppm"); HANDLE_ERROR(hipFree(dev_bitmap)); }
fd6f637c65ca65f0f86c90e9ba8a504df3eb1dde.cu
#include <stdio.h> #include "common/errors.h" #include "common/cpu_bitmap.h" #define DIM 1000 #define SCALE_FACTOR 1.5 struct cuComplex { float r; float i; __device__ cuComplex(float a, float b) : r(a), i(b) {} __device__ float magnitude2(void) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r * a.r - i * a.i, i * a.r + r * a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r + a.r, i + a.i); } }; __device__ int julia(int x, int y) { const float scale = SCALE_FACTOR; float jx = scale * (float)(DIM / 2 - x) / (DIM / 2); float jy = scale * (float)(DIM / 2 - y) / (DIM / 2); cuComplex c(-0.8, 0.154); cuComplex a(jx, jy); int i = 0; for (i = 0; i < 200; i++) { a = a * a + c; if (a.magnitude2() > 1000) { return 0; } } return 1; } __global__ void kernel(unsigned char *ptr) { int x = blockIdx.x; int y = blockIdx.y; int offset = x + y * gridDim.x; int juliaValue = julia(x, y); ptr[offset * 4 + 0] = 255 * juliaValue; ptr[offset * 4 + 1] = 0; ptr[offset * 4 + 2] = 0; ptr[offset * 4 + 3] = 255; } int main(void) { CPUBitmap bitmap(DIM, DIM); unsigned char *dev_bitmap; HANDLE_ERROR(cudaMalloc((void**)&dev_bitmap, bitmap.image_size())); dim3 grid(DIM, DIM); kernel<<<grid, 1>>>(dev_bitmap); HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost)); bitmap.dump_ppm("image.ppm"); HANDLE_ERROR(cudaFree(dev_bitmap)); }
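// A minimal standalone sketch, related to the Julia pair above but not part of it:
// the same per-pixel mapping launched with 16x16 thread blocks instead of the
// <<<grid, 1>>> one-thread-per-block launch used there, which is usually far more
// efficient. The kernel name (fill) and the WIDTH/HEIGHT values are illustrative.
#include <cstdio>
#include <cuda_runtime.h>

#define WIDTH  1000
#define HEIGHT 1000

__global__ void fill(unsigned char *ptr) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= WIDTH || y >= HEIGHT) return;      // guard the ragged edge of the grid
    int offset = x + y * WIDTH;
    ptr[offset * 4 + 0] = 255;                  // red channel only, as in the example above
    ptr[offset * 4 + 1] = 0;
    ptr[offset * 4 + 2] = 0;
    ptr[offset * 4 + 3] = 255;
}

int main() {
    unsigned char *dev_ptr;
    size_t bytes = (size_t)WIDTH * HEIGHT * 4;
    cudaMalloc(&dev_ptr, bytes);
    dim3 block(16, 16);
    dim3 grid((WIDTH + block.x - 1) / block.x, (HEIGHT + block.y - 1) / block.y);
    fill<<<grid, block>>>(dev_ptr);
    printf("launch: %s\n", cudaGetErrorString(cudaGetLastError()));
    cudaDeviceSynchronize();
    cudaFree(dev_ptr);
    return 0;
}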
a2173becbfdd143d70f5c296923e4647f02831b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_info.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/utils/data_type.h" #include "paddle/phi/kernels/index_select_kernel.h" namespace phi { using paddle::platform::PADDLE_CUDA_NUM_THREADS; template <typename T, typename IndexT> __global__ void index_select_cuda_kernel(const T* input, T* output, const IndexT* index, int64_t N, int64_t stride, int64_t size, int64_t delta) { CUDA_KERNEL_LOOP_TYPE(idx, N, int64_t) { int64_t pre_idx = idx / (stride * size); int64_t dim_idx = idx % (stride * size) / stride; IndexT src_dim_idx = index[dim_idx]; int64_t input_idx = idx + (delta * pre_idx + src_dim_idx - dim_idx) * stride; output[idx] = input[input_idx]; } } template <typename T, typename Context> void IndexSelectKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& index, int dim, DenseTensor* output) { auto input_dim = x.dims(); auto output_dim = output->dims(); dim = dim >= 0 ? dim : dim + input_dim.size(); auto stride_dim = phi::stride(input_dim); int64_t stride = stride_dim[dim]; int64_t size = output_dim[dim]; int64_t delta = input_dim[dim] - size; const auto& index_type = index.dtype(); bool index_type_match = index_type == phi::DataType::INT64 || index_type == phi::DataType::INT32; PADDLE_ENFORCE_EQ(index_type_match, true, phi::errors::InvalidArgument( "Input(Index) holds the wrong type, it holds %s, but " "desires to be %s or %s", index_type, phi::DataType::INT32, phi::DataType::INT64)); auto* in_data = x.data<T>(); T* out_data = ctx.template Alloc<T>(output); int64_t numel = output->numel(); if (numel == 0) { return; } auto stream = ctx.stream(); unsigned int block_dim = PADDLE_CUDA_NUM_THREADS; dim3 grid_dim = dim3((numel + block_dim - 1) / block_dim); paddle::platform::LimitGridDim(ctx, &grid_dim); if (index_type == phi::DataType::INT64) { const int64_t* index_data = index.data<int64_t>(); hipLaunchKernelGGL(( index_select_cuda_kernel<T, int64_t>), dim3(grid_dim), dim3(block_dim), 0, stream, in_data, out_data, index_data, numel, stride, size, delta); } else { const int* index_data = index.data<int>(); hipLaunchKernelGGL(( index_select_cuda_kernel<T, int>), dim3(grid_dim), dim3(block_dim), 0, stream, in_data, out_data, index_data, numel, stride, size, delta); } } } // namespace phi PD_REGISTER_KERNEL(index_select, GPU, ALL_LAYOUT, phi::IndexSelectKernel, float, double, phi::dtype::float16, int, int64_t) {}
a2173becbfdd143d70f5c296923e4647f02831b3.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_info.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/utils/data_type.h" #include "paddle/phi/kernels/index_select_kernel.h" namespace phi { using paddle::platform::PADDLE_CUDA_NUM_THREADS; template <typename T, typename IndexT> __global__ void index_select_cuda_kernel(const T* input, T* output, const IndexT* index, int64_t N, int64_t stride, int64_t size, int64_t delta) { CUDA_KERNEL_LOOP_TYPE(idx, N, int64_t) { int64_t pre_idx = idx / (stride * size); int64_t dim_idx = idx % (stride * size) / stride; IndexT src_dim_idx = index[dim_idx]; int64_t input_idx = idx + (delta * pre_idx + src_dim_idx - dim_idx) * stride; output[idx] = input[input_idx]; } } template <typename T, typename Context> void IndexSelectKernel(const Context& ctx, const DenseTensor& x, const DenseTensor& index, int dim, DenseTensor* output) { auto input_dim = x.dims(); auto output_dim = output->dims(); dim = dim >= 0 ? dim : dim + input_dim.size(); auto stride_dim = phi::stride(input_dim); int64_t stride = stride_dim[dim]; int64_t size = output_dim[dim]; int64_t delta = input_dim[dim] - size; const auto& index_type = index.dtype(); bool index_type_match = index_type == phi::DataType::INT64 || index_type == phi::DataType::INT32; PADDLE_ENFORCE_EQ(index_type_match, true, phi::errors::InvalidArgument( "Input(Index) holds the wrong type, it holds %s, but " "desires to be %s or %s", index_type, phi::DataType::INT32, phi::DataType::INT64)); auto* in_data = x.data<T>(); T* out_data = ctx.template Alloc<T>(output); int64_t numel = output->numel(); if (numel == 0) { return; } auto stream = ctx.stream(); unsigned int block_dim = PADDLE_CUDA_NUM_THREADS; dim3 grid_dim = dim3((numel + block_dim - 1) / block_dim); paddle::platform::LimitGridDim(ctx, &grid_dim); if (index_type == phi::DataType::INT64) { const int64_t* index_data = index.data<int64_t>(); index_select_cuda_kernel<T, int64_t><<<grid_dim, block_dim, 0, stream>>>( in_data, out_data, index_data, numel, stride, size, delta); } else { const int* index_data = index.data<int>(); index_select_cuda_kernel<T, int><<<grid_dim, block_dim, 0, stream>>>( in_data, out_data, index_data, numel, stride, size, delta); } } } // namespace phi PD_REGISTER_KERNEL(index_select, GPU, ALL_LAYOUT, phi::IndexSelectKernel, float, double, phi::dtype::float16, int, int64_t) {}
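// A standalone sketch (assumed shapes, not Paddle/phi code) exercising the same
// pre_idx / dim_idx / delta arithmetic as index_select_cuda_kernel above, on a tiny
// 2x4 row-major matrix with index {3,1} along dim 1; it should print 3 1 13 11.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void index_select_demo(const int *in, int *out, const int *index,
                                  long long N, long long stride, long long size,
                                  long long delta) {
    long long idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    long long pre_idx = idx / (stride * size);
    long long dim_idx = idx % (stride * size) / stride;
    long long src_dim_idx = index[dim_idx];
    long long input_idx = idx + (delta * pre_idx + src_dim_idx - dim_idx) * stride;
    out[idx] = in[input_idx];
}

int main() {
    // input: 2x4 row-major, select dim=1 with index {3,1} -> output is 2x2
    int h_in[8]  = {0, 1, 2, 3, 10, 11, 12, 13};
    int h_idx[2] = {3, 1};
    int *d_in, *d_out, *d_idx;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, 4 * sizeof(int));
    cudaMalloc(&d_idx, sizeof(h_idx));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    cudaMemcpy(d_idx, h_idx, sizeof(h_idx), cudaMemcpyHostToDevice);
    // for dim=1 of a 2x4 tensor: stride=1, output size along dim=2, delta=4-2=2, N=4
    index_select_demo<<<1, 32>>>(d_in, d_out, d_idx, 4, 1, 2, 2);
    int h_out[4];
    cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
    printf("%d %d %d %d\n", h_out[0], h_out[1], h_out[2], h_out[3]);  // expect 3 1 13 11
    cudaFree(d_in); cudaFree(d_out); cudaFree(d_idx);
    return 0;
}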
1a96ff271b5b9b99ace84adfeb9675f68f8985e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Tensor.h" __global__ void printVal(float* ptr) { printf("%f\n", ptr[0]); } __global__ void check(float* ptr,float val) { if (ptr[0] <= val+0.01 && ptr[0]>=val-0.01) { printf("check passed %f %f %d\n",ptr[0],val,ptr); } else { printf("check failed %f %f %d\n",ptr[0],val,ptr); } } void Tensor::allocateGPU() { // if (mMemory != NULL) // { // freeGPU(); // } // cl_int err; // mMemory = clCreateBuffer(gCLContext, CL_MEM_READ_WRITE, mSize * sizeof(cl_float), NULL, &err); // if (err != CL_SUCCESS) // { // printf("ERROR: allocating tensor GPU: %d\n", err); // } #ifdef USE_GPU if(mDataGPU!=NULL) { freeGPU(); } gpuErrChk(hipMalloc(&mDataGPU, mAllocSize * sizeof(Float))); mStartGPU = mDataGPU; #endif } void Tensor::freeGPU() { // if (mMemory != NULL) // { // clReleaseMemObject(mMemory); // mMemory = NULL; // } #ifdef USE_GPU if(mDataGPU!=NULL) { gpuErrChk(hipFree(mDataGPU)); mDataGPU = NULL; mStartGPU = NULL; } #endif } void Tensor::copyToGPU() { #ifdef NN_DEBUG assert(mData != NULL && mDataGPU != NULL); #endif #ifdef USE_GPU gpuErrChk(hipMemcpy(mDataGPU, mData, mAllocSize * sizeof(Float), hipMemcpyHostToDevice)); #endif // if (mMemory != NULL && mData != NULL) // { // cl_int err = clEnqueueWriteBuffer(gCLQueue, mMemory, CL_TRUE, 0, mSize * sizeof(cl_float), mData, 0, NULL, NULL); // if (err != CL_SUCCESS) // { // printf("ERROR: copytoGPU: %d\n", err); // } // } // /*cl_int err = clblasWriteMatrix(clblasRowMajor, mSize * sizeof(cl_float), mSize * sizeof(cl_float), sizeof(cl_float), // mData, 0, cols(), mMemory, 0, cols(), // gCLQueue, 1, NULL); // if (err != CL_SUCCESS) // { // printf("ERROR: copytoGPU: %d\n", err); // }*/ } void Tensor::copyToCPU() { #ifdef NN_DEBUG assert(mData != NULL && mDataGPU != NULL); #endif #ifdef USE_GPU gpuErrChk(hipMemcpy(mData, mDataGPU, mAllocSize * sizeof(Float), hipMemcpyDeviceToHost)); #endif // if (mMemory != NULL && mData != NULL) // { // cl_int err = clEnqueueReadBuffer(gCLQueue, mMemory, CL_TRUE, 0, mSize * sizeof(cl_float), mData, 0, NULL, NULL); // if (err != CL_SUCCESS) // { // printf("ERROR: copytoCPU: %d\n", err); // } // } // /*cl_int err = clblasReadMatrix(clblasRowMajor, mSize * sizeof(cl_float), mSize * sizeof(cl_float), sizeof(cl_float), // mMemory, 0, cols(), mData, 0, cols(), // gCLQueue, 1, NULL); // if (err != CL_SUCCESS) // { // printf("ERROR: copytoGPU: %d\n", err); // }*/ } // __global__ void Tensor::printGPU() const // { // for (uint64_t i = 0; i < mShape[0]; i++) // { // for (uint64_t j = 0; j < mShape[1]; j++) // { // printf("%f ", mDataGPU[i*mLD+j]); // } // printf("\n"); // } // } __global__ void printGPU(int m, int n, int ld, float* data) { for (uint64_t i = 0; i < n; i++) { for (uint64_t j = 0; j < m; j++) { printf("%f ", data[j*ld+i]); } printf("\n"); } }
1a96ff271b5b9b99ace84adfeb9675f68f8985e2.cu
#include "Tensor.h" __global__ void printVal(float* ptr) { printf("%f\n", ptr[0]); } __global__ void check(float* ptr,float val) { if (ptr[0] <= val+0.01 && ptr[0]>=val-0.01) { printf("check passed %f %f %d\n",ptr[0],val,ptr); } else { printf("check failed %f %f %d\n",ptr[0],val,ptr); } } void Tensor::allocateGPU() { // if (mMemory != NULL) // { // freeGPU(); // } // cl_int err; // mMemory = clCreateBuffer(gCLContext, CL_MEM_READ_WRITE, mSize * sizeof(cl_float), NULL, &err); // if (err != CL_SUCCESS) // { // printf("ERROR: allocating tensor GPU: %d\n", err); // } #ifdef USE_GPU if(mDataGPU!=NULL) { freeGPU(); } gpuErrChk(cudaMalloc(&mDataGPU, mAllocSize * sizeof(Float))); mStartGPU = mDataGPU; #endif } void Tensor::freeGPU() { // if (mMemory != NULL) // { // clReleaseMemObject(mMemory); // mMemory = NULL; // } #ifdef USE_GPU if(mDataGPU!=NULL) { gpuErrChk(cudaFree(mDataGPU)); mDataGPU = NULL; mStartGPU = NULL; } #endif } void Tensor::copyToGPU() { #ifdef NN_DEBUG assert(mData != NULL && mDataGPU != NULL); #endif #ifdef USE_GPU gpuErrChk(cudaMemcpy(mDataGPU, mData, mAllocSize * sizeof(Float), cudaMemcpyHostToDevice)); #endif // if (mMemory != NULL && mData != NULL) // { // cl_int err = clEnqueueWriteBuffer(gCLQueue, mMemory, CL_TRUE, 0, mSize * sizeof(cl_float), mData, 0, NULL, NULL); // if (err != CL_SUCCESS) // { // printf("ERROR: copytoGPU: %d\n", err); // } // } // /*cl_int err = clblasWriteMatrix(clblasRowMajor, mSize * sizeof(cl_float), mSize * sizeof(cl_float), sizeof(cl_float), // mData, 0, cols(), mMemory, 0, cols(), // gCLQueue, 1, NULL); // if (err != CL_SUCCESS) // { // printf("ERROR: copytoGPU: %d\n", err); // }*/ } void Tensor::copyToCPU() { #ifdef NN_DEBUG assert(mData != NULL && mDataGPU != NULL); #endif #ifdef USE_GPU gpuErrChk(cudaMemcpy(mData, mDataGPU, mAllocSize * sizeof(Float), cudaMemcpyDeviceToHost)); #endif // if (mMemory != NULL && mData != NULL) // { // cl_int err = clEnqueueReadBuffer(gCLQueue, mMemory, CL_TRUE, 0, mSize * sizeof(cl_float), mData, 0, NULL, NULL); // if (err != CL_SUCCESS) // { // printf("ERROR: copytoCPU: %d\n", err); // } // } // /*cl_int err = clblasReadMatrix(clblasRowMajor, mSize * sizeof(cl_float), mSize * sizeof(cl_float), sizeof(cl_float), // mMemory, 0, cols(), mData, 0, cols(), // gCLQueue, 1, NULL); // if (err != CL_SUCCESS) // { // printf("ERROR: copytoGPU: %d\n", err); // }*/ } // __global__ void Tensor::printGPU() const // { // for (uint64_t i = 0; i < mShape[0]; i++) // { // for (uint64_t j = 0; j < mShape[1]; j++) // { // printf("%f ", mDataGPU[i*mLD+j]); // } // printf("\n"); // } // } __global__ void printGPU(int m, int n, int ld, float* data) { for (uint64_t i = 0; i < n; i++) { for (uint64_t j = 0; j < m; j++) { printf("%f ", data[j*ld+i]); } printf("\n"); } }
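// The Tensor methods above wrap every CUDA call in gpuErrChk(...), which is defined
// elsewhere in that project (presumably in Tensor.h or a common header). A minimal
// sketch of such a checker, written here as an assumption rather than the project's
// actual macro:
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define gpuErrChk(call) gpuAssert((call), __FILE__, __LINE__)

inline void gpuAssert(cudaError_t code, const char *file, int line) {
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s:%d\n", cudaGetErrorString(code), file, line);
        exit(code);
    }
}

int main() {
    float *p = nullptr;
    gpuErrChk(cudaMalloc(&p, 256 * sizeof(float)));   // aborts with a message on failure
    gpuErrChk(cudaMemset(p, 0, 256 * sizeof(float)));
    gpuErrChk(cudaFree(p));
    return 0;
}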
62e6f9c2bd391bb90b676a3e6e44e8e1e6bb4557.hip
// !!! This is a file automatically generated by hipify!!! #include <time.h> // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #define pai 3.14159265359 //CUFFT Header file #include <hipfftXt.h> // includes, project #include <hipfft.h> //#include <cutil_inline.h> //#include <shrQATest.h> #include "su.h" #include "segy.h" //#include "kexuan2.h" #include "Complex.h" #include <hip/hip_runtime.h> #include <helper_functions.h> #include <helper_cuda.h> #include <timer.h> #define BLOCK_SIZE_X 16 #define BLOCK_SIZE_Y 16 /* This is an seismic data modeling program. V1.0: 3D modeling on OSE method with multi-GPU; --Little Ke in Story 2019.12.20 */ // Complex data type typedef float2 Complex; #define batch 1 int nGPUs,M; int nx,ny,nz,nt,bu,bd,bl,br,bf,bb,nxb,nyb,nzb,sxbeg,sybeg,szbeg,jsx,jsy,jsz,gxbeg,gybeg,gzbeg,jgx,jgy,jgz,csdgather,ng,nshot,is,it,sx,sy,sz,boundary; float fm,dt,amp,dx,dy,dz,vmax,R,dtR,dkx,dky,dkz; int *sxyz_h, *gxyz_h,*sxyz_d, *gxyz_d; float *wavelet_h,*wavelet_d,*wavelethilbert_d; hipfftComplex *cwavelet_d; float *wavelet_card,*wavelethilbert_card; hipfftComplex *cwavelet_card; float *fdata_3dH,*fdata_3dD,*vb_3dH; hipfftComplex *cdata_3dH,*cvb_3dH; cudaLibXtDesc *p1D,*p2D,*q1D,*q2D,*pcD,*vbD; float *att_blr,*att_bfb,*att_bud; float *att_host; float *rec_slice_d,*rec_slice_h,*rec_data_h; char parname[500]; char filename[500]; #include "ose_3d_kernel.cu" #include "ose_3d_lib.cu" int main() { system("rm *.bin"); system("rm ./wf/*.bin"); nx=101; ny=101; nz=101; boundary=25; bu=boundary; bd=boundary; bl=boundary; br=boundary; bf=boundary; bb=boundary; nxb=bl+nx+br; nyb=bf+ny+bb; nzb=bu+nz+bd; nxb=judge_odd(&br,&nxb); nyb=judge_odd(&bb,&nyb); nzb=judge_odd(&bd,&nzb); printf("nxbnew=%d brnew=%d\n",nxb,br); nt=300; fm=40.0; dt=1.0; dt=dt*0.001; amp=100.0; ng=nx*ny; nshot=1; sxbeg=nx/2; sybeg=ny/2; szbeg=0; gxbeg=0; gybeg=0; gzbeg=0; jsx=1; jsy=1; jsz=1; jgx=1; jgy=1; jgz=1; csdgather=0; nGPUs=2; dx=10.0; dy=10.0; dz=10.0; dkx=2*pai/dx/(float)nxb; dky=2*pai/dy/(float)nyb; dkz=2*pai/dz/(float)nzb; printf("dkx=%f dky=%f dkz=%f\n",dkx,dky,dkz); sprintf(parname,"./output/partest.txt"); // set the filename of orignal rtm result hipSetDevice(0); float mstimer; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start);/* record starting time */ dim3 dimBlock2D(BLOCK_SIZE_X,BLOCK_SIZE_Y); dim3 dimGrid3D((nxb+dimBlock2D.x-1)/dimBlock2D.x,(nyb+dimBlock2D.y-1)/dimBlock2D.y,nzb); dim3 dimBlock_nt(BLOCK_SIZE_X,1); dim3 dimGrid_nt((nt+dimBlock_nt.x-1)/dimBlock_nt.x,1); dim3 dimGrid3D_half_before_tran((nxb+1+dimBlock2D.x-1)/dimBlock2D.x,(nyb+dimBlock2D.y-1)/dimBlock2D.y,nzb/2); dim3 dimGrid3D_half_after_tran((nxb+1+dimBlock2D.x-1)/dimBlock2D.x,(nyb/2+dimBlock2D.y-1)/dimBlock2D.y,nzb); Alloc(); sg_init_host(sxyz_h,sxbeg,sybeg,szbeg,jsx,jsy,jsz,nshot,nx,ny); sg_init_host(gxyz_h,sxbeg,sybeg,szbeg,jsx,jsy,jsz,nshot,nx,ny); hipLaunchKernelGGL(( sg_init_device), dim3(1),dim3(1), 0, 0, gxyz_d,gxbeg,gybeg,gzbeg,jgx,jgy,jgz,ng,nx,ny); //There is a potential BUG here, when receiver is put on non GPU0 // init_fdata_3D(vb_3dH,nxb,nyb,nzb,3500.0); // init_cdata_r_3D(cvb_3dH,nxb,nyb,nzb,3500.0); sprintf(filename,"./input/v3d_nx101_ny101_nz101.bin"); input_file_xyz_boundary(filename,vb_3dH,nx,ny,nz,bl,bf,bu,nxb,nyb,nzb); add_pml_layers_v_h(vb_3dH,nx,ny,nz,bl,bf,bu,nxb,nyb,nzb); data_R2CR(vb_3dH,cvb_3dH,nxb,nyb,nzb); Alloc_mulGPU(); hipLaunchKernelGGL(( generate_wavelet_gpu), dim3(1),dim3(1), 0, 0, wavelet_d,fm,nt,dt,amp); 
hipMemcpy(wavelet_h,wavelet_d,nt*sizeof(float),hipMemcpyDefault); write_1dfile_from_1darray("./temp/ricker.bin",wavelet_h,nt); hipfftHandle plan_1d; hipfftHandle plan_1d_r; checkCudaErrors(hipfftPlan1d(&plan_1d,nt,HIPFFT_R2C,1)); checkCudaErrors(hipfftPlan1d(&plan_1d_r,nt,HIPFFT_C2R,1)); checkCudaErrors(hipfftExecR2C(plan_1d,wavelet_d,cwavelet_d)); hipLaunchKernelGGL(( hilbert_1d), dim3(dimGrid_nt),dim3(dimBlock_nt), 0, 0, cwavelet_d,nt); checkCudaErrors(hipfftExecC2R(plan_1d_r,cwavelet_d,wavelethilbert_d)); hipLaunchKernelGGL(( scale_wavelet), dim3(dimGrid_nt),dim3(dimBlock_nt), 0, 0, wavelethilbert_d,nt); hipMemcpy(wavelet_h,wavelethilbert_d,nt*sizeof(float),hipMemcpyDefault); write_1dfile_from_1darray("./temp/ricker_hil.bin",wavelet_h,nt); for(int i=0;i<nt;i++) { printf("rickerhilbert[%d]=%f\n",i,wavelet_h[i]); } vmax=find_max(vb_3dH,nxb*nyb*nzb); printf("vmax=%f\n",vmax); R=vmax*PI*sqrt(pow(1.0/dx,2.0)+pow(1.0/dy,2.0)+pow(1.0/dz,2.0)); printf("R=%f \n",R); dtR=dt*R; M=(int)dtR+6; printf("dtR=%f M=%d\n",dtR,M); R2C_3D_CPU(cdata_3dH,fdata_3dH,nxb,nyb,nzb,TRUE); // Demonstrate how to use CUFFT to perform 3-d FFTs using 2 GPUs // hipfftCreate() - Create an empty plan hipfftHandle plan_input; hipfftResult result; result = hipfftCreate(&plan_input); if (result != HIPFFT_SUCCESS) { printf ("*Create failed\n"); return; } // cufftXtSetGPUs() - Define which GPUs to use int whichGPUs[nGPUs],iGPU; for(iGPU=0;iGPU<nGPUs;iGPU++) { whichGPUs[iGPU] = iGPU; } result = cufftXtSetGPUs (plan_input, nGPUs, whichGPUs); if (result != HIPFFT_SUCCESS) { printf ("*XtSetGPUs failed\n"); return; } // Initialize FFT input data size_t worksize[nGPUs]; // hipfftMakePlan3d() - Create the plan result = hipfftMakePlan3d (plan_input, nzb, nyb, nxb, HIPFFT_C2C, worksize); if (result != HIPFFT_SUCCESS) { printf ("*MakePlan* failed\n"); return; } // cufftXtMalloc() - Malloc data on multiple GPUs Alloc_cufftXt(plan_input,result); // cufftXtMemcpy() - Copy data from host to multiple GPUs result = cufftXtMemcpy (plan_input, p1D,cdata_3dH, CUFFT_COPY_HOST_TO_DEVICE); if (result != HIPFFT_SUCCESS) { printf ("*XtMemcpy failed\n"); return; } result = cufftXtMemcpy (plan_input, vbD,cvb_3dH, CUFFT_COPY_HOST_TO_DEVICE); if (result != HIPFFT_SUCCESS) { printf ("*XtMemcpy failed\n"); return; } // cufftXtExecDescriptorC2C() - Execute FFT on multiple GPUs for(is=0;is<nshot;is++) { // Pad Zero Values For Wavefield Data Xtdata_value(p1D,p2D,q1D,q2D,pcD,nxb,nyb,nzb,0.0); Alloc_wavelet(is,plan_1d,plan_1d_r); for(it=0;it<nt;it++) { // if(it<80) // { Xt_add_wavelet_sxyz(p2D,wavelet_card,sxyz_h,is); Xt_add_wavelet_sxyz(q2D,wavelethilbert_card,sxyz_h,is); // } // if(it%50==0) printf("is=%d it=%d/%d time=%f(s)/%f(s)\n",is,it,nt,dt*it,dt*nt); wavefield_extro_ori2(plan_input,result); /* sprintf(filename,"./wf/q-%d-single.bin",it); result = cufftXtMemcpy (plan_input, cdata_3dH,q2D, CUFFT_COPY_DEVICE_TO_HOST); if (result != HIPFFT_SUCCESS) { printf ("*XtMemcpy failed\n"); return; } write_3dfile_from_1darray_ci(filename,cdata_3dH,nxb,nyb,nzb); sprintf(filename,"./wf/p-%d-single.bin",it); result = cufftXtMemcpy (plan_input, cdata_3dH,p2D, CUFFT_COPY_DEVICE_TO_HOST); if (result != HIPFFT_SUCCESS) { printf ("*XtMemcpy failed\n"); return; } write_3dfile_from_1darray_ci(filename,cdata_3dH,nxb,nyb,nzb); */ hipSetDevice(0); //There is a potential BUG here, when receiver is put on non GPU0 hipLaunchKernelGGL(( rec_shotdata_3d), dim3(dimGrid3D_half_before_tran),dim3(dimBlock2D), 0, 0, (hipfftComplex*) 
q2D->descriptor->data[0],rec_slice_d,gxyz_d,nx,ny,nz,bl,bf,bu,nxb,nyb,nzb,ng); hipMemcpy(rec_slice_h,rec_slice_d,ng*sizeof(float),hipMemcpyDefault); rec_slice_to_data(rec_slice_h,rec_data_h,ng,it); } write_3dfile_from_1darray("./output/qshot2.bin",rec_data_h,ng,1,nt); Free_wavelet(is); } Output(); Free_cufftXt(plan_input,result); // hipfftDestroy() - Destroy FFT plan result = hipfftDestroy(plan_input); if (result != HIPFFT_SUCCESS) { printf ("*Destroy failed: code\n"); return; } result = hipfftDestroy(plan_1d); if (result != HIPFFT_SUCCESS) { printf ("plan_1d Destroy failed: code\n"); return; } result = hipfftDestroy(plan_1d_r); if (result != HIPFFT_SUCCESS) { printf ("plan_1d_r Destroy failed: code\n"); return; } Free(); hipSetDevice(0); hipEventRecord(stop);/* record ending time */ hipEventSynchronize(stop); hipEventElapsedTime(&mstimer, start, stop); printf("finished: %f (s)\n",mstimer*1e-3); return (0); }
62e6f9c2bd391bb90b676a3e6e44e8e1e6bb4557.cu
#include <time.h> // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #define pai 3.14159265359 //CUFFT Header file #include <cufftXt.h> // includes, project #include <cufft.h> //#include <cutil_inline.h> //#include <shrQATest.h> #include "su.h" #include "segy.h" //#include "kexuan2.h" #include "Complex.h" #include <cuda_runtime.h> #include <helper_functions.h> #include <helper_cuda.h> #include <timer.h> #define BLOCK_SIZE_X 16 #define BLOCK_SIZE_Y 16 /* This is an seismic data modeling program. V1.0: 3D modeling on OSE method with multi-GPU; --Little Ke in Story 2019.12.20 */ // Complex data type typedef float2 Complex; #define batch 1 int nGPUs,M; int nx,ny,nz,nt,bu,bd,bl,br,bf,bb,nxb,nyb,nzb,sxbeg,sybeg,szbeg,jsx,jsy,jsz,gxbeg,gybeg,gzbeg,jgx,jgy,jgz,csdgather,ng,nshot,is,it,sx,sy,sz,boundary; float fm,dt,amp,dx,dy,dz,vmax,R,dtR,dkx,dky,dkz; int *sxyz_h, *gxyz_h,*sxyz_d, *gxyz_d; float *wavelet_h,*wavelet_d,*wavelethilbert_d; cufftComplex *cwavelet_d; float *wavelet_card,*wavelethilbert_card; cufftComplex *cwavelet_card; float *fdata_3dH,*fdata_3dD,*vb_3dH; cufftComplex *cdata_3dH,*cvb_3dH; cudaLibXtDesc *p1D,*p2D,*q1D,*q2D,*pcD,*vbD; float *att_blr,*att_bfb,*att_bud; float *att_host; float *rec_slice_d,*rec_slice_h,*rec_data_h; char parname[500]; char filename[500]; #include "ose_3d_kernel.cu" #include "ose_3d_lib.cu" int main() { system("rm *.bin"); system("rm ./wf/*.bin"); nx=101; ny=101; nz=101; boundary=25; bu=boundary; bd=boundary; bl=boundary; br=boundary; bf=boundary; bb=boundary; nxb=bl+nx+br; nyb=bf+ny+bb; nzb=bu+nz+bd; nxb=judge_odd(&br,&nxb); nyb=judge_odd(&bb,&nyb); nzb=judge_odd(&bd,&nzb); printf("nxbnew=%d brnew=%d\n",nxb,br); nt=300; fm=40.0; dt=1.0; dt=dt*0.001; amp=100.0; ng=nx*ny; nshot=1; sxbeg=nx/2; sybeg=ny/2; szbeg=0; gxbeg=0; gybeg=0; gzbeg=0; jsx=1; jsy=1; jsz=1; jgx=1; jgy=1; jgz=1; csdgather=0; nGPUs=2; dx=10.0; dy=10.0; dz=10.0; dkx=2*pai/dx/(float)nxb; dky=2*pai/dy/(float)nyb; dkz=2*pai/dz/(float)nzb; printf("dkx=%f dky=%f dkz=%f\n",dkx,dky,dkz); sprintf(parname,"./output/partest.txt"); // set the filename of orignal rtm result cudaSetDevice(0); float mstimer; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start);/* record starting time */ dim3 dimBlock2D(BLOCK_SIZE_X,BLOCK_SIZE_Y); dim3 dimGrid3D((nxb+dimBlock2D.x-1)/dimBlock2D.x,(nyb+dimBlock2D.y-1)/dimBlock2D.y,nzb); dim3 dimBlock_nt(BLOCK_SIZE_X,1); dim3 dimGrid_nt((nt+dimBlock_nt.x-1)/dimBlock_nt.x,1); dim3 dimGrid3D_half_before_tran((nxb+1+dimBlock2D.x-1)/dimBlock2D.x,(nyb+dimBlock2D.y-1)/dimBlock2D.y,nzb/2); dim3 dimGrid3D_half_after_tran((nxb+1+dimBlock2D.x-1)/dimBlock2D.x,(nyb/2+dimBlock2D.y-1)/dimBlock2D.y,nzb); Alloc(); sg_init_host(sxyz_h,sxbeg,sybeg,szbeg,jsx,jsy,jsz,nshot,nx,ny); sg_init_host(gxyz_h,sxbeg,sybeg,szbeg,jsx,jsy,jsz,nshot,nx,ny); sg_init_device<<<1,1>>>(gxyz_d,gxbeg,gybeg,gzbeg,jgx,jgy,jgz,ng,nx,ny); //There is a potential BUG here, when receiver is put on non GPU0 // init_fdata_3D(vb_3dH,nxb,nyb,nzb,3500.0); // init_cdata_r_3D(cvb_3dH,nxb,nyb,nzb,3500.0); sprintf(filename,"./input/v3d_nx101_ny101_nz101.bin"); input_file_xyz_boundary(filename,vb_3dH,nx,ny,nz,bl,bf,bu,nxb,nyb,nzb); add_pml_layers_v_h(vb_3dH,nx,ny,nz,bl,bf,bu,nxb,nyb,nzb); data_R2CR(vb_3dH,cvb_3dH,nxb,nyb,nzb); Alloc_mulGPU(); generate_wavelet_gpu<<<1,1>>>(wavelet_d,fm,nt,dt,amp); cudaMemcpy(wavelet_h,wavelet_d,nt*sizeof(float),cudaMemcpyDefault); write_1dfile_from_1darray("./temp/ricker.bin",wavelet_h,nt); cufftHandle plan_1d; 
cufftHandle plan_1d_r; checkCudaErrors(cufftPlan1d(&plan_1d,nt,CUFFT_R2C,1)); checkCudaErrors(cufftPlan1d(&plan_1d_r,nt,CUFFT_C2R,1)); checkCudaErrors(cufftExecR2C(plan_1d,wavelet_d,cwavelet_d)); hilbert_1d<<<dimGrid_nt,dimBlock_nt>>>(cwavelet_d,nt); checkCudaErrors(cufftExecC2R(plan_1d_r,cwavelet_d,wavelethilbert_d)); scale_wavelet<<<dimGrid_nt,dimBlock_nt>>>(wavelethilbert_d,nt); cudaMemcpy(wavelet_h,wavelethilbert_d,nt*sizeof(float),cudaMemcpyDefault); write_1dfile_from_1darray("./temp/ricker_hil.bin",wavelet_h,nt); for(int i=0;i<nt;i++) { printf("rickerhilbert[%d]=%f\n",i,wavelet_h[i]); } vmax=find_max(vb_3dH,nxb*nyb*nzb); printf("vmax=%f\n",vmax); R=vmax*PI*sqrt(pow(1.0/dx,2.0)+pow(1.0/dy,2.0)+pow(1.0/dz,2.0)); printf("R=%f \n",R); dtR=dt*R; M=(int)dtR+6; printf("dtR=%f M=%d\n",dtR,M); R2C_3D_CPU(cdata_3dH,fdata_3dH,nxb,nyb,nzb,TRUE); // Demonstrate how to use CUFFT to perform 3-d FFTs using 2 GPUs // cufftCreate() - Create an empty plan cufftHandle plan_input; cufftResult result; result = cufftCreate(&plan_input); if (result != CUFFT_SUCCESS) { printf ("*Create failed\n"); return; } // cufftXtSetGPUs() - Define which GPUs to use int whichGPUs[nGPUs],iGPU; for(iGPU=0;iGPU<nGPUs;iGPU++) { whichGPUs[iGPU] = iGPU; } result = cufftXtSetGPUs (plan_input, nGPUs, whichGPUs); if (result != CUFFT_SUCCESS) { printf ("*XtSetGPUs failed\n"); return; } // Initialize FFT input data size_t worksize[nGPUs]; // cufftMakePlan3d() - Create the plan result = cufftMakePlan3d (plan_input, nzb, nyb, nxb, CUFFT_C2C, worksize); if (result != CUFFT_SUCCESS) { printf ("*MakePlan* failed\n"); return; } // cufftXtMalloc() - Malloc data on multiple GPUs Alloc_cufftXt(plan_input,result); // cufftXtMemcpy() - Copy data from host to multiple GPUs result = cufftXtMemcpy (plan_input, p1D,cdata_3dH, CUFFT_COPY_HOST_TO_DEVICE); if (result != CUFFT_SUCCESS) { printf ("*XtMemcpy failed\n"); return; } result = cufftXtMemcpy (plan_input, vbD,cvb_3dH, CUFFT_COPY_HOST_TO_DEVICE); if (result != CUFFT_SUCCESS) { printf ("*XtMemcpy failed\n"); return; } // cufftXtExecDescriptorC2C() - Execute FFT on multiple GPUs for(is=0;is<nshot;is++) { // Pad Zero Values For Wavefield Data Xtdata_value(p1D,p2D,q1D,q2D,pcD,nxb,nyb,nzb,0.0); Alloc_wavelet(is,plan_1d,plan_1d_r); for(it=0;it<nt;it++) { // if(it<80) // { Xt_add_wavelet_sxyz(p2D,wavelet_card,sxyz_h,is); Xt_add_wavelet_sxyz(q2D,wavelethilbert_card,sxyz_h,is); // } // if(it%50==0) printf("is=%d it=%d/%d time=%f(s)/%f(s)\n",is,it,nt,dt*it,dt*nt); wavefield_extro_ori2(plan_input,result); /* sprintf(filename,"./wf/q-%d-single.bin",it); result = cufftXtMemcpy (plan_input, cdata_3dH,q2D, CUFFT_COPY_DEVICE_TO_HOST); if (result != CUFFT_SUCCESS) { printf ("*XtMemcpy failed\n"); return; } write_3dfile_from_1darray_ci(filename,cdata_3dH,nxb,nyb,nzb); sprintf(filename,"./wf/p-%d-single.bin",it); result = cufftXtMemcpy (plan_input, cdata_3dH,p2D, CUFFT_COPY_DEVICE_TO_HOST); if (result != CUFFT_SUCCESS) { printf ("*XtMemcpy failed\n"); return; } write_3dfile_from_1darray_ci(filename,cdata_3dH,nxb,nyb,nzb); */ cudaSetDevice(0); //There is a potential BUG here, when receiver is put on non GPU0 rec_shotdata_3d<<<dimGrid3D_half_before_tran,dimBlock2D>>>((cufftComplex*) q2D->descriptor->data[0],rec_slice_d,gxyz_d,nx,ny,nz,bl,bf,bu,nxb,nyb,nzb,ng); cudaMemcpy(rec_slice_h,rec_slice_d,ng*sizeof(float),cudaMemcpyDefault); rec_slice_to_data(rec_slice_h,rec_data_h,ng,it); } write_3dfile_from_1darray("./output/qshot2.bin",rec_data_h,ng,1,nt); Free_wavelet(is); } Output(); Free_cufftXt(plan_input,result); // 
cufftDestroy() - Destroy FFT plan result = cufftDestroy(plan_input); if (result != CUFFT_SUCCESS) { printf ("*Destroy failed: code\n"); return; } result = cufftDestroy(plan_1d); if (result != CUFFT_SUCCESS) { printf ("plan_1d Destroy failed: code\n"); return; } result = cufftDestroy(plan_1d_r); if (result != CUFFT_SUCCESS) { printf ("plan_1d_r Destroy failed: code\n"); return; } Free(); cudaSetDevice(0); cudaEventRecord(stop);/* record ending time */ cudaEventSynchronize(stop); cudaEventElapsedTime(&mstimer, start, stop); printf("finished: %f (s)\n",mstimer*1e-3); return (0); }
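// A minimal standalone sketch of the multi-GPU cuFFT plan setup used above
// (cufftCreate -> cufftXtSetGPUs -> cufftMakePlan3d), with each result code checked.
// It assumes two visible GPUs and linking with -lcufft; the 64^3 grid is an
// arbitrary illustrative size, not the seismic model's dimensions.
#include <cstdio>
#include <cstdlib>
#include <cufftXt.h>

#define NX 64
#define NY 64
#define NZ 64

static void check(cufftResult r, const char *what) {
    if (r != CUFFT_SUCCESS) { printf("%s failed: %d\n", what, (int)r); exit(1); }
}

int main() {
    cufftHandle plan;
    check(cufftCreate(&plan), "cufftCreate");

    int nGPUs = 2;
    int whichGPUs[2] = {0, 1};
    check(cufftXtSetGPUs(plan, nGPUs, whichGPUs), "cufftXtSetGPUs");

    size_t worksize[2];
    check(cufftMakePlan3d(plan, NZ, NY, NX, CUFFT_C2C, worksize), "cufftMakePlan3d");
    printf("work area: GPU0 %zu bytes, GPU1 %zu bytes\n", worksize[0], worksize[1]);

    check(cufftDestroy(plan), "cufftDestroy");
    return 0;
}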
58d043b647c800e33d684c39324d34d7736d92ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ATen/hip/HIPContext.h" #include <ATen/ATen.h> #include <torch/torch.h> #include <torch/types.h> namespace at { namespace native { namespace { template <typename scalar_t> __global__ void revert_varlen_kernel(scalar_t *in, scalar_t *out, int64_t *offsets, int feature_size, int n, scalar_t pad_value) { const int offset = static_cast<int>(offsets[blockIdx.x]); for (int i = threadIdx.x; i < feature_size; i += blockDim.x) { out[blockIdx.x * feature_size + i] = (offset >= 0) ? in[offset + i] : pad_value; } } } // namespace void checkLongTensor(const Tensor &tensor) { TORCH_CHECK(tensor.dim() == 1 && tensor.device() == at::kCPU && tensor.scalar_type() == at::kLong, "'lengths' argument should be a 1D CPU int64 tensor"); } at::Tensor revert_varlen_tensor(const Tensor &_input, const Tensor &_offsets) { auto input = _input.contiguous(); auto output = torch::empty_like(input); int64_t seq_length = input.size(0); int64_t batch_size = input.size(1); assert(_offsets.dim() == 1); assert(_offsets.is_cuda()); assert(_offsets.scalar_type() == at::kLong); TORCH_CHECK(_offsets.dim() == 1 && _offsets.is_cuda() && _offsets.scalar_type() == at::kLong, "'offsets' argument should be a 1D CUDA int64 tensor"); TORCH_CHECK(_offsets.numel() == batch_size * seq_length, "Expected `len(offsets) = batch_size * seq_length`, but got ", _offsets.numel(), " (batch_size=", batch_size, ", seq_length=", seq_length, ")"); int64_t feature_size = 1; for (int64_t dim = 2; dim < input.ndimension(); dim++) { feature_size *= input.size(dim); } int numThreads = 512; int numBlocks = batch_size * seq_length; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "revert_varlen", [&] { hipLaunchKernelGGL(( revert_varlen_kernel), dim3(numBlocks), dim3(numThreads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), _offsets.data_ptr<int64_t>(), feature_size, batch_size * seq_length, static_cast<scalar_t>(0)); }); return output; } at::Tensor get_offsets(const Tensor &_input, const Tensor &_lengths) { at::native::checkLongTensor(_lengths); auto input = _input.contiguous(); int64_t seq_length = input.size(0); int64_t batch_size = input.size(1); int64_t *lengths = _lengths.data_ptr<int64_t>(); TORCH_CHECK(_lengths.size(0) == batch_size, "Expected `len(lengths)` to be equal to batch_size, but got ", _lengths.size(0), " (batch_size=", batch_size, ")"); TORCH_CHECK( (lengths[batch_size - 1] > 0), "Length of all samples has to be greater than 0, but found an element " "in 'lengths' that is <= 0"); std::vector<int64_t> offsets; offsets.reserve(batch_size * seq_length); int64_t feature_size = 1; for (int64_t dim = 2; dim < input.ndimension(); dim++) { feature_size *= input.size(dim); } for (int64_t t = 0; t < seq_length; t++) { for (int64_t i = 0; i < batch_size; i++) { if (lengths[i] > t) { offsets.push_back(i * feature_size + (lengths[i] - t - 1) * batch_size * feature_size); } else { offsets.push_back(-1); } } } auto options = at::TensorOptions().device(at::kCUDA).dtype(at::kLong); auto offsets_tensor = at::from_blob(offsets.data(), batch_size * seq_length, at::kLong) .to(options, /* non_blocking */ true, /*copy*/ false); return offsets_tensor; } } // namespace native } // namespace at
58d043b647c800e33d684c39324d34d7736d92ca.cu
#include "ATen/cuda/CUDAContext.h" #include <ATen/ATen.h> #include <torch/torch.h> #include <torch/types.h> namespace at { namespace native { namespace { template <typename scalar_t> __global__ void revert_varlen_kernel(scalar_t *in, scalar_t *out, int64_t *offsets, int feature_size, int n, scalar_t pad_value) { const int offset = static_cast<int>(offsets[blockIdx.x]); for (int i = threadIdx.x; i < feature_size; i += blockDim.x) { out[blockIdx.x * feature_size + i] = (offset >= 0) ? in[offset + i] : pad_value; } } } // namespace void checkLongTensor(const Tensor &tensor) { TORCH_CHECK(tensor.dim() == 1 && tensor.device() == at::kCPU && tensor.scalar_type() == at::kLong, "'lengths' argument should be a 1D CPU int64 tensor"); } at::Tensor revert_varlen_tensor(const Tensor &_input, const Tensor &_offsets) { auto input = _input.contiguous(); auto output = torch::empty_like(input); int64_t seq_length = input.size(0); int64_t batch_size = input.size(1); assert(_offsets.dim() == 1); assert(_offsets.is_cuda()); assert(_offsets.scalar_type() == at::kLong); TORCH_CHECK(_offsets.dim() == 1 && _offsets.is_cuda() && _offsets.scalar_type() == at::kLong, "'offsets' argument should be a 1D CUDA int64 tensor"); TORCH_CHECK(_offsets.numel() == batch_size * seq_length, "Expected `len(offsets) = batch_size * seq_length`, but got ", _offsets.numel(), " (batch_size=", batch_size, ", seq_length=", seq_length, ")"); int64_t feature_size = 1; for (int64_t dim = 2; dim < input.ndimension(); dim++) { feature_size *= input.size(dim); } int numThreads = 512; int numBlocks = batch_size * seq_length; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "revert_varlen", [&] { revert_varlen_kernel<<<numBlocks, numThreads, 0, at::cuda::getCurrentCUDAStream()>>>( input.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), _offsets.data_ptr<int64_t>(), feature_size, batch_size * seq_length, static_cast<scalar_t>(0)); }); return output; } at::Tensor get_offsets(const Tensor &_input, const Tensor &_lengths) { at::native::checkLongTensor(_lengths); auto input = _input.contiguous(); int64_t seq_length = input.size(0); int64_t batch_size = input.size(1); int64_t *lengths = _lengths.data_ptr<int64_t>(); TORCH_CHECK(_lengths.size(0) == batch_size, "Expected `len(lengths)` to be equal to batch_size, but got ", _lengths.size(0), " (batch_size=", batch_size, ")"); TORCH_CHECK( (lengths[batch_size - 1] > 0), "Length of all samples has to be greater than 0, but found an element " "in 'lengths' that is <= 0"); std::vector<int64_t> offsets; offsets.reserve(batch_size * seq_length); int64_t feature_size = 1; for (int64_t dim = 2; dim < input.ndimension(); dim++) { feature_size *= input.size(dim); } for (int64_t t = 0; t < seq_length; t++) { for (int64_t i = 0; i < batch_size; i++) { if (lengths[i] > t) { offsets.push_back(i * feature_size + (lengths[i] - t - 1) * batch_size * feature_size); } else { offsets.push_back(-1); } } } auto options = at::TensorOptions().device(at::kCUDA).dtype(at::kLong); auto offsets_tensor = at::from_blob(offsets.data(), batch_size * seq_length, at::kLong) .to(options, /* non_blocking */ true, /*copy*/ false); return offsets_tensor; } } // namespace native } // namespace at
408c7769f400a389f403fe50a7e5b44ae3ca3ae8.hip
// !!! This is a file automatically generated by hipify!!!
#include "includes.h"

__global__ void complexmult_kernal(float *afft, const float *bfft, int totaltc)
{
    const uint ridx = 2*(threadIdx.x + (blockIdx.x + blockIdx.y*gridDim.x)*MAX_THREADS);

    if(ridx < totaltc){
        const uint iidx = ridx + 1;

        //maybe use float2 to improve coalescing....
        float afftr = afft[ridx];
        float affti = afft[iidx];
        float bfftr = bfft[ridx];
        float bffti = bfft[iidx];

        afft[ridx] = afftr*bfftr - affti*bffti; //real portion
        afft[iidx] = affti*bfftr + afftr*bffti; //imaginary portion
    }
}
408c7769f400a389f403fe50a7e5b44ae3ca3ae8.cu
#include "includes.h"

__global__ void complexmult_kernal(float *afft, const float *bfft, int totaltc)
{
    const uint ridx = 2*(threadIdx.x + (blockIdx.x + blockIdx.y*gridDim.x)*MAX_THREADS);

    if(ridx < totaltc){
        const uint iidx = ridx + 1;

        //maybe use float2 to improve coalescing....
        float afftr = afft[ridx];
        float affti = afft[iidx];
        float bfftr = bfft[ridx];
        float bffti = bfft[iidx];

        afft[ridx] = afftr*bfftr - affti*bffti; //real portion
        afft[iidx] = affti*bfftr + afftr*bffti; //imaginary portion
    }
}
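// A standalone sketch of the same interleaved complex product written with
// cuComplex.h, which loads each real/imaginary pair as one float2 (the
// "maybe use float2" idea in the comment above). The kernel name, array length
// and test values are illustrative assumptions.
#include <cstdio>
#include <cuComplex.h>
#include <cuda_runtime.h>

__global__ void complexmult_float2(cuFloatComplex *a, const cuFloatComplex *b, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        a[i] = cuCmulf(a[i], b[i]);   // (ar*br - ai*bi) + i(ai*br + ar*bi), as above
    }
}

int main()
{
    cuFloatComplex ha = make_cuFloatComplex(1.0f, 2.0f);
    cuFloatComplex hb = make_cuFloatComplex(3.0f, 4.0f);
    cuFloatComplex *da, *db;
    cudaMalloc(&da, sizeof(cuFloatComplex));
    cudaMalloc(&db, sizeof(cuFloatComplex));
    cudaMemcpy(da, &ha, sizeof(ha), cudaMemcpyHostToDevice);
    cudaMemcpy(db, &hb, sizeof(hb), cudaMemcpyHostToDevice);
    complexmult_float2<<<1, 32>>>(da, db, 1);
    cudaMemcpy(&ha, da, sizeof(ha), cudaMemcpyDeviceToHost);
    printf("(1+2i)*(3+4i) = %f + %fi\n", cuCrealf(ha), cuCimagf(ha));  // expect -5 + 10i
    cudaFree(da); cudaFree(db);
    return 0;
}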
6f3af327b614c2a51fab020801c1f9bdaf5b89aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2013-2015, Gregory P. Meyer University of Illinois Board of Trustees All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder(s) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <dip/common/error.h> #include <dip/common/types.h> #define FILTER_HALF_WIDTH 3 #define BLOCK_WIDTH 16 namespace dip { __global__ void BilateralFilter(float sigma_d, float sigma_r, int width, int height, const Depth *depth, Depth *filtered_depth) { // Allocate Shared Memory __shared__ Depth ds[BLOCK_WIDTH][BLOCK_WIDTH]; // Get Block and Thread Id int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; // Calculate Row & Column int col = tx + bx * BLOCK_WIDTH; int row = ty + by * BLOCK_WIDTH; // Cooperative Load of the Tile if ((col < width) && (row < height)) { ds[ty][tx] = depth[col + row * width]; } else { ds[ty][tx] = 0; } // Sync Threads in Block __syncthreads(); // Perform the Bilateral Filter if ((col < width) && (row < height)) { float center_depth = ds[ty][tx]; float h = 0.0f, k = 0.0f; if (center_depth > 0) { for (int dy = -FILTER_HALF_WIDTH; dy <= FILTER_HALF_WIDTH; dy++) { for (int dx = -FILTER_HALF_WIDTH; dx <= FILTER_HALF_WIDTH; dx++) { int x = col + dx; int y = row + dy; if ((x >= 0) && (x < width) && (y >= 0) && (y < height)) { int i = tx + dx; int j = ty + dy; float current_depth; if ((i >= 0) && (i < BLOCK_WIDTH) && (j >= 0) && (j < BLOCK_WIDTH)) current_depth = ds[j][i]; else current_depth = depth[x + y * width]; if (current_depth > 0) { float d = static_cast<float>((dx * dx) + (dy * dy)); float r = static_cast<float>((current_depth - center_depth) * (current_depth - center_depth)); float weight = __expf(-0.5f * (d * sigma_d + r * sigma_r)); h += current_depth * weight; k += weight; } } } } } if (k > 0.0f) filtered_depth[col + row * width] = h / k; else filtered_depth[col + row * width] = 0; } } void BilateralKernel(float sigma_d, float sigma_r, int width, int height, const Depth *depth, Depth *filtered_depth) { // Launch Bilateral Filter Kernel int grid_width = (width + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH; int grid_height = (height + (BLOCK_WIDTH 
- 1)) / BLOCK_WIDTH; dim3 grid_dim(grid_width, grid_height, 1); dim3 block_dim(BLOCK_WIDTH, BLOCK_WIDTH, 1); hipLaunchKernelGGL(( BilateralFilter), dim3(grid_dim), dim3(block_dim), 0, 0, sigma_d, sigma_r, width, height, depth, filtered_depth); CUDA_ERROR_CHECK(hipDeviceSynchronize()); } } // namespace dip
6f3af327b614c2a51fab020801c1f9bdaf5b89aa.cu
/* Copyright (c) 2013-2015, Gregory P. Meyer University of Illinois Board of Trustees All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder(s) nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <dip/common/error.h> #include <dip/common/types.h> #define FILTER_HALF_WIDTH 3 #define BLOCK_WIDTH 16 namespace dip { __global__ void BilateralFilter(float sigma_d, float sigma_r, int width, int height, const Depth *depth, Depth *filtered_depth) { // Allocate Shared Memory __shared__ Depth ds[BLOCK_WIDTH][BLOCK_WIDTH]; // Get Block and Thread Id int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; // Calculate Row & Column int col = tx + bx * BLOCK_WIDTH; int row = ty + by * BLOCK_WIDTH; // Cooperative Load of the Tile if ((col < width) && (row < height)) { ds[ty][tx] = depth[col + row * width]; } else { ds[ty][tx] = 0; } // Sync Threads in Block __syncthreads(); // Perform the Bilateral Filter if ((col < width) && (row < height)) { float center_depth = ds[ty][tx]; float h = 0.0f, k = 0.0f; if (center_depth > 0) { for (int dy = -FILTER_HALF_WIDTH; dy <= FILTER_HALF_WIDTH; dy++) { for (int dx = -FILTER_HALF_WIDTH; dx <= FILTER_HALF_WIDTH; dx++) { int x = col + dx; int y = row + dy; if ((x >= 0) && (x < width) && (y >= 0) && (y < height)) { int i = tx + dx; int j = ty + dy; float current_depth; if ((i >= 0) && (i < BLOCK_WIDTH) && (j >= 0) && (j < BLOCK_WIDTH)) current_depth = ds[j][i]; else current_depth = depth[x + y * width]; if (current_depth > 0) { float d = static_cast<float>((dx * dx) + (dy * dy)); float r = static_cast<float>((current_depth - center_depth) * (current_depth - center_depth)); float weight = __expf(-0.5f * (d * sigma_d + r * sigma_r)); h += current_depth * weight; k += weight; } } } } } if (k > 0.0f) filtered_depth[col + row * width] = h / k; else filtered_depth[col + row * width] = 0; } } void BilateralKernel(float sigma_d, float sigma_r, int width, int height, const Depth *depth, Depth *filtered_depth) { // Launch Bilateral Filter Kernel int grid_width = (width + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH; int grid_height = (height + (BLOCK_WIDTH - 1)) / BLOCK_WIDTH; dim3 grid_dim(grid_width, grid_height, 1); dim3 
block_dim(BLOCK_WIDTH, BLOCK_WIDTH, 1); BilateralFilter<<<grid_dim, block_dim>>>(sigma_d, sigma_r, width, height, depth, filtered_depth); CUDA_ERROR_CHECK(cudaDeviceSynchronize()); } } // namespace dip
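// A small host-side sketch of the weight term used in BilateralFilter above.
// Because sigma_d and sigma_r multiply the squared distances inside exp(), they
// appear to act as inverse variances (1/sigma^2) rather than raw standard
// deviations; the conversion and the sample values below are illustrative
// assumptions, not values taken from the calling code.
#include <cstdio>
#include <cmath>

int main()
{
    float sigma_spatial = 4.5f;              // pixels (assumed)
    float sigma_range   = 30.0f;             // depth units (assumed)
    float sigma_d = 1.0f / (sigma_spatial * sigma_spatial);
    float sigma_r = 1.0f / (sigma_range * sigma_range);

    int   dx = 2, dy = 1;                    // offset inside the 7x7 filter window
    float depth_diff = 12.0f;                // current_depth - center_depth

    float d = (float)(dx * dx + dy * dy);
    float r = depth_diff * depth_diff;
    float weight = expf(-0.5f * (d * sigma_d + r * sigma_r));
    printf("weight = %f\n", weight);
    return 0;
}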
e9be5c124394abca7388b2dc7f223d1aaddcc187.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Udacity HW 4 //Radix Sorting #include "utils.h" #include <thrust/host_vector.h> /* Red Eye Removal =============== For this assignment we are implementing red eye removal. This is accomplished by first creating a score for every pixel that tells us how likely it is to be a red eye pixel. We have already done this for you - you are receiving the scores and need to sort them in ascending order so that we know which pixels to alter to remove the red eye. Note: ascending order == smallest to largest Each score is associated with a position, when you sort the scores, you must also move the positions accordingly. Implementing Parallel Radix Sort with CUDA ========================================== The basic idea is to construct a histogram on each pass of how many of each "digit" there are. Then we scan this histogram so that we know where to put the output of each digit. For example, the first 1 must come after all the 0s so we have to know how many 0s there are to be able to start moving 1s into the correct position. 1) Histogram of the number of occurrences of each digit 2) Exclusive Prefix Sum of Histogram 3) Determine relative offset of each digit For example [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2] 4) Combine the results of steps 2 & 3 to determine the final output location for each element and move it there LSB Radix sort is an out-of-place sort and you will need to ping-pong values between the input and output buffers we have provided. Make sure the final sorted results end up in the output buffer! Hint: You may need to do a copy at the end. */ const int K = 1024; __global__ void bit_digit_histo(unsigned int * d_bins, unsigned int *d_digit_mask, unsigned int * const d_inputVals, int numElems, int which_digit) { int myId = blockIdx.x * blockDim.x + threadIdx.x; if (myId < numElems) { int index = (d_inputVals[myId] & (1u << which_digit)) >> which_digit; atomicAdd(&(d_bins[index]), 1); d_digit_mask[myId] = index; } } // work on [start.. start+K], then we already have a startValue prefixSum to begin with // work on summing digit __global__ void scan_hillis(unsigned int * d_out, unsigned int * const d_digits, int start_index, int numElems, unsigned int digit, int zero_value) { assert( blockIdx.x == 0); // only work for one threadblock unsigned int thIdx = blockIdx.x * blockDim.x + threadIdx.x + start_index; if (thIdx >= numElems) { d_out[thIdx] = 0; } else { d_out[thIdx] = (d_digits[thIdx] == digit) ? 1 : 0; if (threadIdx.x == 0) { d_out[thIdx] += start_index > 0 ? d_out[start_index-1] : zero_value; // from last sequential run } } __syncthreads(); for (int d = 0, step = 1; d < 10; ++d, step *= 2) { int temp; if (threadIdx.x >= step) { temp = d_out[thIdx] + d_out[thIdx - step]; } __syncthreads(); if (threadIdx.x >= step) { d_out[thIdx] = temp; } __syncthreads(); } } __global__ void move(unsigned int * const d_input_vals, unsigned int * const d_input_pos, unsigned int * const d_output_vals, unsigned int * const d_output_pos, unsigned int * d_digits, unsigned int * d_position, int numElems, unsigned int digit) { unsigned int thIdx = blockIdx.x * blockDim.x + threadIdx.x; if (thIdx < numElems && d_digits[thIdx] == digit) { int where = thIdx == 0 ? 
0 : d_position[thIdx-1]; d_output_vals[where] = d_input_vals[thIdx]; d_output_pos[where] = d_input_pos[thIdx]; } } void your_sort(unsigned int* const d_inputVals, unsigned int* const d_inputPos, unsigned int* const d_outputVals, unsigned int* const d_outputPos, const size_t numElems) { //TODO //PUT YOUR SORT HERE unsigned int* d_bins; checkCudaErrors(hipMalloc((void**)&d_bins, sizeof(unsigned int)*2)); int over_size = int((numElems + (K - 1))/ K) * K; unsigned int *d_prefix_sum, *d_digits; // the desired location checkCudaErrors(hipMalloc((void**)&d_prefix_sum, sizeof(unsigned int)*over_size)); // whether that bit is 1 or 0 checkCudaErrors(hipMalloc((void**)&d_digits, sizeof(unsigned int)*over_size)); unsigned int * d_input_vals = d_outputVals; unsigned int * d_input_pos = d_outputPos; unsigned int * d_output_vals = d_inputVals; unsigned int * d_output_pos = d_inputPos; for (int which_bit = 0; which_bit < 32; ++which_bit) { unsigned int * temp = d_input_vals; d_input_vals = d_output_vals; d_output_vals = temp; temp = d_input_pos; d_input_pos = d_output_pos; d_output_pos = temp; // bit histogram checkCudaErrors(hipMemset(d_bins, 0, sizeof(unsigned int)*2)); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( bit_digit_histo), dim3(over_size/K), dim3(K), 0, 0, d_bins, d_digits, d_input_vals, numElems, which_bit); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); unsigned int h_bins[2]; checkCudaErrors(hipMemcpy(h_bins, d_bins, sizeof(unsigned int)*2, hipMemcpyDeviceToHost)); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); std::cout << "Iteration " << which_bit << " histo " << h_bins[0] << ", " << h_bins[1] << std::endl; // location for (int digit = 0, zero_value = 0; digit <= 1; ++digit, zero_value = h_bins[0]) { for (int start = 0; start < numElems; start += K) { hipLaunchKernelGGL(( scan_hillis), dim3(1), dim3(K), 0, 0, d_prefix_sum, d_digits, start, numElems, digit, zero_value); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } hipLaunchKernelGGL(( move), dim3(over_size/K), dim3(K), 0, 0, d_input_vals, d_input_pos, d_output_vals, d_output_pos, d_digits, d_prefix_sum, numElems, digit); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } } checkCudaErrors(hipMemcpy(d_outputVals, d_inputVals, sizeof(int)*numElems, hipMemcpyDeviceToDevice)); checkCudaErrors(hipMemcpy(d_outputPos, d_inputPos, sizeof(int)*numElems, hipMemcpyDeviceToDevice)); checkCudaErrors(hipFree(d_bins)); checkCudaErrors(hipFree(d_prefix_sum)); checkCudaErrors(hipFree(d_digits)); }
e9be5c124394abca7388b2dc7f223d1aaddcc187.cu
//Udacity HW 4 //Radix Sorting #include "utils.h" #include <thrust/host_vector.h> /* Red Eye Removal =============== For this assignment we are implementing red eye removal. This is accomplished by first creating a score for every pixel that tells us how likely it is to be a red eye pixel. We have already done this for you - you are receiving the scores and need to sort them in ascending order so that we know which pixels to alter to remove the red eye. Note: ascending order == smallest to largest Each score is associated with a position, when you sort the scores, you must also move the positions accordingly. Implementing Parallel Radix Sort with CUDA ========================================== The basic idea is to construct a histogram on each pass of how many of each "digit" there are. Then we scan this histogram so that we know where to put the output of each digit. For example, the first 1 must come after all the 0s so we have to know how many 0s there are to be able to start moving 1s into the correct position. 1) Histogram of the number of occurrences of each digit 2) Exclusive Prefix Sum of Histogram 3) Determine relative offset of each digit For example [0 0 1 1 0 0 1] -> [0 1 0 1 2 3 2] 4) Combine the results of steps 2 & 3 to determine the final output location for each element and move it there LSB Radix sort is an out-of-place sort and you will need to ping-pong values between the input and output buffers we have provided. Make sure the final sorted results end up in the output buffer! Hint: You may need to do a copy at the end. */ const int K = 1024; __global__ void bit_digit_histo(unsigned int * d_bins, unsigned int *d_digit_mask, unsigned int * const d_inputVals, int numElems, int which_digit) { int myId = blockIdx.x * blockDim.x + threadIdx.x; if (myId < numElems) { int index = (d_inputVals[myId] & (1u << which_digit)) >> which_digit; atomicAdd(&(d_bins[index]), 1); d_digit_mask[myId] = index; } } // work on [start.. start+K], then we already have a startValue prefixSum to begin with // work on summing digit __global__ void scan_hillis(unsigned int * d_out, unsigned int * const d_digits, int start_index, int numElems, unsigned int digit, int zero_value) { assert( blockIdx.x == 0); // only work for one threadblock unsigned int thIdx = blockIdx.x * blockDim.x + threadIdx.x + start_index; if (thIdx >= numElems) { d_out[thIdx] = 0; } else { d_out[thIdx] = (d_digits[thIdx] == digit) ? 1 : 0; if (threadIdx.x == 0) { d_out[thIdx] += start_index > 0 ? d_out[start_index-1] : zero_value; // from last sequential run } } __syncthreads(); for (int d = 0, step = 1; d < 10; ++d, step *= 2) { int temp; if (threadIdx.x >= step) { temp = d_out[thIdx] + d_out[thIdx - step]; } __syncthreads(); if (threadIdx.x >= step) { d_out[thIdx] = temp; } __syncthreads(); } } __global__ void move(unsigned int * const d_input_vals, unsigned int * const d_input_pos, unsigned int * const d_output_vals, unsigned int * const d_output_pos, unsigned int * d_digits, unsigned int * d_position, int numElems, unsigned int digit) { unsigned int thIdx = blockIdx.x * blockDim.x + threadIdx.x; if (thIdx < numElems && d_digits[thIdx] == digit) { int where = thIdx == 0 ? 
0 : d_position[thIdx-1]; d_output_vals[where] = d_input_vals[thIdx]; d_output_pos[where] = d_input_pos[thIdx]; } } void your_sort(unsigned int* const d_inputVals, unsigned int* const d_inputPos, unsigned int* const d_outputVals, unsigned int* const d_outputPos, const size_t numElems) { //TODO //PUT YOUR SORT HERE unsigned int* d_bins; checkCudaErrors(cudaMalloc((void**)&d_bins, sizeof(unsigned int)*2)); int over_size = int((numElems + (K - 1))/ K) * K; unsigned int *d_prefix_sum, *d_digits; // the desired location checkCudaErrors(cudaMalloc((void**)&d_prefix_sum, sizeof(unsigned int)*over_size)); // whether that bit is 1 or 0 checkCudaErrors(cudaMalloc((void**)&d_digits, sizeof(unsigned int)*over_size)); unsigned int * d_input_vals = d_outputVals; unsigned int * d_input_pos = d_outputPos; unsigned int * d_output_vals = d_inputVals; unsigned int * d_output_pos = d_inputPos; for (int which_bit = 0; which_bit < 32; ++which_bit) { unsigned int * temp = d_input_vals; d_input_vals = d_output_vals; d_output_vals = temp; temp = d_input_pos; d_input_pos = d_output_pos; d_output_pos = temp; // bit histogram checkCudaErrors(cudaMemset(d_bins, 0, sizeof(unsigned int)*2)); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); bit_digit_histo<<<over_size/K, K>>>(d_bins, d_digits, d_input_vals, numElems, which_bit); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); unsigned int h_bins[2]; checkCudaErrors(cudaMemcpy(h_bins, d_bins, sizeof(unsigned int)*2, cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); std::cout << "Iteration " << which_bit << " histo " << h_bins[0] << ", " << h_bins[1] << std::endl; // location for (int digit = 0, zero_value = 0; digit <= 1; ++digit, zero_value = h_bins[0]) { for (int start = 0; start < numElems; start += K) { scan_hillis<<<1, K>>>(d_prefix_sum, d_digits, start, numElems, digit, zero_value); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } move<<<over_size/K, K>>>(d_input_vals, d_input_pos, d_output_vals, d_output_pos, d_digits, d_prefix_sum, numElems, digit); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } } checkCudaErrors(cudaMemcpy(d_outputVals, d_inputVals, sizeof(int)*numElems, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaMemcpy(d_outputPos, d_inputPos, sizeof(int)*numElems, cudaMemcpyDeviceToDevice)); checkCudaErrors(cudaFree(d_bins)); checkCudaErrors(cudaFree(d_prefix_sum)); checkCudaErrors(cudaFree(d_digits)); }
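The comment block in the pair above walks through the four steps of a binary LSD radix-sort pass (per-digit histogram, exclusive scan of the histogram, relative offsets, scatter). The host-side sketch below only illustrates that data flow on the CPU and is not part of the assignment code; the function name radix_pass and its signature are assumptions made here.

#include <vector>
#include <cstdint>
#include <cstddef>

// One pass of a binary LSD radix sort on the host, mirroring steps 1-4 of the
// comment block above. radix_pass is an illustrative name, not assignment code.
static void radix_pass(const std::vector<std::uint32_t>& in_vals,
                       const std::vector<std::uint32_t>& in_pos,
                       std::vector<std::uint32_t>& out_vals,
                       std::vector<std::uint32_t>& out_pos,
                       unsigned bit)
{
    const std::size_t n = in_vals.size();
    out_vals.resize(n);
    out_pos.resize(n);

    // 1) Histogram of the two possible digit values for this bit.
    std::size_t histo[2] = {0, 0};
    for (std::size_t i = 0; i < n; ++i)
        ++histo[(in_vals[i] >> bit) & 1u];

    // 2) Exclusive prefix sum of the histogram: every 1 goes after all the 0s.
    std::size_t base[2] = {0, histo[0]};

    // 3) + 4) Relative offset of each element within its digit, combined with
    //         the base to give the final slot; values and positions move together.
    std::size_t offset[2] = {0, 0};
    for (std::size_t i = 0; i < n; ++i) {
        unsigned d = (in_vals[i] >> bit) & 1u;
        std::size_t where = base[d] + offset[d]++;
        out_vals[where] = in_vals[i];
        out_pos[where]  = in_pos[i];
    }
}

Repeating this pass for bits 0..31 while ping-ponging the two buffer pairs leaves the data fully sorted, which is the loop structure your_sort implements on the device with bit_digit_histo, scan_hillis and move.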
cu_backprojection.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cu_backprojection.h" __host__ void host2_backprojection(float *d_img, float *d_proj, float *float_para, int *int_para) { } __host__ void host_backprojection(float *d_img, float *d_proj, float angle,float SO, float SD, float da, int na, float ai, float db, int nb, float bi, int nx, int ny, int nz) { hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); struct hipExtent extent = make_hipExtent(na, nb, 1); hipArray *array_proj; hipMalloc3DArray(&array_proj, &channelDesc, extent); hipMemcpy3DParms copyParams = {0}; hipPitchedPtr dp_proj = make_hipPitchedPtr((void*) d_proj, na * sizeof(float), na, nb); copyParams.extent = extent; copyParams.kind = hipMemcpyDeviceToDevice; copyParams.srcPtr = dp_proj; copyParams.dstArray = array_proj; hipMemcpy3D(&copyParams); hipResourceDesc resDesc; hipTextureDesc texDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeArray; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = hipAddressModeClamp; texDesc.addressMode[1] = hipAddressModeClamp; texDesc.addressMode[2] = hipAddressModeClamp; texDesc.filterMode = hipFilterModeLinear; texDesc.readMode = hipReadModeElementType; texDesc.normalizedCoords = 0; resDesc.res.array.array = array_proj; hipTextureObject_t tex_proj = 0; // hipTextureObject_t tex_proj = host_create_texture_object(d_proj, nb, na, 1); hipCreateTextureObject(&tex_proj, &resDesc, &texDesc, NULL); const dim3 gridSize_img((nx + BLOCKSIZE_X - 1) / BLOCKSIZE_X, (ny + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y, (nz + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z); const dim3 blockSize(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z); hipLaunchKernelGGL(( kernel_backprojection), dim3(gridSize_img), dim3(blockSize), 0, 0, d_img, tex_proj, angle, SO, SD, na, nb, da, db, ai, bi, nx, ny, nz); hipDeviceSynchronize(); hipFreeArray(array_proj); hipDestroyTextureObject(tex_proj); } __global__ void kernel_backprojection(float *img, hipTextureObject_t tex_proj, float angle, float SO, float SD, int na, int nb, float da, float db, float ai, float bi, int nx, int ny, int nz){ int ix = BLOCKSIZE_X * blockIdx.x + threadIdx.x; int iy = BLOCKSIZE_Y * blockIdx.y + threadIdx.y; int iz = BLOCKSIZE_Z * blockIdx.z + threadIdx.z; if (ix >= nx || iy >= ny || iz >= nz) return; int id = ix + iy * nx + iz * nx * ny; // angle += 3.141592653589793; img[id] = 0.0f; // float sphi = __sinf(angle); // float cphi = __cosf(angle); float sphi = __sinf(angle); float cphi = __cosf(angle); // float dd_voxel[3]; float xc, yc, zc; xc = (float)ix - nx / 2 + 0.5f; yc = (float)iy - ny / 2 + 0.5f; zc = (float)iz - nz / 2 + 0.5f; // voxel boundary coordinates float xll, yll, zll, xlr, ylr, zlr, xrl, yrl, zrl, xrr, yrr, zrr, xt, yt, zt, xb, yb, zb; // xll = +(xc - 0.5f) * cphi + (yc - 0.5f) * sphi; // yll = -(xc - 0.5f) * sphi + (yc - 0.5f) * cphi; // xrr = +(xc + 0.5f) * cphi + (yc + 0.5f) * sphi; // yrr = -(xc + 0.5f) * sphi + (yc + 0.5f) * cphi; // zll = zc; zrr = zc; // xrl = +(xc + 0.5f) * cphi + (yc - 0.5f) * sphi; // yrl = -(xc + 0.5f) * sphi + (yc - 0.5f) * cphi; // xlr = +(xc - 0.5f) * cphi + (yc + 0.5f) * sphi; // ylr = -(xc - 0.5f) * sphi + (yc + 0.5f) * cphi; // zrl = zc; zlr = zc; xll = +xc * cphi + yc * sphi - 0.5f; yll = -xc * sphi + yc * cphi - 0.5f; xrr = +xc * cphi + yc * sphi + 0.5f; yrr = -xc * sphi + yc * cphi + 0.5f; zll = zc; zrr = zc; xrl = +xc * cphi + yc * sphi + 0.5f; yrl = -xc * sphi + yc * cphi - 0.5f; xlr = +xc * cphi + yc * sphi - 
0.5f; ylr = -xc * sphi + yc * cphi + 0.5f; zrl = zc; zlr = zc; xt = xc * cphi + yc * sphi; yt = -xc * sphi + yc * cphi; zt = zc + 0.5f; xb = xc * cphi + yc * sphi; yb = -xc * sphi + yc * cphi; zb = zc - 0.5f; // the coordinates of source and detector plane here are after rotation float ratio, all, bll, alr, blr, arl, brl, arr, brr, at, bt, ab, bb, a_max, a_min, b_max, b_min; // calculate a value for each boundary coordinates // the a and b here are all absolute positions from isocenter, which are on detector planes ratio = SD / (xll + SO); all = ratio * yll; bll = ratio * zll; ratio = SD / (xrr + SO); arr = ratio * yrr; brr = ratio * zrr; ratio = SD / (xlr + SO); alr = ratio * ylr; blr = ratio * zlr; ratio = SD / (xrl + SO); arl = ratio * yrl; brl = ratio * zrl; ratio = SD / (xt + SO); at = ratio * yt; bt = ratio * zt; ratio = SD / (xb + SO); ab = ratio * yb; bb = ratio * zb; // get the max and min values of all boundary projectors of voxel boundaries on detector plane // a_max = MAX4(al ,ar, at, ab); // a_min = MIN4(al ,ar, at, ab); // b_max = MAX4(bl ,br, bt, bb); // b_min = MIN4(bl ,br, bt, bb); a_max = MAX6(all ,arr, alr, arl, at, ab); a_min = MIN6(all ,arr, alr, arl, at, ab); b_max = MAX6(bll ,brr, blr, brl, bt, bb); b_min = MIN6(bll ,brr, blr, brl, bt, bb); // the related positions on detector plane from start points a_max = a_max / da - ai + 0.5f; // now they are the detector coordinates a_min = a_min / da - ai + 0.5f; b_max = b_max / db - bi + 0.5f; b_min = b_min / db - bi + 0.5f; int a_ind_max = (int)floorf(a_max); int a_ind_min = (int)floorf(a_min); int b_ind_max = (int)floorf(b_max); int b_ind_min = (int)floorf(b_min); // int a_ind_max = (int)floorf(a_max / da - ai); // int a_ind_min = (int)floorf(a_min / da - ai); // int b_ind_max = (int)floorf(b_max / db - bi); // int b_ind_min = (int)floorf(b_min / db - bi); float bin_bound_1, bin_bound_2, wa, wb; for (int ia = MAX(0, a_ind_min); ia < MIN(na, a_max); ia ++){ // bin_bound_1 = ((float)ia + ai) * da; // bin_bound_2 = ((float)ia + ai + 1.0f) * da; bin_bound_1 = ia + 0.0f; bin_bound_2 = ia + 1.0f; wa = MIN(bin_bound_2, a_max) - MAX(bin_bound_1, a_min);// wa /= a_max - a_min; for (int ib = MAX(0, b_ind_min); ib < MIN(nb, b_max); ib ++){ // bin_bound_1 = ((float)ib + bi) * db; // bin_bound_2 = ((float)ib + bi + 1.0f) * db; bin_bound_1 = ib + 0.0f; bin_bound_2 = ib + 1.0f; // wb = MIN(bin_bound_2, b_max) - MAX(bin_bound_1, b_min);// wb /= db; wb = MIN(bin_bound_2, b_max) - MAX(bin_bound_1, b_min);// wb /= b_max - b_min; img[id] += wa * wb * tex3D<float>(tex_proj, (ia + 0.5f), (ib + 0.5f), 0.5f); } } }
cu_backprojection.cu
#include "cu_backprojection.h" __host__ void host2_backprojection(float *d_img, float *d_proj, float *float_para, int *int_para) { } __host__ void host_backprojection(float *d_img, float *d_proj, float angle,float SO, float SD, float da, int na, float ai, float db, int nb, float bi, int nx, int ny, int nz) { cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); struct cudaExtent extent = make_cudaExtent(na, nb, 1); cudaArray *array_proj; cudaMalloc3DArray(&array_proj, &channelDesc, extent); cudaMemcpy3DParms copyParams = {0}; cudaPitchedPtr dp_proj = make_cudaPitchedPtr((void*) d_proj, na * sizeof(float), na, nb); copyParams.extent = extent; copyParams.kind = cudaMemcpyDeviceToDevice; copyParams.srcPtr = dp_proj; copyParams.dstArray = array_proj; cudaMemcpy3D(&copyParams); cudaResourceDesc resDesc; cudaTextureDesc texDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeArray; memset(&texDesc, 0, sizeof(texDesc)); texDesc.addressMode[0] = cudaAddressModeClamp; texDesc.addressMode[1] = cudaAddressModeClamp; texDesc.addressMode[2] = cudaAddressModeClamp; texDesc.filterMode = cudaFilterModeLinear; texDesc.readMode = cudaReadModeElementType; texDesc.normalizedCoords = 0; resDesc.res.array.array = array_proj; cudaTextureObject_t tex_proj = 0; // cudaTextureObject_t tex_proj = host_create_texture_object(d_proj, nb, na, 1); cudaCreateTextureObject(&tex_proj, &resDesc, &texDesc, NULL); const dim3 gridSize_img((nx + BLOCKSIZE_X - 1) / BLOCKSIZE_X, (ny + BLOCKSIZE_Y - 1) / BLOCKSIZE_Y, (nz + BLOCKSIZE_Z - 1) / BLOCKSIZE_Z); const dim3 blockSize(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z); kernel_backprojection<<<gridSize_img, blockSize>>>(d_img, tex_proj, angle, SO, SD, na, nb, da, db, ai, bi, nx, ny, nz); cudaDeviceSynchronize(); cudaFreeArray(array_proj); cudaDestroyTextureObject(tex_proj); } __global__ void kernel_backprojection(float *img, cudaTextureObject_t tex_proj, float angle, float SO, float SD, int na, int nb, float da, float db, float ai, float bi, int nx, int ny, int nz){ int ix = BLOCKSIZE_X * blockIdx.x + threadIdx.x; int iy = BLOCKSIZE_Y * blockIdx.y + threadIdx.y; int iz = BLOCKSIZE_Z * blockIdx.z + threadIdx.z; if (ix >= nx || iy >= ny || iz >= nz) return; int id = ix + iy * nx + iz * nx * ny; // angle += 3.141592653589793; img[id] = 0.0f; // float sphi = __sinf(angle); // float cphi = __cosf(angle); float sphi = __sinf(angle); float cphi = __cosf(angle); // float dd_voxel[3]; float xc, yc, zc; xc = (float)ix - nx / 2 + 0.5f; yc = (float)iy - ny / 2 + 0.5f; zc = (float)iz - nz / 2 + 0.5f; // voxel boundary coordinates float xll, yll, zll, xlr, ylr, zlr, xrl, yrl, zrl, xrr, yrr, zrr, xt, yt, zt, xb, yb, zb; // xll = +(xc - 0.5f) * cphi + (yc - 0.5f) * sphi; // yll = -(xc - 0.5f) * sphi + (yc - 0.5f) * cphi; // xrr = +(xc + 0.5f) * cphi + (yc + 0.5f) * sphi; // yrr = -(xc + 0.5f) * sphi + (yc + 0.5f) * cphi; // zll = zc; zrr = zc; // xrl = +(xc + 0.5f) * cphi + (yc - 0.5f) * sphi; // yrl = -(xc + 0.5f) * sphi + (yc - 0.5f) * cphi; // xlr = +(xc - 0.5f) * cphi + (yc + 0.5f) * sphi; // ylr = -(xc - 0.5f) * sphi + (yc + 0.5f) * cphi; // zrl = zc; zlr = zc; xll = +xc * cphi + yc * sphi - 0.5f; yll = -xc * sphi + yc * cphi - 0.5f; xrr = +xc * cphi + yc * sphi + 0.5f; yrr = -xc * sphi + yc * cphi + 0.5f; zll = zc; zrr = zc; xrl = +xc * cphi + yc * sphi + 0.5f; yrl = -xc * sphi + yc * cphi - 0.5f; xlr = +xc * cphi + yc * sphi - 0.5f; ylr = -xc * sphi + yc * cphi + 0.5f; zrl = zc; zlr = zc; xt = xc * cphi + yc * sphi; yt = -xc 
* sphi + yc * cphi; zt = zc + 0.5f; xb = xc * cphi + yc * sphi; yb = -xc * sphi + yc * cphi; zb = zc - 0.5f; // the coordinates of source and detector plane here are after rotation float ratio, all, bll, alr, blr, arl, brl, arr, brr, at, bt, ab, bb, a_max, a_min, b_max, b_min; // calculate a value for each boundary coordinates // the a and b here are all absolute positions from isocenter, which are on detector planes ratio = SD / (xll + SO); all = ratio * yll; bll = ratio * zll; ratio = SD / (xrr + SO); arr = ratio * yrr; brr = ratio * zrr; ratio = SD / (xlr + SO); alr = ratio * ylr; blr = ratio * zlr; ratio = SD / (xrl + SO); arl = ratio * yrl; brl = ratio * zrl; ratio = SD / (xt + SO); at = ratio * yt; bt = ratio * zt; ratio = SD / (xb + SO); ab = ratio * yb; bb = ratio * zb; // get the max and min values of all boundary projectors of voxel boundaries on detector plane // a_max = MAX4(al ,ar, at, ab); // a_min = MIN4(al ,ar, at, ab); // b_max = MAX4(bl ,br, bt, bb); // b_min = MIN4(bl ,br, bt, bb); a_max = MAX6(all ,arr, alr, arl, at, ab); a_min = MIN6(all ,arr, alr, arl, at, ab); b_max = MAX6(bll ,brr, blr, brl, bt, bb); b_min = MIN6(bll ,brr, blr, brl, bt, bb); // the related positions on detector plane from start points a_max = a_max / da - ai + 0.5f; // now they are the detector coordinates a_min = a_min / da - ai + 0.5f; b_max = b_max / db - bi + 0.5f; b_min = b_min / db - bi + 0.5f; int a_ind_max = (int)floorf(a_max); int a_ind_min = (int)floorf(a_min); int b_ind_max = (int)floorf(b_max); int b_ind_min = (int)floorf(b_min); // int a_ind_max = (int)floorf(a_max / da - ai); // int a_ind_min = (int)floorf(a_min / da - ai); // int b_ind_max = (int)floorf(b_max / db - bi); // int b_ind_min = (int)floorf(b_min / db - bi); float bin_bound_1, bin_bound_2, wa, wb; for (int ia = MAX(0, a_ind_min); ia < MIN(na, a_max); ia ++){ // bin_bound_1 = ((float)ia + ai) * da; // bin_bound_2 = ((float)ia + ai + 1.0f) * da; bin_bound_1 = ia + 0.0f; bin_bound_2 = ia + 1.0f; wa = MIN(bin_bound_2, a_max) - MAX(bin_bound_1, a_min);// wa /= a_max - a_min; for (int ib = MAX(0, b_ind_min); ib < MIN(nb, b_max); ib ++){ // bin_bound_1 = ((float)ib + bi) * db; // bin_bound_2 = ((float)ib + bi + 1.0f) * db; bin_bound_1 = ib + 0.0f; bin_bound_2 = ib + 1.0f; // wb = MIN(bin_bound_2, b_max) - MAX(bin_bound_1, b_min);// wb /= db; wb = MIN(bin_bound_2, b_max) - MAX(bin_bound_1, b_min);// wb /= b_max - b_min; img[id] += wa * wb * tex3D<float>(tex_proj, (ia + 0.5f), (ib + 0.5f), 0.5f); } } }
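Both versions of kernel_backprojection above project each voxel boundary point onto the detector with the same similar-triangles relation (ratio = SD / (x + SO), then a = ratio * y, b = ratio * z) before converting to detector bin indices. The helper below is a host-side restatement of that mapping for reference only; project_to_detector and the DetectorCoord struct are illustrative names, not part of the original file.

// Sketch of the voxel-corner-to-detector mapping used in kernel_backprojection,
// assuming the same geometry: SO = source-to-isocenter distance, SD =
// source-to-detector distance, (x, y, z) already rotated into the source frame.
struct DetectorCoord { float a; float b; };

static inline DetectorCoord project_to_detector(float x, float y, float z,
                                                float SO, float SD,
                                                float da, float db,
                                                float ai, float bi)
{
    // Similar triangles: magnification from the voxel plane onto the detector.
    float ratio = SD / (x + SO);
    DetectorCoord c;
    // Absolute detector positions measured from the isocenter ray ...
    c.a = ratio * y;
    c.b = ratio * z;
    // ... converted to fractional detector bin indices, as in the kernel.
    c.a = c.a / da - ai + 0.5f;
    c.b = c.b / db - bi + 0.5f;
    return c;
}

The kernel evaluates this mapping for six boundary points per voxel, takes the min/max along each detector axis, and accumulates tex3D reads weighted by the overlap wa * wb of that footprint with each detector bin.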
487f095f1e78c9353f39c27a0bf6f92100bd84e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip_runtime.h" #ifndef _BACKPROP_CUDA_KERNEL_H_ #define _BACKPROP_CUDA_KERNEL_H_ #include <stdio.h> #include "backprop.h" #include "math.h" #include "hip/hip_runtime.h" __global__ void bpnn_layerforward_CUDA(hipLaunchParm lp, float *input_cuda, float *output_hidden_cuda, float *input_hidden_cuda, float *hidden_partial_sum, int in, int hid) { int by = hipBlockIdx_y; int tx = hipThreadIdx_x; int ty = hipThreadIdx_y; int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ; int index_in = HEIGHT * by + ty + 1; __shared__ float input_node[HEIGHT]; __shared__ float weight_matrix[HEIGHT][WIDTH]; if ( tx == 0 ) input_node[ty] = input_cuda[index_in] ; __syncthreads(); weight_matrix[ty][tx] = input_hidden_cuda[index]; __syncthreads(); weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty]; __syncthreads(); for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){ int power_two = __powf(2, i); if( ty % power_two == 0 ) weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx]; __syncthreads(); } //__syncthreads(); input_hidden_cuda[index] = weight_matrix[ty][tx]; /* for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){ unsigned int power_two = i - 1; if( (ty & power_two) == 0 ) { weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx]; } } */ __syncthreads(); if ( tx == 0 ) { hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty]; } } __global__ void bpnn_adjust_weights_cuda(hipLaunchParm lp, float * delta, int hid, float * ly, int in, float * w, float * oldw) { int by = hipBlockIdx_y; int tx = hipThreadIdx_x; int ty = hipThreadIdx_y; int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ; int index_y = HEIGHT * by + ty + 1; int index_x = tx + 1; //eta = 0.3; //momentum = 0.3; w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); __syncthreads(); if (ty == 0 && by ==0){ w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); } } #endif
487f095f1e78c9353f39c27a0bf6f92100bd84e2.cu
#include "hip_runtime.h" #ifndef _BACKPROP_CUDA_KERNEL_H_ #define _BACKPROP_CUDA_KERNEL_H_ #include <stdio.h> #include "backprop.h" #include "math.h" #include "cuda.h" __global__ void bpnn_layerforward_CUDA(hipLaunchParm lp, float *input_cuda, float *output_hidden_cuda, float *input_hidden_cuda, float *hidden_partial_sum, int in, int hid) { int by = hipBlockIdx_y; int tx = hipThreadIdx_x; int ty = hipThreadIdx_y; int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ; int index_in = HEIGHT * by + ty + 1; __shared__ float input_node[HEIGHT]; __shared__ float weight_matrix[HEIGHT][WIDTH]; if ( tx == 0 ) input_node[ty] = input_cuda[index_in] ; __syncthreads(); weight_matrix[ty][tx] = input_hidden_cuda[index]; __syncthreads(); weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty]; __syncthreads(); for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){ int power_two = __powf(2, i); if( ty % power_two == 0 ) weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx]; __syncthreads(); } //__syncthreads(); input_hidden_cuda[index] = weight_matrix[ty][tx]; /* for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){ unsigned int power_two = i - 1; if( (ty & power_two) == 0 ) { weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx]; } } */ __syncthreads(); if ( tx == 0 ) { hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty]; } } __global__ void bpnn_adjust_weights_cuda(hipLaunchParm lp, float * delta, int hid, float * ly, int in, float * w, float * oldw) { int by = hipBlockIdx_y; int tx = hipThreadIdx_x; int ty = hipThreadIdx_y; int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ; int index_y = HEIGHT * by + ty + 1; int index_x = tx + 1; //eta = 0.3; //momentum = 0.3; w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index])); __syncthreads(); if (ty == 0 && by ==0){ w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x])); } } #endif
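The bpnn_layerforward_CUDA kernel in this pair reduces each column of weight_matrix with a tree of pairwise additions whose stride doubles on every iteration. The sketch below shows the same reduction pattern on a 1-D shared array, stripped of the layer-forward bookkeeping; block_sum and BLOCK are illustrative assumptions, and integer strides replace the kernel's __powf-based stride computation.

// Minimal in-block tree reduction with the same "stride doubles each step"
// structure as bpnn_layerforward_CUDA, written for a 1-D block.
// Assumes the kernel is launched with blockDim.x == BLOCK and that BLOCK is a
// power of two. block_sum and BLOCK are illustrative, not from the original file.
#define BLOCK 256

__global__ void block_sum(const float *in, float *out, int n)
{
    __shared__ float buf[BLOCK];
    int tid = threadIdx.x;
    int gid = blockIdx.x * blockDim.x + tid;

    buf[tid] = (gid < n) ? in[gid] : 0.0f;
    __syncthreads();

    // After the step with a given stride, every thread whose index is a
    // multiple of 2*stride holds the sum of a contiguous group of 2*stride values.
    for (int stride = 1; stride < blockDim.x; stride *= 2) {
        if (tid % (2 * stride) == 0)
            buf[tid] += buf[tid + stride];
        __syncthreads();
    }

    if (tid == 0)
        out[blockIdx.x] = buf[0];   // one partial sum per block
}

A launch such as block_sum<<<(n + BLOCK - 1) / BLOCK, BLOCK>>>(d_in, d_out, n) leaves one partial sum per block in d_out; the original kernel applies the same pattern per column of its 2-D shared tile and writes the partials into hidden_partial_sum.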
a2a72370b746f36c64201665c5c712c4954a9e6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <string> #include <cstdlib> // 2 GB of data #define BYTES 2147483648 #define MAX_STRIDE 4194304 #define MAX_INDEX (BYTES/MAX_STRIDE) static void __global__ Set(const int Seed, const int Stride, char *data) { // Everyone set some data for(int i = threadIdx.x; i < BYTES ; i+= blockDim.x ) { data[i] = (char)(i * threadIdx.x + Seed); } } static void __global__ CacheTest(const int Seed , const int Stride , char *data, int *result ) { if(threadIdx.x == 0 ) { int local_result = 0; for(int i = 0 ; i < MAX_INDEX; ++i) { local_result += data[i *(Stride+1)]; } *result = local_result; // Here to make sure we don't optimize the loop away } } static void Validate(bool cond, std::string msg) { if(!cond) { std::cout << msg << std::endl; hipDeviceReset(); std::exit(EXIT_FAILURE); } } static void CudaCheck(std::string msg, hipError_t err) { Validate(err==hipSuccess, hipGetErrorString(err) + std::string("\n") + msg); } void sort(float *durations, int size) { for(int i = 1 ; i < size; ++i) { float cand = durations[i]; int j = i; while( j > 0 && cand < durations[j-1]) { if(durations[j] < durations[j-1]) { durations[j] = durations[j-1]; } --j; } durations[j] = cand; } } float Run(const int Seed, const int Stride) { const int Blocks = 1; const int Threads = 1024; float time; hipEvent_t start,end; char *d_data; int *d_result; CudaCheck("Malloc Result", hipMalloc(&d_result, sizeof(int) ) ); CudaCheck("Malloc Data", hipMalloc(&d_data, BYTES ) ); CudaCheck("Memset Result", hipMemset(d_result, 0, sizeof(int) ) ); CudaCheck("Memset Data", hipMemset(d_data, 0, BYTES ) ); hipLaunchKernelGGL(( Set), dim3(Blocks),dim3(Threads), 0, 0, Seed, Stride, d_data); CudaCheck("Set",hipDeviceSynchronize()); CudaCheck("Create start",hipEventCreate(&start)); CudaCheck("Create end",hipEventCreate(&end)); CudaCheck("Record start",hipEventRecord(start,0)); hipLaunchKernelGGL(( CacheTest), dim3(Blocks),dim3(Threads), 0, 0, Seed, Stride, d_data, d_result); CudaCheck("Record end",hipEventRecord(end,0)); CudaCheck("Device Sync",hipDeviceSynchronize()); CudaCheck("Event sync", hipEventSynchronize(end) ); CudaCheck("Get elapsed time",hipEventElapsedTime(&time,start,end)); CudaCheck("Destroy start",hipEventDestroy(start)); CudaCheck("Destroy end",hipEventDestroy(end)); CudaCheck("Free result", hipFree(d_result)); CudaCheck("Free data", hipFree(d_data)); CudaCheck("Reset",hipDeviceReset()); return time; } int main(int argc, char* argv[]) { const int Runs = 50; float durations[Runs]; Validate(argc==2,"Usage: " + std::string(argv[0]) + " stride"); const int Stride = atoi(argv[1]); Validate(Stride <= MAX_STRIDE,"Decrease Stride"); std::cout << "Stride: " << Stride << std::endl; for(int i = 0 ; i < Runs ; ++i ) { durations[i] = Run(i+1, Stride); } sort(durations,Runs); float time = 0; int count = 0; for(int i = 0; i < Runs; ++i) { time += durations[i]; ++count; } time /= count; std::cout << "Elapsed Time: " << time << "ms" << std::endl; return EXIT_SUCCESS; }
a2a72370b746f36c64201665c5c712c4954a9e6f.cu
#include <iostream> #include <string> #include <cstdlib> // 2 GB of data #define BYTES 2147483648 #define MAX_STRIDE 4194304 #define MAX_INDEX (BYTES/MAX_STRIDE) static void __global__ Set(const int Seed, const int Stride, char *data) { // Everyone set some data for(int i = threadIdx.x; i < BYTES ; i+= blockDim.x ) { data[i] = (char)(i * threadIdx.x + Seed); } } static void __global__ CacheTest(const int Seed , const int Stride , char *data, int *result ) { if(threadIdx.x == 0 ) { int local_result = 0; for(int i = 0 ; i < MAX_INDEX; ++i) { local_result += data[i *(Stride+1)]; } *result = local_result; // Here to make sure we don't optimize the loop away } } static void Validate(bool cond, std::string msg) { if(!cond) { std::cout << msg << std::endl; cudaDeviceReset(); std::exit(EXIT_FAILURE); } } static void CudaCheck(std::string msg, cudaError err) { Validate(err==cudaSuccess, cudaGetErrorString(err) + std::string("\n") + msg); } void sort(float *durations, int size) { for(int i = 1 ; i < size; ++i) { float cand = durations[i]; int j = i; while( j > 0 && cand < durations[j-1]) { if(durations[j] < durations[j-1]) { durations[j] = durations[j-1]; } --j; } durations[j] = cand; } } float Run(const int Seed, const int Stride) { const int Blocks = 1; const int Threads = 1024; float time; cudaEvent_t start,end; char *d_data; int *d_result; CudaCheck("Malloc Result", cudaMalloc(&d_result, sizeof(int) ) ); CudaCheck("Malloc Data", cudaMalloc(&d_data, BYTES ) ); CudaCheck("Memset Result", cudaMemset(d_result, 0, sizeof(int) ) ); CudaCheck("Memset Data", cudaMemset(d_data, 0, BYTES ) ); Set<<<Blocks,Threads>>>(Seed, Stride, d_data); CudaCheck("Set",cudaDeviceSynchronize()); CudaCheck("Create start",cudaEventCreate(&start)); CudaCheck("Create end",cudaEventCreate(&end)); CudaCheck("Record start",cudaEventRecord(start,0)); CacheTest<<<Blocks,Threads>>>(Seed, Stride, d_data, d_result); CudaCheck("Record end",cudaEventRecord(end,0)); CudaCheck("Device Sync",cudaDeviceSynchronize()); CudaCheck("Event sync", cudaEventSynchronize(end) ); CudaCheck("Get elapsed time",cudaEventElapsedTime(&time,start,end)); CudaCheck("Destroy start",cudaEventDestroy(start)); CudaCheck("Destroy end",cudaEventDestroy(end)); CudaCheck("Free result", cudaFree(d_result)); CudaCheck("Free data", cudaFree(d_data)); CudaCheck("Reset",cudaDeviceReset()); return time; } int main(int argc, char* argv[]) { const int Runs = 50; float durations[Runs]; Validate(argc==2,"Usage: " + std::string(argv[0]) + " stride"); const int Stride = atoi(argv[1]); Validate(Stride <= MAX_STRIDE,"Decrease Stride"); std::cout << "Stride: " << Stride << std::endl; for(int i = 0 ; i < Runs ; ++i ) { durations[i] = Run(i+1, Stride); } sort(durations,Runs); float time = 0; int count = 0; for(int i = 0; i < Runs; ++i) { time += durations[i]; ++count; } time /= count; std::cout << "Elapsed Time: " << time << "ms" << std::endl; return EXIT_SUCCESS; }
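In the CacheTest kernel above, a single thread reads MAX_INDEX (= BYTES / MAX_STRIDE = 512) bytes spaced Stride + 1 bytes apart, so the stride argument controls how many distinct cache lines the timed loop touches. The host-side helper below estimates that footprint when interpreting the reported times; the 128-byte line size and the name report_footprint are assumptions made here for illustration only.

// Rough footprint estimate for the single-thread read loop in CacheTest:
// it reads max_index bytes at addresses i * (stride + 1), i = 0..max_index-1.
// LINE_BYTES = 128 is an assumed cache-line size, purely for illustration.
#include <cstdio>
#include <set>

static void report_footprint(long long stride, long long max_index)
{
    const long long LINE_BYTES = 128;
    std::set<long long> lines;              // distinct cache lines touched
    for (long long i = 0; i < max_index; ++i)
        lines.insert((i * (stride + 1)) / LINE_BYTES);

    long long span = (max_index - 1) * (stride + 1) + 1;   // bytes spanned
    std::printf("stride=%lld: %zu lines touched, %lld bytes spanned\n",
                stride, lines.size(), span);
}

For example, report_footprint(0, 512) touches only 4 such lines, while report_footprint(4095, 512) spreads the same 512 single-byte reads over 512 lines, which is the contrast the timing loop is built to expose.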