hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M) |
---|---|---|---|
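Each row below pairs a file automatically translated by hipify with the original CUDA source it was generated from. As a minimal sketch of how such pairs could be consumed (assuming the table is published as a Hugging Face dataset; the repository id in the snippet is a placeholder, not the real name), the four columns map directly onto record fields:

```python
# Minimal sketch, assuming these pairs are available via the Hugging Face `datasets` library.
# "user/hip-cuda-pairs" is a placeholder identifier, not the actual repository name.
from datasets import load_dataset

pairs = load_dataset("user/hip-cuda-pairs", split="train")

# Each record holds a hipified file and the CUDA original it was generated from.
for row in pairs.select(range(3)):
    print(row["hip_filename"], "<->", row["cuda_filename"])
    hip_src = row["hip_content"]    # full hipified source text
    cuda_src = row["cuda_content"]  # full original CUDA source text
```

Keeping the hipified and CUDA texts side by side in one record makes it straightforward to diff the two versions or to build translation-style training examples from them.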
8da098dc49c87a956f28e318cb3759faf349119e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THHUNN/generic/RReLU.hip"
#else
#include <THHUNN/common.h>
#include <ATen/CUDAGeneratorImpl.h>
void THNN_(RReLU_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *noise,
double lower,
double upper,
bool train,
bool inplace,
c10::optional<at::Generator> generator)
{
THCUNN_assertSameGPU(state, 3, input, output, noise);
auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(generator, at::cuda::detail::getDefaultCUDAGenerator());
if (train)
{
auto inputTensor = THTensor_wrap(input).contiguous();
input = inputTensor.unsafeGetTensorImpl();
THCTensor_(resizeAs)(state, noise, input);
scalar_t *input_data = THCTensor_(data)(state, input);
scalar_t *noise_data = THCTensor_(data)(state, noise);
ptrdiff_t n = THCTensor_(nElement)(state, input);
// philox offset calculation for grid-stride loop utilizing hiprand4
const uint32_t curand4_engine_calls = 4;
dim3 grid = NUM_BLOCKS(n);
uint64_t counter_offset = ((n - 1) / (BLOCK_SIZE * grid.x) + 1) * curand4_engine_calls;
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(counter_offset);
}
if (inplace)
{
hipLaunchKernelGGL(( rreluUpdateOutputTrain), dim3(grid), dim3(BLOCK_SIZE), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n, rng_engine_inputs, input_data, noise_data, input_data, lower, upper);
THCTensor_(set)(state, output, input);
}
else
{
THCTensor_(resizeAs)(state, output, input);
scalar_t *output_data = THCTensor_(data)(state, output);
hipLaunchKernelGGL(( rreluUpdateOutputTrain), dim3(grid), dim3(BLOCK_SIZE), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
n, rng_engine_inputs, input_data, noise_data, output_data, lower, upper);
}
THCudaCheck(hipGetLastError());
}
else
{
const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2);
if (inplace)
{
THC_pointwiseApply1<scalar_t>(state, input, RReLUUpdateOutputEvalIP_functor<scalar_t>(negSlope));
THCTensor_(set)(state, output, input);
}
else
{
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, RReLUUpdateOutputEval_functor<scalar_t>(negSlope));
}
}
}
void THNN_(RReLU_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *noise,
double lower,
double upper,
bool train,
bool inplace)
{
THCUNN_check_nElement(state, input, gradOutput);
THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, noise);
auto gradOutputTensor = THTensor_wrap(gradOutput).contiguous();
gradOutput = gradOutputTensor.unsafeGetTensorImpl();
if (train && upper - lower > 1E-6) // e.g. if upper == lower, RReLU behaves like LeakyReLU
{
// multiply the gradient by the noise tensor
if (inplace)
{
THCTensor_(cmul)(state, gradOutput, gradOutput, noise);
THCTensor_(set)(state, gradInput, gradOutput);
}
else
{
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(cmul)(state, gradInput, gradOutput, noise);
}
}
else
{
// use constant factor for negative input values
const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2);
if (inplace)
{
THC_pointwiseApply2<scalar_t, scalar_t>(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor<scalar_t>(negSlope));
THCTensor_(set)(state, gradInput, gradOutput);
}
else
{
THCTensor_(resizeAs)(state, gradInput, input);
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor<scalar_t>(negSlope));
}
}
}
#endif
| 8da098dc49c87a956f28e318cb3759faf349119e.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THCUNN/generic/RReLU.cu"
#else
#include <THCUNN/common.h>
#include <ATen/CUDAGeneratorImpl.h>
void THNN_(RReLU_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *noise,
double lower,
double upper,
bool train,
bool inplace,
c10::optional<at::Generator> generator)
{
THCUNN_assertSameGPU(state, 3, input, output, noise);
auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(generator, at::cuda::detail::getDefaultCUDAGenerator());
if (train)
{
auto inputTensor = THTensor_wrap(input).contiguous();
input = inputTensor.unsafeGetTensorImpl();
THCTensor_(resizeAs)(state, noise, input);
scalar_t *input_data = THCTensor_(data)(state, input);
scalar_t *noise_data = THCTensor_(data)(state, noise);
ptrdiff_t n = THCTensor_(nElement)(state, input);
// philox offset calculation for grid-stride loop utilizing curand4
const uint32_t curand4_engine_calls = 4;
dim3 grid = NUM_BLOCKS(n);
uint64_t counter_offset = ((n - 1) / (BLOCK_SIZE * grid.x) + 1) * curand4_engine_calls;
std::pair<uint64_t, uint64_t> rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_engine_inputs(counter_offset);
}
if (inplace)
{
rreluUpdateOutputTrain<<<grid, BLOCK_SIZE, 0, c10::cuda::getCurrentCUDAStream()>>>(
n, rng_engine_inputs, input_data, noise_data, input_data, lower, upper);
THCTensor_(set)(state, output, input);
}
else
{
THCTensor_(resizeAs)(state, output, input);
scalar_t *output_data = THCTensor_(data)(state, output);
rreluUpdateOutputTrain<<<grid, BLOCK_SIZE, 0, c10::cuda::getCurrentCUDAStream()>>>(
n, rng_engine_inputs, input_data, noise_data, output_data, lower, upper);
}
THCudaCheck(cudaGetLastError());
}
else
{
const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2);
if (inplace)
{
THC_pointwiseApply1<scalar_t>(state, input, RReLUUpdateOutputEvalIP_functor<scalar_t>(negSlope));
THCTensor_(set)(state, output, input);
}
else
{
THCTensor_(resizeAs)(state, output, input);
THC_pointwiseApply2<scalar_t, scalar_t>(state, output, input, RReLUUpdateOutputEval_functor<scalar_t>(negSlope));
}
}
}
void THNN_(RReLU_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *noise,
double lower,
double upper,
bool train,
bool inplace)
{
THCUNN_check_nElement(state, input, gradOutput);
THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, noise);
auto gradOutputTensor = THTensor_wrap(gradOutput).contiguous();
gradOutput = gradOutputTensor.unsafeGetTensorImpl();
if (train && upper - lower > 1E-6) // e.g. if upper == lower, RReLU behaves like LeakyReLU
{
// multiply the gradient by the noise tensor
if (inplace)
{
THCTensor_(cmul)(state, gradOutput, gradOutput, noise);
THCTensor_(set)(state, gradInput, gradOutput);
}
else
{
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(cmul)(state, gradInput, gradOutput, noise);
}
}
else
{
// use constant factor for negative input values
const scalar_t negSlope = ScalarConvert<double, scalar_t>::to((lower + upper) / 2);
if (inplace)
{
THC_pointwiseApply2<scalar_t, scalar_t>(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor<scalar_t>(negSlope));
THCTensor_(set)(state, gradInput, gradOutput);
}
else
{
THCTensor_(resizeAs)(state, gradInput, input);
THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor<scalar_t>(negSlope));
}
}
}
#endif
|
77789627e0d6ab0e68467c81fa4f76ef769eeecc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
#include <math.h>
#include <hip/hip_complex.h>
__global__ void tanh_float(int n,int idx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = tanh(dy[i]);
}
} | 77789627e0d6ab0e68467c81fa4f76ef769eeecc.cu | extern "C"
#include <math.h>
#include <cuComplex.h>
__global__ void tanh_float(int n,int idx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = tanh(dy[i]);
}
} |
6ca97b3dd026a3fcdd442af61eb5ac3723576692.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHTensorMath.h"
#include "THHGeneral.h"
#include "THHBlas.h"
#include "THHTensorCopy.h"
#include "THHTensorRandom.h"
#include "THHApply.cuh"
#include "THHReduce.cuh"
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
struct TensorAddConstantOp {
TensorAddConstantOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = *in + val;
}
__device__ __forceinline__ void operator()(float* v) {
*v += val;
}
const float val;
};
void THCudaTensor_add(THCState *state, THCudaTensor *self_, THCudaTensor *src_, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src_));
if (self_ == src_) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorAddConstantOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src_);
if (!THCudaTensor_pointwiseApply2(state, self_, src_, TensorAddConstantOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
struct TensorMulConstantOp {
TensorMulConstantOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(float* v) {
*v *= val;
}
const float val;
};
void THCudaTensor_mul(THCState *state, THCudaTensor *self_, THCudaTensor *src_, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src_));
if (self_ == src_) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorMulConstantOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src_);
if (!THCudaTensor_pointwiseApply2(state, self_, src_, TensorMulConstantOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
void THCudaTensor_div(THCState* state, THCudaTensor *self_, THCudaTensor *src_, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src_));
THArgCheck(value != 0.0f, 3, "divide by zero");
if (self_ == src_) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorMulConstantOp(1.0f / value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src_);
if (!THCudaTensor_pointwiseApply2(state, self_, src_, TensorMulConstantOp(1.0f / value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
| 6ca97b3dd026a3fcdd442af61eb5ac3723576692.cu | #include "THCTensorMath.h"
#include "THCGeneral.h"
#include "THCBlas.h"
#include "THCTensorCopy.h"
#include "THCTensorRandom.h"
#include "THCApply.cuh"
#include "THCReduce.cuh"
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <thrust/inner_product.h>
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
struct TensorAddConstantOp {
TensorAddConstantOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = *in + val;
}
__device__ __forceinline__ void operator()(float* v) {
*v += val;
}
const float val;
};
void THCudaTensor_add(THCState *state, THCudaTensor *self_, THCudaTensor *src_, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src_));
if (self_ == src_) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorAddConstantOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src_);
if (!THCudaTensor_pointwiseApply2(state, self_, src_, TensorAddConstantOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
struct TensorMulConstantOp {
TensorMulConstantOp(float v) : val(v) {}
__device__ __forceinline__ void operator()(float* out, float* in) {
*out = *in * val;
}
__device__ __forceinline__ void operator()(float* v) {
*v *= val;
}
const float val;
};
void THCudaTensor_mul(THCState *state, THCudaTensor *self_, THCudaTensor *src_, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src_));
if (self_ == src_) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorMulConstantOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src_);
if (!THCudaTensor_pointwiseApply2(state, self_, src_, TensorMulConstantOp(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
void THCudaTensor_div(THCState* state, THCudaTensor *self_, THCudaTensor *src_, float value)
{
THAssert(THCudaTensor_checkGPU(state, 2, self_, src_));
THArgCheck(value != 0.0f, 3, "divide by zero");
if (self_ == src_) {
if (!THCudaTensor_pointwiseApply1(state, self_, TensorMulConstantOp(1.0f / value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
} else {
THCudaTensor_resizeAs(state, self_, src_);
if (!THCudaTensor_pointwiseApply2(state, self_, src_, TensorMulConstantOp(1.0f / value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
|
e8d52df8d3f1a61eda4be18c365a19dc158de0ab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/gemm.cuh>
namespace MLCommon {
namespace LinAlg {
template <typename T>
__global__ void fillKernel(T *arr, T val, int N) {
const int stride = blockDim.x * gridDim.x;
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
for (int i = tid; i < N; i += stride) arr[i] = val;
}
template <typename T, int NTHREADS = 256, int NITEMS = 4>
void fill(T *arr, T val, int N) {
const int nblks = ceildiv<int>(N, NTHREADS * NITEMS);
hipLaunchKernelGGL(( fillKernel<T>), dim3(nblks), dim3(NTHREADS), 0, 0, arr, val, N);
CUDA_CHECK(hipPeekAtLastError());
}
TEST(Gemm, Gemm_128x128x8) {
float *A, *B, *C, *D;
int M = 128, N = 128, K = 64;
CUDA_CHECK(hipMalloc((void **)&A, sizeof(float) * M * K));
fill(A, 1.f, M * K);
CUDA_CHECK(hipMalloc((void **)&B, sizeof(float) * K * N));
fill(B, 0.5f, K * N);
CUDA_CHECK(hipMalloc((void **)&C, sizeof(float) * M * N));
fill(C, 2.f, M * N);
CUDA_CHECK(hipMalloc((void **)&D, sizeof(float) * M * N));
CUDA_CHECK(hipMemset(D, 0, sizeof(float) * M * N));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
gemm<float, float, float, cutlass::Shape<8, 128, 128>>(
HIPBLAS_OP_N, HIPBLAS_OP_N, M, N, K, 1.f, B, N, A, K, 1.f, C, N, D, stream);
float *hD = new float[M * N];
updateHost<float>(hD, D, M * N, stream);
CUDA_CHECK(hipStreamSynchronize(stream));
for (int i = 0; i < M * N; ++i) {
ASSERT_FLOAT_EQ(0.5f * K + 2.f, hD[i]) << " @hD[" << i << "]";
}
delete[] hD;
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(A));
CUDA_CHECK(hipFree(B));
CUDA_CHECK(hipFree(C));
CUDA_CHECK(hipFree(D));
}
} // end namespace LinAlg
} // end namespace MLCommon
| e8d52df8d3f1a61eda4be18c365a19dc158de0ab.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/gemm.cuh>
namespace MLCommon {
namespace LinAlg {
template <typename T>
__global__ void fillKernel(T *arr, T val, int N) {
const int stride = blockDim.x * gridDim.x;
int tid = threadIdx.x + (blockIdx.x * blockDim.x);
for (int i = tid; i < N; i += stride) arr[i] = val;
}
template <typename T, int NTHREADS = 256, int NITEMS = 4>
void fill(T *arr, T val, int N) {
const int nblks = ceildiv<int>(N, NTHREADS * NITEMS);
fillKernel<T><<<nblks, NTHREADS>>>(arr, val, N);
CUDA_CHECK(cudaPeekAtLastError());
}
TEST(Gemm, Gemm_128x128x8) {
float *A, *B, *C, *D;
int M = 128, N = 128, K = 64;
CUDA_CHECK(cudaMalloc((void **)&A, sizeof(float) * M * K));
fill(A, 1.f, M * K);
CUDA_CHECK(cudaMalloc((void **)&B, sizeof(float) * K * N));
fill(B, 0.5f, K * N);
CUDA_CHECK(cudaMalloc((void **)&C, sizeof(float) * M * N));
fill(C, 2.f, M * N);
CUDA_CHECK(cudaMalloc((void **)&D, sizeof(float) * M * N));
CUDA_CHECK(cudaMemset(D, 0, sizeof(float) * M * N));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
gemm<float, float, float, cutlass::Shape<8, 128, 128>>(
CUBLAS_OP_N, CUBLAS_OP_N, M, N, K, 1.f, B, N, A, K, 1.f, C, N, D, stream);
float *hD = new float[M * N];
updateHost<float>(hD, D, M * N, stream);
CUDA_CHECK(cudaStreamSynchronize(stream));
for (int i = 0; i < M * N; ++i) {
ASSERT_FLOAT_EQ(0.5f * K + 2.f, hD[i]) << " @hD[" << i << "]";
}
delete[] hD;
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(A));
CUDA_CHECK(cudaFree(B));
CUDA_CHECK(cudaFree(C));
CUDA_CHECK(cudaFree(D));
}
} // end namespace LinAlg
} // end namespace MLCommon
|
ee262764d660874536ea20bc32b252a07d7843b9.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "cuda_kernels_spline1.cuh"
#include "common_definitions.h" // for MAX_SPLINE_DEGREE
// Named "eval_basis1" because of a strange linker error with MSVC when compiling
// only for a virtual target in order to enable JIT compilation.
__constant__ float eval_basis1[MAX_SPLINE_DEGREE+1];
bool splineAlg1_updateConstantMemory_internal(float* src_ptr, size_t num_bytes) {
const auto res = hipMemcpyToSymbol(eval_basis1, src_ptr, num_bytes);
return (res == hipSuccess);
}
__global__ void RenderSplineKernel(const float* control_xs,
const float* control_ys,
const float* control_zs,
float* rendered_xs,
float* rendered_ys,
float* rendered_zs,
int cs_idx_start,
int cs_idx_end,
int NUM_SPLINES) {
const int idx = blockDim.x*blockIdx.x + threadIdx.x;
if (idx >= NUM_SPLINES) {
return;
}
// to get from one control point to the next, we have
// to make a jump of size equal to number of splines
float rendered_x = 0.0f;
float rendered_y = 0.0f;
float rendered_z = 0.0f;
for (int i = cs_idx_start; i <= cs_idx_end; i++) {
rendered_x += control_xs[NUM_SPLINES*i + idx]*eval_basis1[i-cs_idx_start];
rendered_y += control_ys[NUM_SPLINES*i + idx]*eval_basis1[i-cs_idx_start];
rendered_z += control_zs[NUM_SPLINES*i + idx]*eval_basis1[i-cs_idx_start];
}
// write result to memory
rendered_xs[idx] = rendered_x;
rendered_ys[idx] = rendered_y;
rendered_zs[idx] = rendered_z;
}
| ee262764d660874536ea20bc32b252a07d7843b9.cu | #include <cuda.h>
#include <cuda_runtime_api.h>
#include "cuda_kernels_spline1.cuh"
#include "common_definitions.h" // for MAX_SPLINE_DEGREE
// Named "eval_basis1" because of a strange linker error with MSVC when compiling
// only for a virtual target in order to enable JIT compilation.
__constant__ float eval_basis1[MAX_SPLINE_DEGREE+1];
bool splineAlg1_updateConstantMemory_internal(float* src_ptr, size_t num_bytes) {
const auto res = cudaMemcpyToSymbol(eval_basis1, src_ptr, num_bytes);
return (res == cudaSuccess);
}
__global__ void RenderSplineKernel(const float* control_xs,
const float* control_ys,
const float* control_zs,
float* rendered_xs,
float* rendered_ys,
float* rendered_zs,
int cs_idx_start,
int cs_idx_end,
int NUM_SPLINES) {
const int idx = blockDim.x*blockIdx.x + threadIdx.x;
if (idx >= NUM_SPLINES) {
return;
}
// to get from one control point to the next, we have
// to make a jump of size equal to number of splines
float rendered_x = 0.0f;
float rendered_y = 0.0f;
float rendered_z = 0.0f;
for (int i = cs_idx_start; i <= cs_idx_end; i++) {
rendered_x += control_xs[NUM_SPLINES*i + idx]*eval_basis1[i-cs_idx_start];
rendered_y += control_ys[NUM_SPLINES*i + idx]*eval_basis1[i-cs_idx_start];
rendered_z += control_zs[NUM_SPLINES*i + idx]*eval_basis1[i-cs_idx_start];
}
// write result to memory
rendered_xs[idx] = rendered_x;
rendered_ys[idx] = rendered_y;
rendered_zs[idx] = rendered_z;
}
|
366d0f87cfddf2feeff9734f9299a7cc4881d23d.hip | // !!! This is a file automatically generated by hipify!!!
/*********************************************************************************/
/* Matrix product program for a multi-core CPU and for a many-core GPU */
/* S. Vialle - November 2015 */
/*********************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "main.h"
#include "gpu-op.h"
/*-------------------------------------------------------------------------------*/
/* GPU symbols */
/*-------------------------------------------------------------------------------*/
__device__ double GPU_A[SIZE][SIZE];
__device__ double GPU_B[SIZE][SIZE];
__device__ double GPU_C[SIZE][SIZE];
/*-------------------------------------------------------------------------------*/
/* Init and finalize the GPU device. */
/*-------------------------------------------------------------------------------*/
void gpuInit(void)
{
hipInit(0);
}
void gpuFinalize(void)
{
}
/*-------------------------------------------------------------------------------*/
/* Transfer of CPU input data into GPU symbols */
/*-------------------------------------------------------------------------------*/
void gpuSetDataOnGPU(void)
{
//Set GPU_A symbol
CHECK_CUDA_SUCCESS(hipMemcpyToSymbol(GPU_A, &A[0][0], sizeof(double)*SIZE*SIZE, 0, hipMemcpyHostToDevice),
"Transfer A-->GPU_A");
//Set GPU_B symbol
CHECK_CUDA_SUCCESS(hipMemcpyToSymbol(GPU_B, &B[0][0], sizeof(double)*SIZE*SIZE, 0, hipMemcpyHostToDevice),
"Transfer B-->GPU_B");
}
/*-------------------------------------------------------------------------------*/
/* Transfer of GPU results into CPU array */
/*-------------------------------------------------------------------------------*/
void gpuGetResultOnCPU(void)
{
// Get GPU_C symbol
CHECK_CUDA_SUCCESS(hipMemcpyFromSymbol(&C[0][0], GPU_C, sizeof(double)*SIZE*SIZE, 0, hipMemcpyDeviceToHost),
"Transfer GPU_C-->C");
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU. */
/*-------------------------------------------------------------------------------*/
__global__ void MatrixProductKernel_v0(void)
{
// Index computations
int col = (blockIdx.y * blockDim.y) + threadIdx.y;
int lig = (blockIdx.x * blockDim.x) + threadIdx.x;
if(col < SIZE && lig < SIZE){
double res = 0.0;
// Matrix product computation
int i;
for(i=0;i<SIZE;i++)
{
res += GPU_A[i][lig]*GPU_B[col][i];
}
GPU_C[col][lig] = res;
}
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU. */
/*-------------------------------------------------------------------------------*/
__global__ void MatrixProductKernel_v3(void){
// Index computations
int i,j;
int col = (blockIdx.x * blockDim.x) + threadIdx.x;
int lig = (blockIdx.y * blockDim.y) + threadIdx.y;
if(col < SIZE && lig < SIZE){
__shared__ double shared_A[BLOCK_SIZE_X_K3][BLOCK_SIZE_Y_K3];
__shared__ double shared_B[BLOCK_SIZE_X_K3][BLOCK_SIZE_Y_K3];
double res = 0.0;
for(i = 0; i < (SIZE / BLOCK_SIZE_X_K3)+(SIZE%BLOCK_SIZE_X_K3? 1 : 0); i++){
int offset = i * BLOCK_SIZE_X_K3;
if((offset+threadIdx.x) < SIZE && (offset+threadIdx.y) < SIZE){
shared_A[threadIdx.y][threadIdx.x] = GPU_A[lig][offset + threadIdx.x];
shared_B[threadIdx.y][threadIdx.x] = GPU_B[offset + threadIdx.y][col];
__syncthreads();
// Matrix product computation
for(j=0;j<BLOCK_SIZE_X_K3;j++){
if(j + offset < SIZE){
res += shared_A[threadIdx.y][j]*shared_B[j][threadIdx.x];
}
}
__syncthreads();
}
}
GPU_C[lig][col] = res;
}
}
__global__ void MatrixProductKernel_v4(void){
// Index computations
int i,j;
int col = (blockIdx.x * blockDim.x) + threadIdx.x;
int lig = (blockIdx.y * blockDim.y) + threadIdx.y;
double res;
__shared__ double shared_A[BLOCK_SIZE_X_K3][BLOCK_SIZE_Y_K3];
__shared__ double shared_B[BLOCK_SIZE_X_K3][BLOCK_SIZE_Y_K3];
res = 0.0;
for(i = 0; i < (SIZE / BLOCK_SIZE_X_K3)+(SIZE%BLOCK_SIZE_X_K3?1:0); i++){
int offset = i * BLOCK_SIZE_X_K3;
if((offset+threadIdx.x) < SIZE && lig < SIZE){
shared_A[threadIdx.y][threadIdx.x] = GPU_A[lig][offset + threadIdx.x];
}
if((offset+threadIdx.y) < SIZE && col < SIZE){
shared_B[threadIdx.y][threadIdx.x] = GPU_B[offset + threadIdx.y][col];
}
__syncthreads();
// Matrix product computation
if(offset < SIZE-1){
for(j=0;j<BLOCK_SIZE_X_K3;j++){
res += shared_A[threadIdx.y][j]*shared_B[j][threadIdx.x];
}
}else{
for(j=0;j< (SIZE%BLOCK_SIZE_X_K3);j++){
res += shared_A[threadIdx.y][j]*shared_B[j][threadIdx.x];
}
}
__syncthreads();
if(col < SIZE && lig < SIZE){
GPU_C[lig][col] = res;
}
}
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU. */
/*-------------------------------------------------------------------------------*/
void gpuProduct(gkid_t kid)
{
dim3 Dg, Db;
switch(kid) {
case GK0 : // Kernel v0 - using only global memory (with coalescent data accesses)
// - init the grid of blocs
Db.x = BLOCK_SIZE_X_K0;
Db.y = BLOCK_SIZE_Y_K0;
Db.z = 1;
Dg.x = SIZE/BLOCK_SIZE_X_K0;
Dg.y = SIZE/BLOCK_SIZE_Y_K0;
Dg.z = 1;
// - run the Grid of Blocs of threads
hipLaunchKernelGGL(( MatrixProductKernel_v0), dim3(Dg),dim3(Db), 0, 0, );
break;
case GK1 :
Db.x = BLOCK_SIZE_X_K0;
Db.y = BLOCK_SIZE_Y_K0;
Db.z = 1;
Dg.x = SIZE/BLOCK_SIZE_X_K0 + (SIZE%BLOCK_SIZE_X_K0 ? 1 : 0);
Dg.y = SIZE/BLOCK_SIZE_Y_K0 + (SIZE%BLOCK_SIZE_Y_K0 ? 1 : 0);
Dg.z = 1;
// - run the Grid of Blocs of threads
hipLaunchKernelGGL(( MatrixProductKernel_v0), dim3(Dg),dim3(Db), 0, 0, );
break;
case GK2 :
Db.x = BLOCK_SIZE_X_K0;
Db.y = BLOCK_SIZE_Y_K0;
Db.z = 1;
Dg.x = SIZE/BLOCK_SIZE_X_K0 + (SIZE%BLOCK_SIZE_X_K0 ? 1 : 0);
Dg.y = SIZE/BLOCK_SIZE_Y_K0 + (SIZE%BLOCK_SIZE_Y_K0 ? 1 : 0);
Dg.z = 1;
// - run the Grid of Blocs of threads
hipLaunchKernelGGL(( MatrixProductKernel_v0), dim3(Dg),dim3(Db), 0, 0, );
break;
case GK3 :
Db.x = BLOCK_SIZE_X_K3;
Db.y = BLOCK_SIZE_Y_K3;
Db.z = 1;
Dg.x = SIZE/BLOCK_SIZE_X_K3 + (SIZE%BLOCK_SIZE_X_K3 ? 1 : 0);
Dg.y = SIZE/BLOCK_SIZE_Y_K3 + (SIZE%BLOCK_SIZE_Y_K3 ? 1 : 0);
Dg.z = 1;
// - run the Grid of Blocs of threads
hipLaunchKernelGGL(( MatrixProductKernel_v3), dim3(Dg),dim3(Db), 0, 0, );
break;
case GK4 :
Db.x = BLOCK_SIZE_X_K3;
Db.y = BLOCK_SIZE_Y_K3;
Db.z = 1;
Dg.x = SIZE/BLOCK_SIZE_X_K3 + (SIZE%BLOCK_SIZE_X_K3 ? 1 : 0);
Dg.y = SIZE/BLOCK_SIZE_Y_K3 + (SIZE%BLOCK_SIZE_Y_K3 ? 1 : 0);
Dg.z = 1;
// - run the Grid of Blocs of threads
hipLaunchKernelGGL(( MatrixProductKernel_v4), dim3(Dg),dim3(Db), 0, 0, );
break;
case GK5 :
break;
default :
fprintf(stderr,"Unknown GPU kernel!");
exit(EXIT_FAILURE);
}
}
| 366d0f87cfddf2feeff9734f9299a7cc4881d23d.cu | /*********************************************************************************/
/* Matrix product program for a multi-core CPU and for a many-core GPU */
/* S. Vialle - November 2015 */
/*********************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "main.h"
#include "gpu-op.h"
/*-------------------------------------------------------------------------------*/
/* GPU symbols */
/*-------------------------------------------------------------------------------*/
__device__ double GPU_A[SIZE][SIZE];
__device__ double GPU_B[SIZE][SIZE];
__device__ double GPU_C[SIZE][SIZE];
/*-------------------------------------------------------------------------------*/
/* Init and finalize the GPU device. */
/*-------------------------------------------------------------------------------*/
void gpuInit(void)
{
cuInit(0);
}
void gpuFinalize(void)
{
}
/*-------------------------------------------------------------------------------*/
/* Transfer of CPU input data into GPU symbols */
/*-------------------------------------------------------------------------------*/
void gpuSetDataOnGPU(void)
{
//Set GPU_A symbol
CHECK_CUDA_SUCCESS(cudaMemcpyToSymbol(GPU_A, &A[0][0], sizeof(double)*SIZE*SIZE, 0, cudaMemcpyHostToDevice),
"Transfer A-->GPU_A");
//Set GPU_B symbol
CHECK_CUDA_SUCCESS(cudaMemcpyToSymbol(GPU_B, &B[0][0], sizeof(double)*SIZE*SIZE, 0, cudaMemcpyHostToDevice),
"Transfer B-->GPU_B");
}
/*-------------------------------------------------------------------------------*/
/* Transfer of GPU results into CPU array */
/*-------------------------------------------------------------------------------*/
void gpuGetResultOnCPU(void)
{
// Get GPU_C symbol
CHECK_CUDA_SUCCESS(cudaMemcpyFromSymbol(&C[0][0], GPU_C, sizeof(double)*SIZE*SIZE, 0, cudaMemcpyDeviceToHost),
"Transfer GPU_C-->C");
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU. */
/*-------------------------------------------------------------------------------*/
__global__ void MatrixProductKernel_v0(void)
{
// Index computations
int col = (blockIdx.y * blockDim.y) + threadIdx.y;
int lig = (blockIdx.x * blockDim.x) + threadIdx.x;
if(col < SIZE && lig < SIZE){
double res = 0.0;
// Matrix product computation
int i;
for(i=0;i<SIZE;i++)
{
res += GPU_A[i][lig]*GPU_B[col][i];
}
GPU_C[col][lig] = res;
}
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU. */
/*-------------------------------------------------------------------------------*/
__global__ void MatrixProductKernel_v3(void){
// Index computations
int i,j;
int col = (blockIdx.x * blockDim.x) + threadIdx.x;
int lig = (blockIdx.y * blockDim.y) + threadIdx.y;
if(col < SIZE && lig < SIZE){
__shared__ double shared_A[BLOCK_SIZE_X_K3][BLOCK_SIZE_Y_K3];
__shared__ double shared_B[BLOCK_SIZE_X_K3][BLOCK_SIZE_Y_K3];
double res = 0.0;
for(i = 0; i < (SIZE / BLOCK_SIZE_X_K3)+(SIZE%BLOCK_SIZE_X_K3? 1 : 0); i++){
int offset = i * BLOCK_SIZE_X_K3;
if((offset+threadIdx.x) < SIZE && (offset+threadIdx.y) < SIZE){
shared_A[threadIdx.y][threadIdx.x] = GPU_A[lig][offset + threadIdx.x];
shared_B[threadIdx.y][threadIdx.x] = GPU_B[offset + threadIdx.y][col];
__syncthreads();
// Matrix product computation
for(j=0;j<BLOCK_SIZE_X_K3;j++){
if(j + offset < SIZE){
res += shared_A[threadIdx.y][j]*shared_B[j][threadIdx.x];
}
}
__syncthreads();
}
}
GPU_C[lig][col] = res;
}
}
__global__ void MatrixProductKernel_v4(void){
// Index computations
int i,j;
int col = (blockIdx.x * blockDim.x) + threadIdx.x;
int lig = (blockIdx.y * blockDim.y) + threadIdx.y;
double res;
__shared__ double shared_A[BLOCK_SIZE_X_K3][BLOCK_SIZE_Y_K3];
__shared__ double shared_B[BLOCK_SIZE_X_K3][BLOCK_SIZE_Y_K3];
res = 0.0;
for(i = 0; i < (SIZE / BLOCK_SIZE_X_K3)+(SIZE%BLOCK_SIZE_X_K3?1:0); i++){
int offset = i * BLOCK_SIZE_X_K3;
if((offset+threadIdx.x) < SIZE && lig < SIZE){
shared_A[threadIdx.y][threadIdx.x] = GPU_A[lig][offset + threadIdx.x];
}
if((offset+threadIdx.y) < SIZE && col < SIZE){
shared_B[threadIdx.y][threadIdx.x] = GPU_B[offset + threadIdx.y][col];
}
__syncthreads();
// Matrix product computation
if(offset < SIZE-1){
for(j=0;j<BLOCK_SIZE_X_K3;j++){
res += shared_A[threadIdx.y][j]*shared_B[j][threadIdx.x];
}
}else{
for(j=0;j< (SIZE%BLOCK_SIZE_X_K3);j++){
res += shared_A[threadIdx.y][j]*shared_B[j][threadIdx.x];
}
}
__syncthreads();
if(col < SIZE && lig < SIZE){
GPU_C[lig][col] = res;
}
}
}
/*-------------------------------------------------------------------------------*/
/* Small matrix product on the local GPU. */
/*-------------------------------------------------------------------------------*/
void gpuProduct(gkid_t kid)
{
dim3 Dg, Db;
switch(kid) {
case GK0 : // Kernel v0 - using only global memory (with coalescent data accesses)
// - init the grid of blocs
Db.x = BLOCK_SIZE_X_K0;
Db.y = BLOCK_SIZE_Y_K0;
Db.z = 1;
Dg.x = SIZE/BLOCK_SIZE_X_K0;
Dg.y = SIZE/BLOCK_SIZE_Y_K0;
Dg.z = 1;
// - run the Grid of Blocs of threads
MatrixProductKernel_v0<<<Dg,Db>>>();
break;
case GK1 :
Db.x = BLOCK_SIZE_X_K0;
Db.y = BLOCK_SIZE_Y_K0;
Db.z = 1;
Dg.x = SIZE/BLOCK_SIZE_X_K0 + (SIZE%BLOCK_SIZE_X_K0 ? 1 : 0);
Dg.y = SIZE/BLOCK_SIZE_Y_K0 + (SIZE%BLOCK_SIZE_Y_K0 ? 1 : 0);
Dg.z = 1;
// - run the Grid of Blocs of threads
MatrixProductKernel_v0<<<Dg,Db>>>();
break;
case GK2 :
Db.x = BLOCK_SIZE_X_K0;
Db.y = BLOCK_SIZE_Y_K0;
Db.z = 1;
Dg.x = SIZE/BLOCK_SIZE_X_K0 + (SIZE%BLOCK_SIZE_X_K0 ? 1 : 0);
Dg.y = SIZE/BLOCK_SIZE_Y_K0 + (SIZE%BLOCK_SIZE_Y_K0 ? 1 : 0);
Dg.z = 1;
// - run the Grid of Blocs of threads
MatrixProductKernel_v0<<<Dg,Db>>>();
break;
case GK3 :
Db.x = BLOCK_SIZE_X_K3;
Db.y = BLOCK_SIZE_Y_K3;
Db.z = 1;
Dg.x = SIZE/BLOCK_SIZE_X_K3 + (SIZE%BLOCK_SIZE_X_K3 ? 1 : 0);
Dg.y = SIZE/BLOCK_SIZE_Y_K3 + (SIZE%BLOCK_SIZE_Y_K3 ? 1 : 0);
Dg.z = 1;
// - run the Grid of Blocs of threads
MatrixProductKernel_v3<<<Dg,Db>>>();
break;
case GK4 :
Db.x = BLOCK_SIZE_X_K3;
Db.y = BLOCK_SIZE_Y_K3;
Db.z = 1;
Dg.x = SIZE/BLOCK_SIZE_X_K3 + (SIZE%BLOCK_SIZE_X_K3 ? 1 : 0);
Dg.y = SIZE/BLOCK_SIZE_Y_K3 + (SIZE%BLOCK_SIZE_Y_K3 ? 1 : 0);
Dg.z = 1;
// - run the Grid of Blocs of threads
MatrixProductKernel_v4<<<Dg,Db>>>();
break;
case GK5 :
break;
default :
fprintf(stderr,"Unknown GPU kernel!");
exit(EXIT_FAILURE);
}
}
|
8276c7b4454452e7b6d6e1c7791d501eea3dcae3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
extern "C" {
#include "pooling.h"
#include "hip/hip_runtime.h"
}
void forward_pool_gpu(pool_layer pl, float *input_gpu) {
float alpha = 1.0f;
float beta = 0.0f;
CUDNN_CHECK(cudnnPoolingForward(cudnn_handler(),
pl.poolDesc,
&alpha,
pl.inputTensorDesc,
input_gpu,
&beta,
pl.outputTensorDesc,
pl.output_gpu));
}
| 8276c7b4454452e7b6d6e1c7791d501eea3dcae3.cu | #include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
extern "C" {
#include "pooling.h"
#include "cuda.h"
}
void forward_pool_gpu(pool_layer pl, float *input_gpu) {
float alpha = 1.0f;
float beta = 0.0f;
CUDNN_CHECK(cudnnPoolingForward(cudnn_handler(),
pl.poolDesc,
&alpha,
pl.inputTensorDesc,
input_gpu,
&beta,
pl.outputTensorDesc,
pl.output_gpu));
}
|
d7edf4eb04e5d9c9688b20032c87bfb3739ec54f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gmock/gmock-matchers.h>
#include <gtest/gtest.h>
#include <ATen/hip/HIPGeneratorImpl.h>
#include <c10/util/Optional.h>
#include <fusion.h>
#include <ir_all_nodes.h>
#include <kernel_cache.h>
#include <ops/arith.h>
#include <scheduler/all_schedulers.h>
#include <test/test_gpu_validator.h>
#include <test/test_utils.h>
#include <ATen/hip/HIPGraphsUtils.cuh>
#include <cassert>
#include <type_traits>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <hiprand/hiprand_kernel.h>
namespace nvfuser {
enum RNGTest_t {
Uniform,
Normal,
};
namespace {
template <typename T>
__global__ void generate_random_numbers_kernel(
T* output,
int64_t size,
at::PhiloxCudaState philox_args,
RNGTest_t rng_test) {
int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
auto seeds = at::cuda::philox::unpack(philox_args);
hiprandStatePhilox4_32_10_t state;
hiprand_init(std::get<0>(seeds), tid, std::get<1>(seeds), &state);
double2 (*ref_rng_double)(hiprandStatePhilox4_32_10_t*);
float4 (*ref_rng_float)(hiprandStatePhilox4_32_10_t*);
switch (rng_test) {
case RNGTest_t::Uniform: {
ref_rng_double = hiprand_uniform2_double;
ref_rng_float = hiprand_uniform4;
break;
}
case RNGTest_t::Normal: {
ref_rng_double = hiprand_normal2_double;
ref_rng_float = hiprand_normal4;
break;
}
}
if (std::is_same<T, double>::value) {
double2 result = ref_rng_double(&state);
if (tid * 2 < size) {
output[tid * 2] = result.x;
}
if (tid * 2 + 1 < size) {
output[tid * 2 + 1] = result.y;
}
} else {
auto is_float = std::is_same<T, float>::value;
assert(is_float);
float4 result = ref_rng_float(&state);
if (tid * 4 < size) {
output[tid * 4] = result.x;
}
if (tid * 4 + 1 < size) {
output[tid * 4 + 1] = result.y;
}
if (tid * 4 + 2 < size) {
output[tid * 4 + 2] = result.z;
}
if (tid * 4 + 3 < size) {
output[tid * 4 + 3] = result.w;
}
}
}
at::Tensor generate_random_numbers(
int64_t size,
at::ScalarType dtype,
RNGTest_t rng_test) {
auto options = at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
auto result = at::empty({size}, options);
auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
c10::nullopt, at::cuda::detail::getDefaultCUDAGenerator());
at::PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(4);
}
if (dtype == at::kFloat) {
int64_t block = 128;
int64_t block_elems = block * 4;
int64_t grid = (size + block_elems - 1) / block_elems;
hipLaunchKernelGGL(( generate_random_numbers_kernel),
dim3(grid),
dim3(block),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<float>(), size, rng_engine_inputs, rng_test);
} else {
TORCH_CHECK(dtype == at::kDouble);
int64_t block = 128;
int64_t block_elems = block * 2;
int64_t grid = (size + block_elems - 1) / block_elems;
hipLaunchKernelGGL(( generate_random_numbers_kernel),
dim3(grid),
dim3(block),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
result.data_ptr<double>(), size, rng_engine_inputs, rng_test);
}
return result;
}
at::Tensor generate_uniform(int64_t size, at::ScalarType dtype) {
return generate_random_numbers(size, dtype, RNGTest_t::Uniform);
}
at::Tensor generate_normal(int64_t size, at::ScalarType dtype) {
return generate_random_numbers(size, dtype, RNGTest_t::Normal);
}
} // namespace
TEST_F(NVFuserTest, FusionRNGValidateWithCURand_CUDA) {
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
Int* size_val = IrBuilder::create<Int>();
fusion->addInput(size_val);
TensorView* tv0 = rand({size_val}, DataType::Float);
TensorView* tv1 = rand({size_val}, DataType::Double);
fusion->addOutput(tv0);
fusion->addOutput(tv1);
FusionExecutorCache fec(std::move(fusion_ptr));
for (int64_t size : {16, 1024, 10001, 10002, 10003, 100000, 10000001}) {
at::manual_seed(0);
auto cg_outputs = fec.runFusionWithInputs({size});
at::manual_seed(0);
auto ref0 = generate_uniform(size, at::kFloat);
auto ref1 = generate_uniform(size, at::kDouble);
testValidate(
fec.fusion(), cg_outputs, {size}, {ref0, ref1}, __LINE__, __FILE__);
}
}
TEST_F(NVFuserTest, FusionRNGManualScheduleValidateWithCURand_CUDA) {
int64_t size = 128;
auto dtype = at::kFloat;
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
TensorView* tv0 = makeSymbolicTensor(1, aten_to_data_type(dtype));
fusion->addInput(tv0);
auto tv1 = rand_like(tv0);
auto tv2 = set(tv1);
fusion->addOutput(tv2);
tv2->split(0, 8);
tv2->axis(0)->parallelize(ParallelType::TIDx);
tv1->computeAt(tv2, 1);
auto options = at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
at::Tensor t0 = at::zeros({size}, options);
FusionExecutor fe;
fe.compileFusion(fusion, {t0});
at::manual_seed(0);
auto cg_outputs = fe.runFusion({t0});
auto out = cg_outputs[0];
at::manual_seed(0);
auto ref = generate_uniform(size, dtype);
testValidate(fusion, {out}, {t0}, {ref}, __LINE__, __FILE__);
}
TEST_F(NVFuserTest, FusionRNGManualScheduleValidateWithCURand2_CUDA) {
#ifdef FBCODE_CAFFE2
GTEST_SKIP() << "Fails accuracy on V100 32gb";
#endif
auto dtype = at::kFloat;
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
Int* size1 = IrBuilder::create<Int>();
Int* size2 = IrBuilder::create<Int>();
Int* size3 = IrBuilder::create<Int>();
Int* size4 = IrBuilder::create<Int>();
fusion->addInput(size1);
fusion->addInput(size2);
fusion->addInput(size3);
fusion->addInput(size4);
TensorView* tv0 = rand({size1, size2, size3, size4}, DataType::Float);
fusion->addOutput(tv0);
FusionExecutor fe;
fe.compileFusion(fusion, {10, 10, 10, 10});
at::manual_seed(0);
auto cg_outputs = fe.runFusion({10, 10, 10, 10});
auto out = cg_outputs[0];
at::manual_seed(0);
auto ref = generate_uniform(10000, dtype).view({10, 10, 10, 10});
testValidate(fusion, {out}, {10, 10, 10, 10}, {ref}, __LINE__, __FILE__);
}
TEST_F(NVFuserTest, FusionBroadcastingRNG_CUDA) {
for (auto dtype : {at::kFloat, at::kDouble}) {
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
TensorView* tv0 = makeConcreteTensor({5, 1}, aten_to_data_type(dtype));
TensorView* tv1 = makeConcreteTensor({5, 5}, aten_to_data_type(dtype));
fusion->addInput(tv0);
fusion->addInput(tv1);
auto tv2 = rand_like(tv0);
auto tv3 = add(tv1, tv2);
auto tv4 = add(tv0, tv3);
fusion->addOutput(tv4);
FusionExecutorCache fec(std::move(fusion_ptr));
auto options = at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
at::Tensor t0 = at::zeros({5, 1}, options);
at::Tensor t1 = at::zeros({5, 5}, options);
auto cg_outputs = fec.runFusionWithInputs({t0, t1});
auto out = cg_outputs[0];
TORCH_CHECK((out.select(1, 0) == out.select(1, 1)).all().item<bool>())
TORCH_CHECK((out.select(1, 0) == out.select(1, 2)).all().item<bool>())
TORCH_CHECK((out.select(1, 0) == out.select(1, 3)).all().item<bool>())
TORCH_CHECK((out.select(1, 0) == out.select(1, 4)).all().item<bool>())
}
}
TEST_F(NVFuserTest, FusionBroadcastingRNG2_CUDA) {
for (int64_t size : {16, 1024, 10001, 10002, 10003, 100000, 10000001}) {
for (auto dtype : {at::kFloat, at::kDouble}) {
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
TensorView* tv0 = makeConcreteTensor({1}, aten_to_data_type(dtype));
TensorView* tv1 = makeSymbolicTensor(1, aten_to_data_type(dtype));
fusion->addInput(tv0);
fusion->addInput(tv1);
auto tv2 = rand_like(tv0);
auto tv3 = add(tv1, tv2);
fusion->addOutput(tv3);
FusionExecutorCache fec(std::move(fusion_ptr));
auto options = at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
at::Tensor t0 = at::zeros({1}, options);
at::Tensor t1 = at::zeros({size}, options);
at::manual_seed(0);
auto cg_outputs = fec.runFusionWithInputs({t0, t1});
auto out = cg_outputs[0];
at::manual_seed(0);
auto ref = generate_uniform(1, dtype).expand_as(t1);
testValidate(fec.fusion(), {out}, {t0, t1}, {ref}, __LINE__, __FILE__);
}
}
}
TEST_F(NVFuserTest, FusionBroadcastingRNGSmem_CUDA) {
for (auto dtype : {at::kFloat, at::kDouble}) {
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
TensorView* tv0 = makeConcreteTensor({5, 1}, aten_to_data_type(dtype));
TensorView* tv1 = makeConcreteTensor({5, 5}, aten_to_data_type(dtype));
fusion->addInput(tv0);
fusion->addInput(tv1);
auto tv2 = rand_like(tv0);
auto tv3 = add(tv1, tv2);
auto tv4 = add(tv0, tv3);
fusion->addOutput(tv4);
auto options = at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
at::Tensor t0 = at::zeros({5, 1}, options);
at::Tensor t1 = at::zeros({5, 5}, options);
auto lparams = scheduleTranspose(fusion, {t0, t1});
FusionExecutor fe;
fe.compileFusion(fusion, {t0, t1}, lparams);
auto cg_outputs = fe.runFusion({t0, t1}, lparams);
auto out = cg_outputs[0];
TORCH_CHECK((out.select(1, 0) == out.select(1, 1)).all().item<bool>())
TORCH_CHECK((out.select(1, 0) == out.select(1, 2)).all().item<bool>())
TORCH_CHECK((out.select(1, 0) == out.select(1, 3)).all().item<bool>())
TORCH_CHECK((out.select(1, 0) == out.select(1, 4)).all().item<bool>())
}
}
TEST_F(NVFuserTest, FusionBroadcastingRNGSmemNonSquareTile_CUDA) {
// https://github.com/csarofeen/pytorch/issues/1926
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
TensorView* tv0 = makeConcreteTensor({5, 1});
TensorView* tv1 = makeConcreteTensor({5, 5});
fusion->addInput(tv0);
fusion->addInput(tv1);
auto tv2 = rand_like(tv0);
auto tv3 = add(tv1, tv2);
auto tv4 = add(tv0, tv3);
fusion->addOutput(tv4);
auto options = at::TensorOptions().dtype(at::kFloat).device(at::kCUDA, 0);
at::Tensor t0 = at::zeros({5, 1}, options);
at::Tensor t1 = at::zeros({5, 5}, options);
TransposeParams heuristics;
heuristics.tile_size1 = 8;
heuristics.tile_size2 = 4;
scheduleTranspose(fusion, heuristics);
FusionExecutor fe;
fe.compileFusion(fusion, {t0, t1});
auto cg_outputs = fe.runFusion({t0, t1});
auto out = cg_outputs[0];
TORCH_CHECK((out.select(1, 0) == out.select(1, 1)).all().item<bool>());
TORCH_CHECK((out.select(1, 0) == out.select(1, 2)).all().item<bool>());
TORCH_CHECK((out.select(1, 0) == out.select(1, 3)).all().item<bool>());
TORCH_CHECK((out.select(1, 0) == out.select(1, 4)).all().item<bool>());
}
TEST_F(NVFuserTest, FusionUniform_CUDA) {
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
Int* size_val = IrBuilder::create<Int>();
Double* low = IrBuilder::create<Double>();
Double* high = IrBuilder::create<Double>();
fusion->addInput(size_val);
fusion->addInput(low);
fusion->addInput(high);
TensorView* tv0 = uniform({size_val}, low, high, DataType::Float);
TensorView* tv1 = uniform({size_val}, low, high, DataType::Double);
fusion->addOutput(tv0);
fusion->addOutput(tv1);
FusionExecutorCache fec(std::move(fusion_ptr));
for (int64_t size : {16, 1024, 10001, 10002, 10003, 100000, 10000001}) {
at::manual_seed(0);
auto cg_outputs = fec.runFusionWithInputs({size, -1.0, 1.0});
at::manual_seed(0);
auto ref0 = generate_uniform(size, at::kFloat) * 2 - 1;
auto ref1 = generate_uniform(size, at::kDouble) * 2 - 1;
testValidate(
fec.fusion(),
cg_outputs,
{size, -1.0, 1.0},
{ref0, ref1},
__LINE__,
__FILE__);
}
}
TEST_F(NVFuserTest, FusionNormal_CUDA) {
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
Int* size_val = IrBuilder::create<Int>();
Double* mean = IrBuilder::create<Double>();
Double* std = IrBuilder::create<Double>();
fusion->addInput(size_val);
fusion->addInput(mean);
fusion->addInput(std);
TensorView* tv0 = normal({size_val}, mean, std, DataType::Float);
TensorView* tv1 = normal({size_val}, mean, std, DataType::Double);
TensorView* tv2 = randn({size_val}, DataType::Double);
TensorView* tv3 = randn_like(tv2);
fusion->addOutput(tv0);
fusion->addOutput(tv1);
fusion->addOutput(tv2);
fusion->addOutput(tv3);
FusionExecutorCache fec(std::move(fusion_ptr));
for (int64_t size : {16, 1024, 10001, 10002, 10003, 100000, 10000001}) {
at::manual_seed(0);
auto cg_outputs = fec.runFusionWithInputs({size, 1.0, 0.5});
at::manual_seed(0);
auto ref0 = generate_normal(size, at::kFloat) * 0.5f + 1.0f;
auto ref1 = generate_normal(size, at::kDouble) * 0.5 + 1.0;
auto ref2 = generate_normal(size, at::kDouble);
auto ref3 = generate_normal(size, at::kDouble);
testValidate(
fec.fusion(),
cg_outputs,
{size, 1.0, 0.5},
{ref0, ref1, ref2, ref3},
__LINE__,
__FILE__);
}
}
TEST_F(NVFuserTest, FusionRandLikeReduction_CUDA) {
auto dtype = at::kFloat;
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
TensorView* tv0 = makeSymbolicTensor(2, aten_to_data_type(dtype));
fusion->addInput(tv0);
auto tv1 = sum(tv0, {0});
auto tv2 = rand_like(tv1);
auto tv3 = add(tv1, tv2);
fusion->addOutput(tv3);
FusionExecutorCache fec(std::move(fusion_ptr));
auto options = at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
at::Tensor t0 = at::zeros({2, 3}, options);
at::manual_seed(0);
auto cg_outputs = fec.runFusionWithInputs({t0});
auto out = cg_outputs[0];
at::manual_seed(0);
auto t1 = t0.sum(0);
auto t2 = generate_uniform(3, dtype).expand_as(t1);
auto t3 = t1.add(t2);
testValidate(fec.fusion(), {out}, {t0}, {t3}, __LINE__, __FILE__);
}
} // namespace nvfuser
| d7edf4eb04e5d9c9688b20032c87bfb3739ec54f.cu | #include <gmock/gmock-matchers.h>
#include <gtest/gtest.h>
#include <ATen/cuda/CUDAGeneratorImpl.h>
#include <c10/util/Optional.h>
#include <fusion.h>
#include <ir_all_nodes.h>
#include <kernel_cache.h>
#include <ops/arith.h>
#include <scheduler/all_schedulers.h>
#include <test/test_gpu_validator.h>
#include <test/test_utils.h>
#include <ATen/cuda/CUDAGraphsUtils.cuh>
#include <cassert>
#include <type_traits>
#include <curand.h>
#include <curand_kernel.h>
#include <curand_philox4x32_x.h>
namespace nvfuser {
enum RNGTest_t {
Uniform,
Normal,
};
namespace {
template <typename T>
__global__ void generate_random_numbers_kernel(
T* output,
int64_t size,
at::PhiloxCudaState philox_args,
RNGTest_t rng_test) {
int64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
auto seeds = at::cuda::philox::unpack(philox_args);
curandStatePhilox4_32_10_t state;
curand_init(std::get<0>(seeds), tid, std::get<1>(seeds), &state);
double2 (*ref_rng_double)(curandStatePhilox4_32_10_t*);
float4 (*ref_rng_float)(curandStatePhilox4_32_10_t*);
switch (rng_test) {
case RNGTest_t::Uniform: {
ref_rng_double = curand_uniform2_double;
ref_rng_float = curand_uniform4;
break;
}
case RNGTest_t::Normal: {
ref_rng_double = curand_normal2_double;
ref_rng_float = curand_normal4;
break;
}
}
if (std::is_same<T, double>::value) {
double2 result = ref_rng_double(&state);
if (tid * 2 < size) {
output[tid * 2] = result.x;
}
if (tid * 2 + 1 < size) {
output[tid * 2 + 1] = result.y;
}
} else {
auto is_float = std::is_same<T, float>::value;
assert(is_float);
float4 result = ref_rng_float(&state);
if (tid * 4 < size) {
output[tid * 4] = result.x;
}
if (tid * 4 + 1 < size) {
output[tid * 4 + 1] = result.y;
}
if (tid * 4 + 2 < size) {
output[tid * 4 + 2] = result.z;
}
if (tid * 4 + 3 < size) {
output[tid * 4 + 3] = result.w;
}
}
}
at::Tensor generate_random_numbers(
int64_t size,
at::ScalarType dtype,
RNGTest_t rng_test) {
auto options = at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
auto result = at::empty({size}, options);
auto gen = at::get_generator_or_default<at::CUDAGeneratorImpl>(
c10::nullopt, at::cuda::detail::getDefaultCUDAGenerator());
at::PhiloxCudaState rng_engine_inputs;
{
// See Note [Acquire lock when using random generators]
std::lock_guard<std::mutex> lock(gen->mutex_);
rng_engine_inputs = gen->philox_cuda_state(4);
}
if (dtype == at::kFloat) {
int64_t block = 128;
int64_t block_elems = block * 4;
int64_t grid = (size + block_elems - 1) / block_elems;
generate_random_numbers_kernel<<<
grid,
block,
0,
at::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<float>(), size, rng_engine_inputs, rng_test);
} else {
TORCH_CHECK(dtype == at::kDouble);
int64_t block = 128;
int64_t block_elems = block * 2;
int64_t grid = (size + block_elems - 1) / block_elems;
generate_random_numbers_kernel<<<
grid,
block,
0,
at::cuda::getCurrentCUDAStream()>>>(
result.data_ptr<double>(), size, rng_engine_inputs, rng_test);
}
return result;
}
at::Tensor generate_uniform(int64_t size, at::ScalarType dtype) {
return generate_random_numbers(size, dtype, RNGTest_t::Uniform);
}
at::Tensor generate_normal(int64_t size, at::ScalarType dtype) {
return generate_random_numbers(size, dtype, RNGTest_t::Normal);
}
} // namespace
TEST_F(NVFuserTest, FusionRNGValidateWithCURand_CUDA) {
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
Int* size_val = IrBuilder::create<Int>();
fusion->addInput(size_val);
TensorView* tv0 = rand({size_val}, DataType::Float);
TensorView* tv1 = rand({size_val}, DataType::Double);
fusion->addOutput(tv0);
fusion->addOutput(tv1);
FusionExecutorCache fec(std::move(fusion_ptr));
for (int64_t size : {16, 1024, 10001, 10002, 10003, 100000, 10000001}) {
at::manual_seed(0);
auto cg_outputs = fec.runFusionWithInputs({size});
at::manual_seed(0);
auto ref0 = generate_uniform(size, at::kFloat);
auto ref1 = generate_uniform(size, at::kDouble);
testValidate(
fec.fusion(), cg_outputs, {size}, {ref0, ref1}, __LINE__, __FILE__);
}
}
TEST_F(NVFuserTest, FusionRNGManualScheduleValidateWithCURand_CUDA) {
int64_t size = 128;
auto dtype = at::kFloat;
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
TensorView* tv0 = makeSymbolicTensor(1, aten_to_data_type(dtype));
fusion->addInput(tv0);
auto tv1 = rand_like(tv0);
auto tv2 = set(tv1);
fusion->addOutput(tv2);
tv2->split(0, 8);
tv2->axis(0)->parallelize(ParallelType::TIDx);
tv1->computeAt(tv2, 1);
auto options = at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
at::Tensor t0 = at::zeros({size}, options);
FusionExecutor fe;
fe.compileFusion(fusion, {t0});
at::manual_seed(0);
auto cg_outputs = fe.runFusion({t0});
auto out = cg_outputs[0];
at::manual_seed(0);
auto ref = generate_uniform(size, dtype);
testValidate(fusion, {out}, {t0}, {ref}, __LINE__, __FILE__);
}
TEST_F(NVFuserTest, FusionRNGManualScheduleValidateWithCURand2_CUDA) {
#ifdef FBCODE_CAFFE2
GTEST_SKIP() << "Fails accuracy on V100 32gb";
#endif
auto dtype = at::kFloat;
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
Int* size1 = IrBuilder::create<Int>();
Int* size2 = IrBuilder::create<Int>();
Int* size3 = IrBuilder::create<Int>();
Int* size4 = IrBuilder::create<Int>();
fusion->addInput(size1);
fusion->addInput(size2);
fusion->addInput(size3);
fusion->addInput(size4);
TensorView* tv0 = rand({size1, size2, size3, size4}, DataType::Float);
fusion->addOutput(tv0);
FusionExecutor fe;
fe.compileFusion(fusion, {10, 10, 10, 10});
at::manual_seed(0);
auto cg_outputs = fe.runFusion({10, 10, 10, 10});
auto out = cg_outputs[0];
at::manual_seed(0);
auto ref = generate_uniform(10000, dtype).view({10, 10, 10, 10});
testValidate(fusion, {out}, {10, 10, 10, 10}, {ref}, __LINE__, __FILE__);
}
TEST_F(NVFuserTest, FusionBroadcastingRNG_CUDA) {
for (auto dtype : {at::kFloat, at::kDouble}) {
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
TensorView* tv0 = makeConcreteTensor({5, 1}, aten_to_data_type(dtype));
TensorView* tv1 = makeConcreteTensor({5, 5}, aten_to_data_type(dtype));
fusion->addInput(tv0);
fusion->addInput(tv1);
auto tv2 = rand_like(tv0);
auto tv3 = add(tv1, tv2);
auto tv4 = add(tv0, tv3);
fusion->addOutput(tv4);
FusionExecutorCache fec(std::move(fusion_ptr));
auto options = at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
at::Tensor t0 = at::zeros({5, 1}, options);
at::Tensor t1 = at::zeros({5, 5}, options);
auto cg_outputs = fec.runFusionWithInputs({t0, t1});
auto out = cg_outputs[0];
TORCH_CHECK((out.select(1, 0) == out.select(1, 1)).all().item<bool>())
TORCH_CHECK((out.select(1, 0) == out.select(1, 2)).all().item<bool>())
TORCH_CHECK((out.select(1, 0) == out.select(1, 3)).all().item<bool>())
TORCH_CHECK((out.select(1, 0) == out.select(1, 4)).all().item<bool>())
}
}
TEST_F(NVFuserTest, FusionBroadcastingRNG2_CUDA) {
for (int64_t size : {16, 1024, 10001, 10002, 10003, 100000, 10000001}) {
for (auto dtype : {at::kFloat, at::kDouble}) {
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
TensorView* tv0 = makeConcreteTensor({1}, aten_to_data_type(dtype));
TensorView* tv1 = makeSymbolicTensor(1, aten_to_data_type(dtype));
fusion->addInput(tv0);
fusion->addInput(tv1);
auto tv2 = rand_like(tv0);
auto tv3 = add(tv1, tv2);
fusion->addOutput(tv3);
FusionExecutorCache fec(std::move(fusion_ptr));
auto options = at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
at::Tensor t0 = at::zeros({1}, options);
at::Tensor t1 = at::zeros({size}, options);
at::manual_seed(0);
auto cg_outputs = fec.runFusionWithInputs({t0, t1});
auto out = cg_outputs[0];
at::manual_seed(0);
auto ref = generate_uniform(1, dtype).expand_as(t1);
testValidate(fec.fusion(), {out}, {t0, t1}, {ref}, __LINE__, __FILE__);
}
}
}
TEST_F(NVFuserTest, FusionBroadcastingRNGSmem_CUDA) {
for (auto dtype : {at::kFloat, at::kDouble}) {
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
TensorView* tv0 = makeConcreteTensor({5, 1}, aten_to_data_type(dtype));
TensorView* tv1 = makeConcreteTensor({5, 5}, aten_to_data_type(dtype));
fusion->addInput(tv0);
fusion->addInput(tv1);
auto tv2 = rand_like(tv0);
auto tv3 = add(tv1, tv2);
auto tv4 = add(tv0, tv3);
fusion->addOutput(tv4);
auto options = at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
at::Tensor t0 = at::zeros({5, 1}, options);
at::Tensor t1 = at::zeros({5, 5}, options);
auto lparams = scheduleTranspose(fusion, {t0, t1});
FusionExecutor fe;
fe.compileFusion(fusion, {t0, t1}, lparams);
auto cg_outputs = fe.runFusion({t0, t1}, lparams);
auto out = cg_outputs[0];
TORCH_CHECK((out.select(1, 0) == out.select(1, 1)).all().item<bool>());
TORCH_CHECK((out.select(1, 0) == out.select(1, 2)).all().item<bool>());
TORCH_CHECK((out.select(1, 0) == out.select(1, 3)).all().item<bool>());
TORCH_CHECK((out.select(1, 0) == out.select(1, 4)).all().item<bool>());
}
}
TEST_F(NVFuserTest, FusionBroadcastingRNGSmemNonSquareTile_CUDA) {
// https://github.com/csarofeen/pytorch/issues/1926
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
TensorView* tv0 = makeConcreteTensor({5, 1});
TensorView* tv1 = makeConcreteTensor({5, 5});
fusion->addInput(tv0);
fusion->addInput(tv1);
auto tv2 = rand_like(tv0);
auto tv3 = add(tv1, tv2);
auto tv4 = add(tv0, tv3);
fusion->addOutput(tv4);
auto options = at::TensorOptions().dtype(at::kFloat).device(at::kCUDA, 0);
at::Tensor t0 = at::zeros({5, 1}, options);
at::Tensor t1 = at::zeros({5, 5}, options);
TransposeParams heuristics;
heuristics.tile_size1 = 8;
heuristics.tile_size2 = 4;
scheduleTranspose(fusion, heuristics);
FusionExecutor fe;
fe.compileFusion(fusion, {t0, t1});
auto cg_outputs = fe.runFusion({t0, t1});
auto out = cg_outputs[0];
TORCH_CHECK((out.select(1, 0) == out.select(1, 1)).all().item<bool>());
TORCH_CHECK((out.select(1, 0) == out.select(1, 2)).all().item<bool>());
TORCH_CHECK((out.select(1, 0) == out.select(1, 3)).all().item<bool>());
TORCH_CHECK((out.select(1, 0) == out.select(1, 4)).all().item<bool>());
}
TEST_F(NVFuserTest, FusionUniform_CUDA) {
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
Int* size_val = IrBuilder::create<Int>();
Double* low = IrBuilder::create<Double>();
Double* high = IrBuilder::create<Double>();
fusion->addInput(size_val);
fusion->addInput(low);
fusion->addInput(high);
TensorView* tv0 = uniform({size_val}, low, high, DataType::Float);
TensorView* tv1 = uniform({size_val}, low, high, DataType::Double);
fusion->addOutput(tv0);
fusion->addOutput(tv1);
FusionExecutorCache fec(std::move(fusion_ptr));
for (int64_t size : {16, 1024, 10001, 10002, 10003, 100000, 10000001}) {
at::manual_seed(0);
auto cg_outputs = fec.runFusionWithInputs({size, -1.0, 1.0});
at::manual_seed(0);
auto ref0 = generate_uniform(size, at::kFloat) * 2 - 1;
auto ref1 = generate_uniform(size, at::kDouble) * 2 - 1;
testValidate(
fec.fusion(),
cg_outputs,
{size, -1.0, 1.0},
{ref0, ref1},
__LINE__,
__FILE__);
}
}
TEST_F(NVFuserTest, FusionNormal_CUDA) {
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
Int* size_val = IrBuilder::create<Int>();
Double* mean = IrBuilder::create<Double>();
Double* std = IrBuilder::create<Double>();
fusion->addInput(size_val);
fusion->addInput(mean);
fusion->addInput(std);
TensorView* tv0 = normal({size_val}, mean, std, DataType::Float);
TensorView* tv1 = normal({size_val}, mean, std, DataType::Double);
TensorView* tv2 = randn({size_val}, DataType::Double);
TensorView* tv3 = randn_like(tv2);
fusion->addOutput(tv0);
fusion->addOutput(tv1);
fusion->addOutput(tv2);
fusion->addOutput(tv3);
FusionExecutorCache fec(std::move(fusion_ptr));
for (int64_t size : {16, 1024, 10001, 10002, 10003, 100000, 10000001}) {
at::manual_seed(0);
auto cg_outputs = fec.runFusionWithInputs({size, 1.0, 0.5});
at::manual_seed(0);
auto ref0 = generate_normal(size, at::kFloat) * 0.5f + 1.0f;
auto ref1 = generate_normal(size, at::kDouble) * 0.5 + 1.0;
auto ref2 = generate_normal(size, at::kDouble);
auto ref3 = generate_normal(size, at::kDouble);
testValidate(
fec.fusion(),
cg_outputs,
{size, 1.0, 0.5},
{ref0, ref1, ref2, ref3},
__LINE__,
__FILE__);
}
}
TEST_F(NVFuserTest, FusionRandLikeReduction_CUDA) {
auto dtype = at::kFloat;
std::unique_ptr<Fusion> fusion_ptr = std::make_unique<Fusion>();
auto fusion = fusion_ptr.get();
FusionGuard fg(fusion);
TensorView* tv0 = makeSymbolicTensor(2, aten_to_data_type(dtype));
fusion->addInput(tv0);
auto tv1 = sum(tv0, {0});
auto tv2 = rand_like(tv1);
auto tv3 = add(tv1, tv2);
fusion->addOutput(tv3);
FusionExecutorCache fec(std::move(fusion_ptr));
auto options = at::TensorOptions().dtype(dtype).device(at::kCUDA, 0);
at::Tensor t0 = at::zeros({2, 3}, options);
at::manual_seed(0);
auto cg_outputs = fec.runFusionWithInputs({t0});
auto out = cg_outputs[0];
at::manual_seed(0);
auto t1 = t0.sum(0);
auto t2 = generate_uniform(3, dtype).expand_as(t1);
auto t3 = t1.add(t2);
testValidate(fec.fusion(), {out}, {t0}, {t3}, __LINE__, __FILE__);
}
} // namespace nvfuser
|
3abf93fe10c083fa1860447378c385c036083931.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This program completes parallel multiplication of Matrices
// Included C libraries
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <time.h>
// Included CUDA libraries
#include <cutil.h>
#include <rocblas.h>
// Included files
#include "mm_util.cu"
#include "mm_kernel.cu"
#include "mm_gold.cpp"
#define ITERATIONS 50
int main( int argc, char* argv[])
{
int mm1_BLOCKS = 16;
// Screen output
printf("MM %i\n", mm1_BLOCKS);
printf("Parallel Matrix multiplication.\n");
// Check limitation of available device
int dev = 0; // assumed only one device
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("Device 0: Maximum number of threads per block is %d.\n", deviceProp.maxThreadsPerBlock);
/**************************************************
* Create timers *
**************************************************/
unsigned timer_cpu;
unsigned timer_gpu1;
unsigned timer_gpu2;
unsigned timer_gpu3;
unsigned timer_gpu4;
unsigned timer_gpu5;
unsigned timer_gpu6;
CUT_SAFE_CALL(cutCreateTimer(&timer_cpu));
CUT_SAFE_CALL(cutCreateTimer(&timer_gpu1));
CUT_SAFE_CALL(cutCreateTimer(&timer_gpu2));
CUT_SAFE_CALL(cutCreateTimer(&timer_gpu3));
CUT_SAFE_CALL(cutCreateTimer(&timer_gpu4));
CUT_SAFE_CALL(cutCreateTimer(&timer_gpu5));
CUT_SAFE_CALL(cutCreateTimer(&timer_gpu6));
/****************************
* Initialization of memory *
****************************/
int matrix_blocks = 16;
int m = matrix_blocks*20;
int k = matrix_blocks*40;
int n = matrix_blocks*60;
//int m, k, n;
//n = m = k = 1280;
// Pointers to CPU (host) data
Matrix A_h;
Matrix B_h;
Matrix C_h;
create_matrix(&A_h, m, k);
create_matrix(&B_h, k, n);
create_matrix(&C_h, m, n);
srand((unsigned int)time(NULL));
int A_size = A_h.width * A_h.height;
for(int i = 0; i < A_size; i++)
A_h.elements[i] = (float) rand() / RAND_MAX;
int B_size = B_h.width * B_h.height;
for(int i = 0; i < B_size; i++)
B_h.elements[i] = (float) rand() / RAND_MAX;
int C_size = C_h.width * C_h.height;
for(int i = 0; i < C_size; i++)
C_h.elements[i] = 0.0f;
/***************************
* CPU execution *
***************************/
Matrix mmGold_h = clone_matrix(&C_h);
for (int i = 0; i < C_size; ++i) {
mmGold_h.elements[i] = 0.0f;
}
CUT_SAFE_CALL(cutStartTimer(timer_cpu));
for (int iter = 0; iter < ITERATIONS; ++iter)
{
mm_gold(m,n,k,A_h.elements,B_h.elements,mmGold_h.elements);
}
CUT_SAFE_CALL(cutStopTimer(timer_cpu));
/***************************
* GPU execution (naive) *
***************************/
// Split problem into threads
dim3 mm1_threadBlock( mm1_BLOCKS, mm1_BLOCKS );
unsigned int blocky = (unsigned int) ceil(((float)m)/mm1_BLOCKS);
unsigned int blockx = (unsigned int)ceil(((float)n)/mm1_BLOCKS);
dim3 mm1_blockGrid(blockx, blocky);
printf("Allocated grid (%u,%u)\n", blockx, blocky);
Matrix mm1_h = clone_matrix(&C_h);
CUT_SAFE_CALL(cutStartTimer(timer_gpu1));
Matrix A_d = alloc_matrix_on_device( &A_h);
Matrix B_d = alloc_matrix_on_device( &B_h);
Matrix C_d = alloc_matrix_on_device(&C_h);
for (int iter = 0; iter < ITERATIONS; ++iter)
{
copy_matrix_to_device( &A_h, &A_d);
copy_matrix_to_device( &B_h, &B_d);
// Kernel invocation
hipLaunchKernelGGL(( mm_kernel1), dim3(mm1_blockGrid), dim3(mm1_threadBlock), 0, 0, C_d, A_d, B_d);
// Error check
CUT_CHECK_ERROR("parallel reduction kernel execution failed\n");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
copy_matrix_from_device(&mm1_h, &C_d);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
}
CUDA_SAFE_CALL( hipFree(A_d.elements));
CUDA_SAFE_CALL( hipFree(B_d.elements));
CUDA_SAFE_CALL( hipFree(C_d.elements));
CUT_SAFE_CALL(cutStopTimer(timer_gpu1));
/***************************
* GPU execution (v2) *
***************************/
Matrix mm2_h = clone_matrix(&C_h);
CUT_SAFE_CALL(cutStartTimer(timer_gpu2));
A_d = alloc_matrix_on_device( &A_h);
B_d = alloc_matrix_on_device( &B_h);
C_d = alloc_matrix_on_device(&C_h);
for (int iter = 0; iter < ITERATIONS; ++iter)
{
copy_matrix_to_device( &A_h, &A_d);
copy_matrix_to_device( &B_h, &B_d);
// Kernel invocation
hipLaunchKernelGGL(( mm_kernel2), dim3(mm1_blockGrid), dim3(mm1_threadBlock), 0, 0, C_d, A_d, B_d);
// Error check
CUT_CHECK_ERROR("parallel reduction kernel execution failed\n");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
copy_matrix_from_device(&mm2_h, &C_d);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
}
CUDA_SAFE_CALL( hipFree(A_d.elements));
CUDA_SAFE_CALL( hipFree(B_d.elements));
CUDA_SAFE_CALL( hipFree(C_d.elements));
CUT_SAFE_CALL(cutStopTimer(timer_gpu2));
/***************************
* GPU execution (v3) *
***************************/
dim3 mm3_threadBlock( 16 , 1 );
Matrix mm3_h = clone_matrix(&C_h);
CUT_SAFE_CALL(cutStartTimer(timer_gpu3));
A_d = alloc_matrix_on_device( &A_h);
B_d = alloc_matrix_on_device( &B_h);
C_d = alloc_matrix_on_device(&C_h);
for (int iter = 0; iter < ITERATIONS; ++iter)
{
copy_matrix_to_device( &A_h, &A_d);
copy_matrix_to_device( &B_h, &B_d);
// Kernel invocation
hipLaunchKernelGGL(( mm_kernel3), dim3(mm1_blockGrid), dim3(mm3_threadBlock), 0, 0, C_d, A_d, B_d);
// Error check
CUT_CHECK_ERROR("parallel reduction kernel execution failed\n");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
copy_matrix_from_device(&mm3_h, &C_d);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
}
CUDA_SAFE_CALL( hipFree(A_d.elements));
CUDA_SAFE_CALL( hipFree(B_d.elements));
CUDA_SAFE_CALL( hipFree(C_d.elements));
CUT_SAFE_CALL(cutStopTimer(timer_gpu3));
/***************************
* GPU execution (v4) *
***************************/
dim3 mm4_threadBlock( 64, 1 );
unsigned int blocky4 = (unsigned int) ceil(((float)m)/16);
unsigned int blockx4 = (unsigned int)ceil(((float)n)/64);
dim3 mm4_blockGrid(blockx4, blocky4);
Matrix mm4_h = clone_matrix(&C_h);
CUT_SAFE_CALL(cutStartTimer(timer_gpu4));
A_d = alloc_matrix_on_device( &A_h);
B_d = alloc_matrix_on_device( &B_h);
C_d = alloc_matrix_on_device(&C_h);
for (int iter = 0; iter < ITERATIONS; ++iter)
{
copy_matrix_to_device( &A_h, &A_d);
copy_matrix_to_device( &B_h, &B_d);
// Kernel invocation
hipLaunchKernelGGL(( mm_kernel4), dim3(mm4_blockGrid), dim3(mm4_threadBlock), 0, 0, C_d, A_d, B_d);
// Error check
CUT_CHECK_ERROR("parallel reduction kernel execution failed\n");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
copy_matrix_from_device(&mm4_h, &C_d);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
}
CUDA_SAFE_CALL( hipFree(A_d.elements));
CUDA_SAFE_CALL( hipFree(B_d.elements));
CUDA_SAFE_CALL( hipFree(C_d.elements));
CUT_SAFE_CALL(cutStopTimer(timer_gpu4));
/***************************
* GPU execution (v5) *
***************************/
dim3 mm5_threadBlock( 64, 1 );
unsigned int blocky5 = (unsigned int) ceil(((float)m)/16);
unsigned int blockx5 = (unsigned int)ceil(((float)n)/64);
dim3 mm5_blockGrid(blockx5, blocky5);
Matrix mm5_h = clone_matrix(&C_h);
CUT_SAFE_CALL(cutStartTimer(timer_gpu5));
A_d = alloc_matrix_on_device( &A_h);
B_d = alloc_matrix_on_device( &B_h);
C_d = alloc_matrix_on_device(&C_h);
for (int iter = 0; iter < ITERATIONS; ++iter)
{
copy_matrix_to_device( &A_h, &A_d);
copy_matrix_to_device( &B_h, &B_d);
// Kernel invocation
hipLaunchKernelGGL(( mm_kernel5), dim3(mm5_blockGrid), dim3(mm5_threadBlock), 0, 0, C_d, A_d, B_d);
// Error check
CUT_CHECK_ERROR("parallel reduction kernel execution failed\n");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
copy_matrix_from_device(&mm5_h, &C_d);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
}
CUDA_SAFE_CALL( hipFree(A_d.elements));
CUDA_SAFE_CALL( hipFree(B_d.elements));
CUDA_SAFE_CALL( hipFree(C_d.elements));
CUT_SAFE_CALL(cutStopTimer(timer_gpu5));
/***************************
* GPU execution (cublas) *
***************************/
Matrix mmcu_h = clone_matrix(&C_h);
CUT_SAFE_CALL(cutStartTimer(timer_gpu6));
A_d = alloc_matrix_on_device( &A_h);
B_d = alloc_matrix_on_device( &B_h);
C_d = alloc_matrix_on_device(&C_h);
for (int iter = 0; iter < ITERATIONS; ++iter)
{
copy_matrix_to_device( &A_h, &A_d);
copy_matrix_to_device( &B_h, &B_d);
float alpha = 1.0f;
float beta = 0.0f;
int lda = k;
int ldb = n;
int ldc = n;
// Invocation of the SGEMM routine
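// The host matrices are row-major while SGEMM assumes column-major storage, so B and A
// are passed in swapped order (with matching leading dimensions): the column-major
// product B*A, read back row-major, is exactly C = A*B.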
hipblasSgemm('N', 'N',
n, m, k,
alpha,
B_d.elements, ldb,
A_d.elements, lda,
beta,
C_d.elements, ldc);
// Error check
CUT_CHECK_ERROR("parallel reduction kernel execution failed\n");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
copy_matrix_from_device(&mmcu_h, &C_d);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
}
CUDA_SAFE_CALL( hipFree(A_d.elements));
CUDA_SAFE_CALL( hipFree(B_d.elements));
CUDA_SAFE_CALL( hipFree(C_d.elements));
CUT_SAFE_CALL(cutStopTimer(timer_gpu6));
/*********************************
* Output timings & verification *
*********************************/
printf(" CPU time : %.4f (ms)\n\n",cutGetTimerValue(timer_cpu));
print_matrix_result(&mm1_h, "Naive ", timer_gpu1, timer_cpu, &mmGold_h);
print_matrix_result(&mm2_h, "Shared ", timer_gpu2, timer_cpu, &mmGold_h);
print_matrix_result(&mm3_h, "4a ", timer_gpu3, timer_cpu, &mmGold_h);
print_matrix_result(&mm4_h, "4b ", timer_gpu4, timer_cpu, &mmGold_h);
print_matrix_result(&mm5_h, "Pimped ", timer_gpu5, timer_cpu, &mmGold_h);
print_matrix_result(&mmcu_h, "hipblasSgemm ", timer_gpu6, timer_cpu, &mmGold_h);
/***************************
* Cleaning memory *
***************************/
free(C_h.elements);
free(A_h.elements);
free(B_h.elements);
free(mm1_h.elements);
free(mm2_h.elements);
free(mm3_h.elements);
free(mm4_h.elements);
free(mm5_h.elements);
free(mmcu_h.elements);
}
| 3abf93fe10c083fa1860447378c385c036083931.cu | // This program completes parallel multiplication of Matrices
// Included C libraries
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <time.h>
// Included CUDA libraries
#include <cutil.h>
#include <cublas.h>
// Included files
#include "mm_util.cu"
#include "mm_kernel.cu"
#include "mm_gold.cpp"
#define ITERATIONS 50
int main( int argc, char* argv[])
{
int mm1_BLOCKS = 16;
// Screen output
printf("MM %i\n", mm1_BLOCKS);
printf("Parallel Matrix multiplication.\n");
// Check limitation of available device
int dev = 0; // assumed only one device
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("Device 0: Maximum number of threads per block is %d.\n", deviceProp.maxThreadsPerBlock);
/**************************************************
* Create timers *
**************************************************/
unsigned timer_cpu;
unsigned timer_gpu1;
unsigned timer_gpu2;
unsigned timer_gpu3;
unsigned timer_gpu4;
unsigned timer_gpu5;
unsigned timer_gpu6;
CUT_SAFE_CALL(cutCreateTimer(&timer_cpu));
CUT_SAFE_CALL(cutCreateTimer(&timer_gpu1));
CUT_SAFE_CALL(cutCreateTimer(&timer_gpu2));
CUT_SAFE_CALL(cutCreateTimer(&timer_gpu3));
CUT_SAFE_CALL(cutCreateTimer(&timer_gpu4));
CUT_SAFE_CALL(cutCreateTimer(&timer_gpu5));
CUT_SAFE_CALL(cutCreateTimer(&timer_gpu6));
/****************************
* Initialization of memory *
****************************/
int matrix_blocks = 16;
int m = matrix_blocks*20;
int k = matrix_blocks*40;
int n = matrix_blocks*60;
//int m, k, n;
//n = m = k = 1280;
// Pointers to CPU (host) data
Matrix A_h;
Matrix B_h;
Matrix C_h;
create_matrix(&A_h, m, k);
create_matrix(&B_h, k, n);
create_matrix(&C_h, m, n);
srand((unsigned int)time(NULL));
int A_size = A_h.width * A_h.height;
for(int i = 0; i < A_size; i++)
A_h.elements[i] = (float) rand() / RAND_MAX;
int B_size = B_h.width * B_h.height;
for(int i = 0; i < B_size; i++)
B_h.elements[i] = (float) rand() / RAND_MAX;
int C_size = C_h.width * C_h.height;
for(int i = 0; i < C_size; i++)
C_h.elements[i] = 0.0f;
/***************************
* CPU execution *
***************************/
Matrix mmGold_h = clone_matrix(&C_h);
for (int i = 0; i < C_size; ++i) {
mmGold_h.elements[i] = 0.0f;
}
CUT_SAFE_CALL(cutStartTimer(timer_cpu));
for (int iter = 0; iter < ITERATIONS; ++iter)
{
mm_gold(m,n,k,A_h.elements,B_h.elements,mmGold_h.elements);
}
CUT_SAFE_CALL(cutStopTimer(timer_cpu));
/***************************
* GPU execution (naive) *
***************************/
// Split problem into threads
dim3 mm1_threadBlock( mm1_BLOCKS, mm1_BLOCKS );
unsigned int blocky = (unsigned int) ceil(((float)m)/mm1_BLOCKS);
unsigned int blockx = (unsigned int)ceil(((float)n)/mm1_BLOCKS);
dim3 mm1_blockGrid(blockx, blocky);
printf("Allocated grid (%u,%u)\n", blockx, blocky);
Matrix mm1_h = clone_matrix(&C_h);
CUT_SAFE_CALL(cutStartTimer(timer_gpu1));
Matrix A_d = alloc_matrix_on_device( &A_h);
Matrix B_d = alloc_matrix_on_device( &B_h);
Matrix C_d = alloc_matrix_on_device(&C_h);
for (int iter = 0; iter < ITERATIONS; ++iter)
{
copy_matrix_to_device( &A_h, &A_d);
copy_matrix_to_device( &B_h, &B_d);
// Kernel invocation
mm_kernel1<<< mm1_blockGrid, mm1_threadBlock>>>(C_d, A_d, B_d);
// Error check
CUT_CHECK_ERROR("parallel reduction kernel execution failed\n");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
copy_matrix_from_device(&mm1_h, &C_d);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
}
CUDA_SAFE_CALL( cudaFree(A_d.elements));
CUDA_SAFE_CALL( cudaFree(B_d.elements));
CUDA_SAFE_CALL( cudaFree(C_d.elements));
CUT_SAFE_CALL(cutStopTimer(timer_gpu1));
/***************************
* GPU execution (v2) *
***************************/
Matrix mm2_h = clone_matrix(&C_h);
CUT_SAFE_CALL(cutStartTimer(timer_gpu2));
A_d = alloc_matrix_on_device( &A_h);
B_d = alloc_matrix_on_device( &B_h);
C_d = alloc_matrix_on_device(&C_h);
for (int iter = 0; iter < ITERATIONS; ++iter)
{
copy_matrix_to_device( &A_h, &A_d);
copy_matrix_to_device( &B_h, &B_d);
// Kernel invocation
mm_kernel2<<< mm1_blockGrid, mm1_threadBlock>>>(C_d, A_d, B_d);
// Error check
CUT_CHECK_ERROR("parallel reduction kernel execution failed\n");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
copy_matrix_from_device(&mm2_h, &C_d);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
}
CUDA_SAFE_CALL( cudaFree(A_d.elements));
CUDA_SAFE_CALL( cudaFree(B_d.elements));
CUDA_SAFE_CALL( cudaFree(C_d.elements));
CUT_SAFE_CALL(cutStopTimer(timer_gpu2));
/***************************
* GPU execution (v3) *
***************************/
dim3 mm3_threadBlock( 16 , 1 );
Matrix mm3_h = clone_matrix(&C_h);
CUT_SAFE_CALL(cutStartTimer(timer_gpu3));
A_d = alloc_matrix_on_device( &A_h);
B_d = alloc_matrix_on_device( &B_h);
C_d = alloc_matrix_on_device(&C_h);
for (int iter = 0; iter < ITERATIONS; ++iter)
{
copy_matrix_to_device( &A_h, &A_d);
copy_matrix_to_device( &B_h, &B_d);
// Kernel invocation
mm_kernel3<<< mm1_blockGrid, mm3_threadBlock>>>(C_d, A_d, B_d);
// Error check
CUT_CHECK_ERROR("parallel reduction kernel execution failed\n");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
copy_matrix_from_device(&mm3_h, &C_d);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
}
CUDA_SAFE_CALL( cudaFree(A_d.elements));
CUDA_SAFE_CALL( cudaFree(B_d.elements));
CUDA_SAFE_CALL( cudaFree(C_d.elements));
CUT_SAFE_CALL(cutStopTimer(timer_gpu3));
/***************************
* GPU execution (v4) *
***************************/
dim3 mm4_threadBlock( 64, 1 );
unsigned int blocky4 = (unsigned int) ceil(((float)m)/16);
unsigned int blockx4 = (unsigned int)ceil(((float)n)/64);
dim3 mm4_blockGrid(blockx4, blocky4);
Matrix mm4_h = clone_matrix(&C_h);
CUT_SAFE_CALL(cutStartTimer(timer_gpu4));
A_d = alloc_matrix_on_device( &A_h);
B_d = alloc_matrix_on_device( &B_h);
C_d = alloc_matrix_on_device(&C_h);
for (int iter = 0; iter < ITERATIONS; ++iter)
{
copy_matrix_to_device( &A_h, &A_d);
copy_matrix_to_device( &B_h, &B_d);
// Kernel invocation
mm_kernel4<<< mm4_blockGrid, mm4_threadBlock>>>(C_d, A_d, B_d);
// Error check
CUT_CHECK_ERROR("parallel reduction kernel execution failed\n");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
copy_matrix_from_device(&mm4_h, &C_d);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
}
CUDA_SAFE_CALL( cudaFree(A_d.elements));
CUDA_SAFE_CALL( cudaFree(B_d.elements));
CUDA_SAFE_CALL( cudaFree(C_d.elements));
CUT_SAFE_CALL(cutStopTimer(timer_gpu4));
/***************************
* GPU execution (v5) *
***************************/
dim3 mm5_threadBlock( 64, 1 );
unsigned int blocky5 = (unsigned int) ceil(((float)m)/16);
unsigned int blockx5 = (unsigned int)ceil(((float)n)/64);
dim3 mm5_blockGrid(blockx5, blocky5);
Matrix mm5_h = clone_matrix(&C_h);
CUT_SAFE_CALL(cutStartTimer(timer_gpu5));
A_d = alloc_matrix_on_device( &A_h);
B_d = alloc_matrix_on_device( &B_h);
C_d = alloc_matrix_on_device(&C_h);
for (int iter = 0; iter < ITERATIONS; ++iter)
{
copy_matrix_to_device( &A_h, &A_d);
copy_matrix_to_device( &B_h, &B_d);
// Kernel invocation
mm_kernel5<<< mm5_blockGrid, mm5_threadBlock>>>(C_d, A_d, B_d);
// Error check
CUT_CHECK_ERROR("parallel reduction kernel execution failed\n");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
copy_matrix_from_device(&mm5_h, &C_d);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
}
CUDA_SAFE_CALL( cudaFree(A_d.elements));
CUDA_SAFE_CALL( cudaFree(B_d.elements));
CUDA_SAFE_CALL( cudaFree(C_d.elements));
CUT_SAFE_CALL(cutStopTimer(timer_gpu5));
/***************************
* GPU execution (cublas) *
***************************/
Matrix mmcu_h = clone_matrix(&C_h);
CUT_SAFE_CALL(cutStartTimer(timer_gpu6));
A_d = alloc_matrix_on_device( &A_h);
B_d = alloc_matrix_on_device( &B_h);
C_d = alloc_matrix_on_device(&C_h);
for (int iter = 0; iter < ITERATIONS; ++iter)
{
copy_matrix_to_device( &A_h, &A_d);
copy_matrix_to_device( &B_h, &B_d);
float alpha = 1.0f;
float beta = 0.0f;
int lda = k;
int ldb = n;
int ldc = n;
// Invocation of the SGEMM routine
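// The host matrices are row-major while SGEMM assumes column-major storage, so B and A
// are passed in swapped order (with matching leading dimensions): the column-major
// product B*A, read back row-major, is exactly C = A*B.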
cublasSgemm('N', 'N',
n, m, k,
alpha,
B_d.elements, ldb,
A_d.elements, lda,
beta,
C_d.elements, ldc);
// Error check
CUT_CHECK_ERROR("parallel reduction kernel execution failed\n");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
copy_matrix_from_device(&mmcu_h, &C_d);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
}
CUDA_SAFE_CALL( cudaFree(A_d.elements));
CUDA_SAFE_CALL( cudaFree(B_d.elements));
CUDA_SAFE_CALL( cudaFree(C_d.elements));
CUT_SAFE_CALL(cutStopTimer(timer_gpu6));
/*********************************
* Output timings & verification *
*********************************/
printf(" CPU time : %.4f (ms)\n\n",cutGetTimerValue(timer_cpu));
print_matrix_result(&mm1_h, "Naive ", timer_gpu1, timer_cpu, &mmGold_h);
print_matrix_result(&mm2_h, "Shared ", timer_gpu2, timer_cpu, &mmGold_h);
print_matrix_result(&mm3_h, "4a ", timer_gpu3, timer_cpu, &mmGold_h);
print_matrix_result(&mm4_h, "4b ", timer_gpu4, timer_cpu, &mmGold_h);
print_matrix_result(&mm5_h, "Pimped ", timer_gpu5, timer_cpu, &mmGold_h);
print_matrix_result(&mmcu_h, "cublasSgemm ", timer_gpu6, timer_cpu, &mmGold_h);
/***************************
* Cleaning memory *
***************************/
free(C_h.elements);
free(A_h.elements);
free(B_h.elements);
free(mm1_h.elements);
free(mm2_h.elements);
free(mm3_h.elements);
free(mm4_h.elements);
free(mm5_h.elements);
free(mmcu_h.elements);
}
|
bdba26345b329ee5dab0c6dbb30cac23d0e210d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void TgvThresholdingL1Kernel(float2* Tp, float* u_, float* Iu, float* Iz, float lambda, float tau, float* eta_u, float* u, float* us, int width, int height, int stride)
{
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column
float desiredRadius = (float)width / 2.20f;
float halfWidth = (float)width / 2.0f;
float halfHeight = (float)height / 2.0f;
float radius = sqrtf((iy - halfHeight) * (iy - halfHeight) + (ix - halfWidth) * (ix - halfWidth));
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride;
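// Pixels outside the circular field of view (radius ~ width / 2.2) are simply zeroed.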
if (radius >= desiredRadius)
{
us[pos] = 0.0f;// u[pos];
}
else {
//int right = (ix + 1) + iy * stride;
//int down = ix + (iy + 1) * stride;
int left = (ix - 1) + iy * stride;
int up = ix + (iy - 1) * stride;
//div_p = dxm(Tp(:, : , 1)) + dym(Tp(:, : , 2));
float div_p;
float dxmTp, dymTp;
if ((ix - 1) >= 0)
dxmTp = Tp[pos].x - Tp[left].x;
else if (ix == width - 1)
dxmTp = -Tp[left].x;
else
dxmTp = Tp[pos].x;
if ((iy - 1) >= 0)
dymTp = Tp[pos].y - Tp[up].y;
else if (iy == height - 1)
dymTp = -Tp[up].y;
else
dymTp = Tp[pos].y;
div_p = dxmTp + dymTp;
//tau_eta_u = tau. / eta_u;
float tau_eta_u;
if (eta_u[pos] == 0) {
tau_eta_u = tau;
}
else {
tau_eta_u = tau / eta_u[pos];
}
// Thresholding
float uhat = u_[pos] + tau_eta_u * div_p;
float dun = (uhat - u[pos]);
float Ius = Iu[pos];
float rho = Ius * dun + Iz[pos];
float upper = lambda * tau_eta_u*(Ius*Ius);
float lower = -lambda * tau_eta_u*(Ius*Ius);
float du = 0.0f; // default keeps u unchanged if no branch below applies (e.g. rho is NaN)
if ((rho <= upper) && (rho >= lower)) {
if (Ius == 0) {
du = dun;
}
else {
du = dun - rho / Ius;
}
}
else if (rho < lower) {
du = dun + lambda * tau_eta_u*Ius;
}
else if (rho > upper) {
du = dun - lambda * tau_eta_u*Ius;
}
us[pos] = u[pos] + du;
}
}
} | bdba26345b329ee5dab0c6dbb30cac23d0e210d8.cu | #include "includes.h"
__global__ void TgvThresholdingL1Kernel(float2* Tp, float* u_, float* Iu, float* Iz, float lambda, float tau, float* eta_u, float* u, float* us, int width, int height, int stride)
{
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column
float desiredRadius = (float)width / 2.20f;
float halfWidth = (float)width / 2.0f;
float halfHeight = (float)height / 2.0f;
float radius = sqrtf((iy - halfHeight) * (iy - halfHeight) + (ix - halfWidth) * (ix - halfWidth));
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride;
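// Pixels outside the circular field of view (radius ~ width / 2.2) are simply zeroed.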
if (radius >= desiredRadius)
{
us[pos] = 0.0f;// u[pos];
}
else {
//int right = (ix + 1) + iy * stride;
//int down = ix + (iy + 1) * stride;
int left = (ix - 1) + iy * stride;
int up = ix + (iy - 1) * stride;
//div_p = dxm(Tp(:, : , 1)) + dym(Tp(:, : , 2));
float div_p;
float dxmTp, dymTp;
if ((ix - 1) >= 0)
dxmTp = Tp[pos].x - Tp[left].x;
else if (ix == width - 1)
dxmTp = -Tp[left].x;
else
dxmTp = Tp[pos].x;
if ((iy - 1) >= 0)
dymTp = Tp[pos].y - Tp[up].y;
else if (iy == height - 1)
dymTp = -Tp[up].y;
else
dymTp = Tp[pos].y;
div_p = dxmTp + dymTp;
//tau_eta_u = tau. / eta_u;
float tau_eta_u;
if (eta_u[pos] == 0) {
tau_eta_u = tau;
}
else {
tau_eta_u = tau / eta_u[pos];
}
// Thresholding
float uhat = u_[pos] + tau_eta_u * div_p;
float dun = (uhat - u[pos]);
float Ius = Iu[pos];
float rho = Ius * dun + Iz[pos];
float upper = lambda * tau_eta_u*(Ius*Ius);
float lower = -lambda * tau_eta_u*(Ius*Ius);
float du = 0.0f; // default keeps u unchanged if no branch below applies (e.g. rho is NaN)
if ((rho <= upper) && (rho >= lower)) {
if (Ius == 0) {
du = dun;
}
else {
du = dun - rho / Ius;
}
}
else if (rho < lower) {
du = dun + lambda * tau_eta_u*Ius;
}
else if (rho > upper) {
du = dun - lambda * tau_eta_u*Ius;
}
us[pos] = u[pos] + du;
}
}
} |
864efb0763f330c5d2f79a014f0f1021e6cf2546.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "common.h"
#include "im2col.h"
#include "THHHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "generic/SpatialConvolutionLocalBatch.cu"
#include "THHGenerateFloatTypes.h"
| 864efb0763f330c5d2f79a014f0f1021e6cf2546.cu | #include "THCUNN.h"
#include "common.h"
#include "im2col.h"
#include "THCHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "generic/SpatialConvolutionLocalBatch.cu"
#include "THCGenerateFloatTypes.h"
|
3c0ef6dbe2082b54812b1e31d4fc29baef3b4cd7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
/* insert code to calculate the index properly using blockIdx.x, blockDim.x, threadIdx.x */
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
/* experiment with N */
/* how large can it be? */ // Assumes that the dimension fits inside RAM and each dimension is divisible by 32
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
hipMalloc( (void **) &d_a, size );
hipMalloc( (void **) &d_b, size );
hipMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
hipMemcpy( d_a, a, size, hipMemcpyHostToDevice );
hipMemcpy( d_b, b, size, hipMemcpyHostToDevice );
/* launch the kernel on the GPU */
/* insert the launch parameters to launch the kernel properly using blocks and threads */
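/* N (2048*2048) is an exact multiple of THREADS_PER_BLOCK, so N/THREADS_PER_BLOCK blocks
cover every element and the kernel needs no bounds check */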
hipLaunchKernelGGL(( add), dim3(N/THREADS_PER_BLOCK),dim3(THREADS_PER_BLOCK), 0, 0, d_a,d_b,d_c);
/* copy result back to host */
hipMemcpy( c, d_c, size, hipMemcpyDeviceToHost );
printf( "c[0] = %d\n",0,c[0] );
printf( "c[%d] = %d\n",N-1, c[N-1] );
/* clean up */
free(a);
free(b);
free(c);
hipFree( d_a );
/* additional clean up*/
hipFree( d_b );
hipFree( d_c );
return 0;
} /* end main */
| 3c0ef6dbe2082b54812b1e31d4fc29baef3b4cd7.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c)
{
/* insert code to calculate the index properly using blockIdx.x, blockDim.x, threadIdx.x */
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
/* experiment with N */
/* how large can it be? */ // Assumes that the dimension fits inside RAM and each dimension is divisible by 32
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
int main()
{
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof( int );
/* allocate space for device copies of a, b, c */
cudaMalloc( (void **) &d_a, size );
cudaMalloc( (void **) &d_b, size );
cudaMalloc( (void **) &d_c, size );
/* allocate space for host copies of a, b, c and setup input values */
a = (int *)malloc( size );
b = (int *)malloc( size );
c = (int *)malloc( size );
for( int i = 0; i < N; i++ )
{
a[i] = b[i] = i;
c[i] = 0;
}
/* copy inputs to device */
cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );
/* launch the kernel on the GPU */
/* insert the launch parameters to launch the kernel properly using blocks and threads */
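/* N (2048*2048) is an exact multiple of THREADS_PER_BLOCK, so N/THREADS_PER_BLOCK blocks
cover every element and the kernel needs no bounds check */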
add<<<N/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(d_a,d_b,d_c);
/* copy result back to host */
cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );
printf( "c[0] = %d\n",0,c[0] );
printf( "c[%d] = %d\n",N-1, c[N-1] );
/* clean up */
free(a);
free(b);
free(c);
cudaFree( d_a );
/* additional clean up*/
cudaFree( d_b );
cudaFree( d_c );
return 0;
} /* end main */
|
65c6e01485a7ced2ece847e2c26af0f27fd6e384.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/LSRO_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LSROLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
//} else if (label_value == -1) {
} else if (label_value < 0) {
loss[index] = 0;
for (int c = 0; c < channels; ++c) {
loss[index] -= 1. / channels * log(max(prob_data[n * dim + c * spatial_dim + s],
Dtype(FLT_MIN)));
}
counts[index] = 1;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void LSROLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( LSROLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void LSROLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
//} else if (label_value == -1) {
} else if (label_value < 0) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] -= 1. / channels;
}
counts[index] = 1;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void LSROLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use to to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( LSROLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LSROLossLayer);
} // namespace caffe
| 65c6e01485a7ced2ece847e2c26af0f27fd6e384.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/LSRO_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void LSROLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
//} else if (label_value == -1) {
} else if (label_value < 0) {
loss[index] = 0;
for (int c = 0; c < channels; ++c) {
loss[index] -= 1. / channels * log(max(prob_data[n * dim + c * spatial_dim + s],
Dtype(FLT_MIN)));
}
counts[index] = 1;
} else {
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void LSROLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
LSROLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void LSROLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
//} else if (label_value == -1) {
} else if (label_value < 0) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] -= 1. / channels;
}
counts[index] = 1;
} else {
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void LSROLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use to to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
LSROLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LSROLossLayer);
} // namespace caffe
|
ba6ce7a0dadecbbad4ddc769c2693bd0498ce711.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include "private.h"
#include "metric_abstraction.h"
#include "tricks.cuh"
#define CLUSTER_DISTANCES_BLOCK_SIZE 512
#define CLUSTER_DISTANCES_SHMEM 12288 // in float-s
#define CLUSTER_RADIUSES_BLOCK_SIZE 512
#define CLUSTER_RADIUSES_SHMEM 8192 // in float-s
#define KNN_BLOCK_SIZE_SHMEM 512
#define KNN_BLOCK_SIZE_GMEM 1024
__constant__ uint32_t d_samples_size;
__constant__ uint32_t d_clusters_size;
__device__ unsigned long long int d_dists_calced;
/// sample_dists must be zero-ed!
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_calc_cluster_radiuses(
uint32_t offset, uint32_t length, const uint32_t *__restrict__ inv_asses,
const uint32_t *__restrict__ inv_asses_offsets,
const F *__restrict__ centroids, const F *__restrict__ samples,
float *__restrict__ sample_dists, float *__restrict__ radiuses) {
volatile uint32_t ci = blockIdx.x * blockDim.x + threadIdx.x;
if (ci >= length) {
return;
}
ci += offset;
// stage 1 - accumulate partial distances for every sample
__shared__ F shcents[CLUSTER_RADIUSES_SHMEM];
volatile const int cent_step = min(
CLUSTER_RADIUSES_SHMEM / blockDim.x, static_cast<unsigned>(d_features_size));
F *volatile const my_cent = shcents + cent_step * threadIdx.x;
for (int cfi = 0; cfi < d_features_size; cfi += cent_step) {
const int fsize = min(cent_step, d_features_size - cfi);
for (int f = 0; f < fsize; f++) {
my_cent[f] = centroids[ci * d_features_size + cfi + f];
}
for (uint32_t ass = inv_asses_offsets[ci]; ass < inv_asses_offsets[ci + 1];
ass++) {
uint64_t sample = inv_asses[ass]; // uint64_t!
sample_dists[sample] += METRIC<M, F>::partial_t(
samples, my_cent, fsize, d_samples_size, cfi, sample);
}
}
// stage 2 - find the maximum distance
float max_dist = -1;
for (uint32_t ass = inv_asses_offsets[ci]; ass < inv_asses_offsets[ci + 1];
ass++) {
float dist = METRIC<M, F>::finalize(sample_dists[inv_asses[ass]]);
if (dist > max_dist) {
max_dist = dist;
}
}
radiuses[ci] = max_dist > -1? max_dist : NAN;
}
/// distances must be zero-ed!
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_calc_cluster_distances(
uint32_t offset, const F *__restrict__ centroids, float *distances) {
volatile const uint32_t bi = blockIdx.x + offset;
const uint32_t bs = CLUSTER_DISTANCES_BLOCK_SIZE;
uint32_t x, y;
const uint32_t n = dupper(d_clusters_size, bs);
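// Map the linear block index bi to (x, y) tile coordinates so that only one triangle of
// the symmetric cluster-distance matrix is computed here; knn_mirror_cluster_distances
// later copies every tile to its transposed position.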
{
float tmp = n + 0.5;
float d = _sqrt(tmp * tmp - 2 * bi);
y = tmp - d;
x = bi + y + (n - y) * (n - y + 1) / 2 - n * (n + 1) / 2;
}
__shared__ F shcents[CLUSTER_DISTANCES_SHMEM];
const uint32_t fstep = CLUSTER_DISTANCES_SHMEM / bs;
F *volatile my_cent = shcents + fstep * threadIdx.x;
// stage 1 - accumulate distances
for (uint16_t fpos = 0; fpos < d_features_size; fpos += fstep) {
__syncthreads();
const uint16_t fsize = min(
fstep, static_cast<uint32_t>(d_features_size - fpos));
uint32_t cbase = x * bs + threadIdx.x;
if (cbase < d_clusters_size) {
for (uint16_t f = 0; f < fsize; f++) {
my_cent[f] = centroids[cbase * d_features_size + fpos + f];
}
}
__syncthreads();
for (uint32_t ti = 0; ti < bs; ti++) {
if ((y * bs + threadIdx.x) < d_clusters_size
&& (x * bs + ti) < d_clusters_size) {
auto other_cent = d_clusters_size <= bs?
shcents + (y * bs + threadIdx.x) * fstep
:
centroids + (y * bs + threadIdx.x) * d_features_size + fpos;
distances[(y * bs + threadIdx.x) * d_clusters_size + x * bs + ti] +=
METRIC<M, F>::partial(other_cent, shcents + ti * fstep, fsize);
}
}
}
// stage 2 - finalize the distances
for (uint32_t ti = 0; ti < bs; ti++) {
if ((y * bs + threadIdx.x) < d_clusters_size
&& (x * bs + ti) < d_clusters_size) {
uint32_t di = (y * bs + threadIdx.x) * d_clusters_size + x * bs + ti;
float dist = distances[di];
dist = METRIC<M, F>::finalize(dist);
distances[di] = dist;
}
}
}
__global__ void knn_mirror_cluster_distances(float *__restrict__ distances) {
const uint32_t bs = CLUSTER_DISTANCES_BLOCK_SIZE;
uint32_t x, y;
const uint32_t n = dupper(d_clusters_size, bs);
{
float tmp = n + 0.5;
float d = _sqrt(tmp * tmp - 2 * blockIdx.x);
y = tmp - d;
x = blockIdx.x + y + (n - y) * (n - y + 1) / 2 - n * (n + 1) / 2;
}
for (uint32_t ti = 0; ti < bs; ti++) {
if ((y * bs + threadIdx.x) < d_clusters_size && (x * bs + ti) < d_clusters_size) {
distances[(x * bs + ti) * d_clusters_size + y * bs + threadIdx.x] =
distances[(y * bs + threadIdx.x) * d_clusters_size + x * bs + ti];
}
}
}
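/// Replaces the root of an implicit binary max-heap of (distance, index) pairs with a new
/// candidate and sifts it down; heap[2*i] holds the distance and heap[2*i+1] the sample
/// index bit-cast to float, so the root always carries the largest retained distance.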
FPATTR void push_sample(uint16_t k, float dist, uint32_t index, float *heap) {
uint16_t pos = 0;
while (true) {
float left, right;
bool left_le, right_le;
if ((2 * pos + 1) < k) {
left = heap[4 * pos + 2];
left_le = dist >= left;
} else {
left_le = true;
}
if ((2 * pos + 2) < k) {
right = heap[4 * pos + 4];
right_le = dist >= right;
} else {
right_le = true;
}
if (left_le && right_le) {
heap[2 * pos] = dist;
*reinterpret_cast<uint32_t *>(heap + 2 * pos + 1) = index;
break;
}
if (!left_le && !right_le) {
if (left <= right) {
heap[2 * pos] = right;
heap[2 * pos + 1] = heap[4 * pos + 5];
pos = 2 * pos + 2;
} else {
heap[2 * pos] = left;
heap[2 * pos + 1] = heap[4 * pos + 3];
pos = 2 * pos + 1;
}
} else if (left_le) {
heap[2 * pos] = right;
heap[2 * pos + 1] = heap[4 * pos + 5];
pos = 2 * pos + 2;
} else {
heap[2 * pos] = left;
heap[2 * pos + 1] = heap[4 * pos + 3];
pos = 2 * pos + 1;
}
}
}
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_assign_shmem(
uint32_t offset, uint32_t length, uint16_t k,
const float *__restrict__ cluster_distances,
const float *__restrict__ cluster_radiuses,
const F *__restrict__ samples, const F *__restrict__ centroids,
const uint32_t *assignments, const uint32_t *inv_asses,
const uint32_t *inv_asses_offsets, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
if (sample >= length) {
return;
}
sample += offset;
volatile uint32_t mycls = assignments[sample];
volatile float mydist = METRIC<M, F>::distance_t(
samples, centroids + mycls * d_features_size, d_samples_size, sample);
extern __shared__ float buffer[];
float *volatile mynearest = buffer + k * 2 * threadIdx.x;
volatile float mndist = FLT_MAX;
for (int i = 0; i < static_cast<int>(k); i++) {
mynearest[i * 2] = FLT_MAX;
}
uint32_t pos_start = inv_asses_offsets[mycls];
uint32_t pos_finish = inv_asses_offsets[mycls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
if (sample == other_sample) {
continue;
}
float dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
for (uint32_t cls = 0; cls < d_clusters_size; cls++) {
if (cls == mycls) {
continue;
}
float cdist = cluster_distances[cls * d_clusters_size + mycls];
if (cdist != cdist) {
continue;
}
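// Triangle inequality lower bound: any member of cluster cls is at least
// (centroid distance - this sample's distance to its own centroid - cls radius) away,
// so the whole cluster can be skipped once that bound exceeds the current k-th nearest.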
float dist = cdist - mydist - cluster_radiuses[cls];
if (dist > mndist) {
continue;
}
uint32_t pos_start = inv_asses_offsets[cls];
uint32_t pos_finish = inv_asses_offsets[cls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
}
for (int i = k - 1; i >= 0; i--) {
neighbors[(sample - offset) * k + i] = reinterpret_cast<uint32_t*>(mynearest)[1];
push_sample(k, -1, UINT32_MAX, mynearest);
}
}
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_assign_gmem(
uint32_t offset, uint32_t length, uint16_t k,
const float *__restrict__ cluster_distances,
const float *__restrict__ cluster_radiuses,
const F *__restrict__ samples, const F *__restrict__ centroids,
const uint32_t *assignments, const uint32_t *inv_asses,
const uint32_t *inv_asses_offsets, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
if (sample >= length) {
return;
}
sample += offset;
volatile uint32_t mycls = assignments[sample];
volatile float mydist = METRIC<M, F>::distance_t(
samples, centroids + mycls * d_features_size, d_samples_size, sample);
float *volatile mynearest =
reinterpret_cast<float*>(neighbors) + (sample - offset) * k * 2;
volatile float mndist = FLT_MAX;
for (int i = 0; i < static_cast<int>(k); i++) {
mynearest[i * 2] = FLT_MAX;
}
uint32_t pos_start = inv_asses_offsets[mycls];
uint32_t pos_finish = inv_asses_offsets[mycls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
if (sample == other_sample) {
continue;
}
float dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
for (uint32_t cls = 0; cls < d_clusters_size; cls++) {
if (cls == mycls) {
continue;
}
float cdist = cluster_distances[cls * d_clusters_size + mycls];
if (cdist != cdist) {
continue;
}
float dist = cdist - mydist - cluster_radiuses[cls];
if (dist > mndist) {
continue;
}
pos_start = inv_asses_offsets[cls];
pos_finish = inv_asses_offsets[cls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
}
for (int i = 0; i < k; i++) {
uint32_t imax = reinterpret_cast<uint32_t*>(mynearest)[1];
push_sample(k - i - 1, mynearest[2 * k - 2 * i - 2],
reinterpret_cast<uint32_t*>(mynearest)[2 * k - 2 * i - 1],
mynearest);
reinterpret_cast<uint32_t*>(mynearest)[2 * k - 2 * i - 1] = imax;
}
for (int i = 0; i < k; i++) {
reinterpret_cast<uint32_t*>(mynearest)[i] =
reinterpret_cast<uint32_t*>(mynearest)[2 * i + 1];
}
}
__global__ void knn_assign_gmem_deinterleave1(
uint32_t length, uint16_t k, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
if (sample >= length) {
return;
}
if (sample % 2 == 1) {
for (int i = 0; i < k; i++) {
neighbors[sample * k + i] = neighbors[sample * 2 * k + i];
}
} else {
for (int i = 0; i < k; i++) {
neighbors[(length + sample) * k + k + i] = neighbors[sample * 2 * k + i];
}
}
}
__global__ void knn_assign_gmem_deinterleave2(
uint32_t length, uint16_t k, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
sample *= 2;
if (sample >= length) {
return;
}
for (int i = 0; i < k; i++) {
neighbors[sample * k + i] = neighbors[(length + sample) * k + k + i];
}
}
extern "C" {
KMCUDAResult knn_cuda_setup(
uint32_t h_samples_size, uint16_t h_features_size, uint32_t h_clusters_size,
const std::vector<int> &devs, int32_t verbosity) {
FOR_EACH_DEV(
CUCH(hipMemcpyToSymbol(d_samples_size, &h_samples_size, sizeof(h_samples_size)),
kmcudaMemoryCopyError);
CUCH(hipMemcpyToSymbol(d_features_size, &h_features_size, sizeof(h_features_size)),
kmcudaMemoryCopyError);
CUCH(hipMemcpyToSymbol(d_clusters_size, &h_clusters_size, sizeof(h_clusters_size)),
kmcudaMemoryCopyError);
uint64_t zero = 0;
CUCH(hipMemcpyToSymbol(d_dists_calced, &zero, sizeof(d_dists_calced)),
kmcudaMemoryCopyError);
);
return kmcudaSuccess;
}
int knn_cuda_neighbors_mem_multiplier(uint16_t k, int dev, int verbosity) {
hipDeviceProp_t props;
hipGetDeviceProperties(&props, dev);
int shmem_size = static_cast<int>(props.sharedMemPerBlock);
int needed_shmem_size = KNN_BLOCK_SIZE_SHMEM * 2 * k * sizeof(uint32_t);
if (needed_shmem_size > shmem_size) {
INFO("device #%d: needed shmem size %d > %d => using global memory\n",
dev, needed_shmem_size, shmem_size);
return 2;
}
return 1;
}
KMCUDAResult knn_cuda_calc(
uint16_t k, uint32_t h_samples_size, uint32_t h_clusters_size,
uint16_t h_features_size, KMCUDADistanceMetric metric,
const std::vector<int> &devs, int fp16x2, int verbosity,
const udevptrs<float> &samples, const udevptrs<float> ¢roids,
const udevptrs<uint32_t> &assignments, const udevptrs<uint32_t> &inv_asses,
const udevptrs<uint32_t> &inv_asses_offsets, udevptrs<float> *distances,
udevptrs<float>* sample_dists, udevptrs<float> *radiuses,
udevptrs<uint32_t> *neighbors) {
auto plan = distribute(h_clusters_size, h_features_size * sizeof(float), devs);
if (verbosity > 1) {
print_plan("plan_calc_radiuses", plan);
}
INFO("calculating the cluster radiuses...\n");
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
if (length == 0) {
continue;
}
dim3 block(CLUSTER_RADIUSES_BLOCK_SIZE, 1, 1);
dim3 grid(upper(h_clusters_size, block.x), 1, 1);
float *dsd;
if (h_clusters_size * h_clusters_size >= h_samples_size) {
dsd = (*distances)[devi].get();
} else {
dsd = (*sample_dists)[devi].get();
}
hipLaunchKernelGGL(( KERNEL_SWITCH(knn_calc_cluster_radiuses), , dim3(grid), dim3(block), 0, 0,
offset, length, inv_asses[devi].get(), inv_asses_offsets[devi].get(),
reinterpret_cast<const F*>(centroids[devi].get()),
reinterpret_cast<const F*>(samples[devi].get()),
dsd, (*radiuses)[devi].get()));
);
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
FOR_OTHER_DEVS(
CUP2P(radiuses, offset, length);
);
);
if (h_clusters_size * h_clusters_size >= h_samples_size) {
CUMEMSET_ASYNC(*distances, 0, h_samples_size);
}
uint32_t dist_blocks_dim = upper(
h_clusters_size, static_cast<uint32_t>(CLUSTER_DISTANCES_BLOCK_SIZE));
uint32_t dist_blocks_n = (2 * dist_blocks_dim + 1) * (2 * dist_blocks_dim + 1) / 8;
plan = distribute(dist_blocks_n, 512, devs);
{ // align across CLUSTER_DISTANCES_BLOCK_SIZE horizontal boundaries
uint32_t align = 0;
for (auto& p : plan) {
uint32_t offset, length;
std::tie(offset, length) = p;
offset += align;
std::get<0>(p) = offset;
uint32_t n = dist_blocks_dim;
float tmp = n + 0.5;
float d = sqrt(tmp * tmp - 2 * (offset + length));
uint32_t y = tmp - d;
uint32_t x = offset + length + (n - y) * (n - y + 1) / 2 - n * (n + 1) / 2;
if (x > 0) {
align = n - y - x;
std::get<1>(p) += align;
}
}
}
if (verbosity > 1) {
print_plan("plan_calc_cluster_distances", plan);
}
INFO("calculating the centroid distance matrix...\n");
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
if (length == 0) {
continue;
}
dim3 block(CLUSTER_DISTANCES_BLOCK_SIZE, 1, 1);
dim3 grid(length, 1, 1);
hipLaunchKernelGGL(( KERNEL_SWITCH(knn_calc_cluster_distances), , dim3(grid), dim3(block), 0, 0,
offset, reinterpret_cast<const F*>(centroids[devi].get()),
(*distances)[devi].get()));
);
FOR_EACH_DEVI(
uint32_t y_start, y_finish;
{
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
float tmp = dist_blocks_dim + 0.5;
float d = sqrt(tmp * tmp - 2 * offset);
y_start = tmp - d;
d = sqrt(tmp * tmp - 2 * (offset + length));
y_finish = tmp - d;
}
if (y_finish == y_start) {
continue;
}
uint32_t p_offset = y_start * h_clusters_size * CLUSTER_DISTANCES_BLOCK_SIZE;
uint32_t p_size = (y_finish - y_start) * h_clusters_size * CLUSTER_DISTANCES_BLOCK_SIZE;
p_size = ::min(p_size, h_clusters_size * h_clusters_size - p_offset);
FOR_OTHER_DEVS(
CUP2P(distances, p_offset, p_size);
);
);
FOR_EACH_DEVI(
dim3 block(CLUSTER_DISTANCES_BLOCK_SIZE, 1, 1);
dim3 grid(dist_blocks_n, 1, 1);
hipLaunchKernelGGL(( knn_mirror_cluster_distances), dim3(grid), dim3(block), 0, 0, (*distances)[devi].get());
);
plan = distribute(h_samples_size, h_features_size * sizeof(float), devs);
INFO("searching for the nearest neighbors...\n");
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
if (knn_cuda_neighbors_mem_multiplier(k, devs[devi], 1) == 2) {
dim3 block(KNN_BLOCK_SIZE_GMEM, 1, 1);
dim3 grid(upper(h_samples_size, block.x), 1, 1);
KERNEL_SWITCH(knn_assign_gmem, <<<grid, block>>>(
offset, length, k, (*distances)[devi].get(), (*radiuses)[devi].get(),
reinterpret_cast<const F*>(samples[devi].get()),
reinterpret_cast<const F*>(centroids[devi].get()),
assignments[devi].get(), inv_asses[devi].get(),
inv_asses_offsets[devi].get(), (*neighbors)[devi].get()));
hipLaunchKernelGGL(( knn_assign_gmem_deinterleave1), dim3(grid), dim3(block), 0, 0,
length, k, (*neighbors)[devi].get());
dim3 grid2(upper(h_samples_size, 2 * block.x), 1, 1);
hipLaunchKernelGGL(( knn_assign_gmem_deinterleave2), dim3(grid2), dim3(block), 0, 0,
length, k, (*neighbors)[devi].get());
} else {
dim3 block(KNN_BLOCK_SIZE_SHMEM, 1, 1);
dim3 grid(upper(h_samples_size, block.x), 1, 1);
KERNEL_SWITCH(
knn_assign_shmem,
<<<grid, block, KNN_BLOCK_SIZE_SHMEM * 2 * k * sizeof(uint32_t)>>>(
offset, length, k, (*distances)[devi].get(), (*radiuses)[devi].get(),
reinterpret_cast<const F*>(samples[devi].get()),
reinterpret_cast<const F*>(centroids[devi].get()),
assignments[devi].get(), inv_asses[devi].get(),
inv_asses_offsets[devi].get(), (*neighbors)[devi].get()));
}
);
uint64_t dists_calced = 0;
FOR_EACH_DEV(
uint64_t h_dists_calced = 0;
CUCH(hipMemcpyFromSymbol(&h_dists_calced, d_dists_calced, sizeof(h_dists_calced)),
kmcudaMemoryCopyError);
DEBUG("#%d dists_calced: %" PRIu64 "\n", dev, h_dists_calced);
dists_calced += h_dists_calced;
);
uint64_t max_dists_calced = static_cast<uint64_t>(h_samples_size) * h_samples_size;
INFO("calculated %f of all the distances\n", (dists_calced + .0) / max_dists_calced);
return kmcudaSuccess;
}
} // extern "C"
| ba6ce7a0dadecbbad4ddc769c2693bd0498ce711.cu | #include <cfloat>
#include "private.h"
#include "metric_abstraction.h"
#include "tricks.cuh"
#define CLUSTER_DISTANCES_BLOCK_SIZE 512
#define CLUSTER_DISTANCES_SHMEM 12288 // in float-s
#define CLUSTER_RADIUSES_BLOCK_SIZE 512
#define CLUSTER_RADIUSES_SHMEM 8192 // in float-s
#define KNN_BLOCK_SIZE_SHMEM 512
#define KNN_BLOCK_SIZE_GMEM 1024
__constant__ uint32_t d_samples_size;
__constant__ uint32_t d_clusters_size;
__device__ unsigned long long int d_dists_calced;
/// sample_dists must be zero-ed!
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_calc_cluster_radiuses(
uint32_t offset, uint32_t length, const uint32_t *__restrict__ inv_asses,
const uint32_t *__restrict__ inv_asses_offsets,
const F *__restrict__ centroids, const F *__restrict__ samples,
float *__restrict__ sample_dists, float *__restrict__ radiuses) {
volatile uint32_t ci = blockIdx.x * blockDim.x + threadIdx.x;
if (ci >= length) {
return;
}
ci += offset;
// stage 1 - accumulate partial distances for every sample
__shared__ F shcents[CLUSTER_RADIUSES_SHMEM];
volatile const int cent_step = min(
CLUSTER_RADIUSES_SHMEM / blockDim.x, static_cast<unsigned>(d_features_size));
F *volatile const my_cent = shcents + cent_step * threadIdx.x;
for (int cfi = 0; cfi < d_features_size; cfi += cent_step) {
const int fsize = min(cent_step, d_features_size - cfi);
for (int f = 0; f < fsize; f++) {
my_cent[f] = centroids[ci * d_features_size + cfi + f];
}
for (uint32_t ass = inv_asses_offsets[ci]; ass < inv_asses_offsets[ci + 1];
ass++) {
uint64_t sample = inv_asses[ass]; // uint64_t!
sample_dists[sample] += METRIC<M, F>::partial_t(
samples, my_cent, fsize, d_samples_size, cfi, sample);
}
}
// stage 2 - find the maximum distance
float max_dist = -1;
for (uint32_t ass = inv_asses_offsets[ci]; ass < inv_asses_offsets[ci + 1];
ass++) {
float dist = METRIC<M, F>::finalize(sample_dists[inv_asses[ass]]);
if (dist > max_dist) {
max_dist = dist;
}
}
radiuses[ci] = max_dist > -1? max_dist : NAN;
}
/// distances must be zero-ed!
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_calc_cluster_distances(
uint32_t offset, const F *__restrict__ centroids, float *distances) {
volatile const uint32_t bi = blockIdx.x + offset;
const uint32_t bs = CLUSTER_DISTANCES_BLOCK_SIZE;
uint32_t x, y;
const uint32_t n = dupper(d_clusters_size, bs);
{
float tmp = n + 0.5;
float d = _sqrt(tmp * tmp - 2 * bi);
y = tmp - d;
x = bi + y + (n - y) * (n - y + 1) / 2 - n * (n + 1) / 2;
}
__shared__ F shcents[CLUSTER_DISTANCES_SHMEM];
const uint32_t fstep = CLUSTER_DISTANCES_SHMEM / bs;
F *volatile my_cent = shcents + fstep * threadIdx.x;
// stage 1 - accumulate distances
for (uint16_t fpos = 0; fpos < d_features_size; fpos += fstep) {
__syncthreads();
const uint16_t fsize = min(
fstep, static_cast<uint32_t>(d_features_size - fpos));
uint32_t cbase = x * bs + threadIdx.x;
if (cbase < d_clusters_size) {
for (uint16_t f = 0; f < fsize; f++) {
my_cent[f] = centroids[cbase * d_features_size + fpos + f];
}
}
__syncthreads();
for (uint32_t ti = 0; ti < bs; ti++) {
if ((y * bs + threadIdx.x) < d_clusters_size
&& (x * bs + ti) < d_clusters_size) {
auto other_cent = d_clusters_size <= bs?
shcents + (y * bs + threadIdx.x) * fstep
:
centroids + (y * bs + threadIdx.x) * d_features_size + fpos;
distances[(y * bs + threadIdx.x) * d_clusters_size + x * bs + ti] +=
METRIC<M, F>::partial(other_cent, shcents + ti * fstep, fsize);
}
}
}
// stage 2 - finalize the distances
for (uint32_t ti = 0; ti < bs; ti++) {
if ((y * bs + threadIdx.x) < d_clusters_size
&& (x * bs + ti) < d_clusters_size) {
uint32_t di = (y * bs + threadIdx.x) * d_clusters_size + x * bs + ti;
float dist = distances[di];
dist = METRIC<M, F>::finalize(dist);
distances[di] = dist;
}
}
}
__global__ void knn_mirror_cluster_distances(float *__restrict__ distances) {
const uint32_t bs = CLUSTER_DISTANCES_BLOCK_SIZE;
uint32_t x, y;
const uint32_t n = dupper(d_clusters_size, bs);
{
float tmp = n + 0.5;
float d = _sqrt(tmp * tmp - 2 * blockIdx.x);
y = tmp - d;
x = blockIdx.x + y + (n - y) * (n - y + 1) / 2 - n * (n + 1) / 2;
}
for (uint32_t ti = 0; ti < bs; ti++) {
if ((y * bs + threadIdx.x) < d_clusters_size && (x * bs + ti) < d_clusters_size) {
distances[(x * bs + ti) * d_clusters_size + y * bs + threadIdx.x] =
distances[(y * bs + threadIdx.x) * d_clusters_size + x * bs + ti];
}
}
}
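/* Added explanatory note: in the kNN assignment kernels below, `heap` (and
   the per-thread `mynearest` buffers) is an interleaved array of k
   (distance, index) pairs: heap[2*i] stores the distance as a float and
   heap[2*i + 1] stores the sample index reinterpreted as a uint32_t. The
   pairs form a binary max-heap keyed on distance, so heap[0] is the largest
   of the k best distances found so far. push_sample() effectively replaces
   the root with a new candidate and sifts it down; callers compare new
   distances against heap[0] (mndist) to decide whether a candidate can
   enter the current top-k at all. */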
FPATTR void push_sample(uint16_t k, float dist, uint32_t index, float *heap) {
uint16_t pos = 0;
while (true) {
float left, right;
bool left_le, right_le;
if ((2 * pos + 1) < k) {
left = heap[4 * pos + 2];
left_le = dist >= left;
} else {
left_le = true;
}
if ((2 * pos + 2) < k) {
right = heap[4 * pos + 4];
right_le = dist >= right;
} else {
right_le = true;
}
if (left_le && right_le) {
heap[2 * pos] = dist;
*reinterpret_cast<uint32_t *>(heap + 2 * pos + 1) = index;
break;
}
if (!left_le && !right_le) {
if (left <= right) {
heap[2 * pos] = right;
heap[2 * pos + 1] = heap[4 * pos + 5];
pos = 2 * pos + 2;
} else {
heap[2 * pos] = left;
heap[2 * pos + 1] = heap[4 * pos + 3];
pos = 2 * pos + 1;
}
} else if (left_le) {
heap[2 * pos] = right;
heap[2 * pos + 1] = heap[4 * pos + 5];
pos = 2 * pos + 2;
} else {
heap[2 * pos] = left;
heap[2 * pos + 1] = heap[4 * pos + 3];
pos = 2 * pos + 1;
}
}
}
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_assign_shmem(
uint32_t offset, uint32_t length, uint16_t k,
const float *__restrict__ cluster_distances,
const float *__restrict__ cluster_radiuses,
const F *__restrict__ samples, const F *__restrict__ centroids,
const uint32_t *assignments, const uint32_t *inv_asses,
const uint32_t *inv_asses_offsets, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
if (sample >= length) {
return;
}
sample += offset;
volatile uint32_t mycls = assignments[sample];
volatile float mydist = METRIC<M, F>::distance_t(
samples, centroids + mycls * d_features_size, d_samples_size, sample);
extern __shared__ float buffer[];
float *volatile mynearest = buffer + k * 2 * threadIdx.x;
volatile float mndist = FLT_MAX;
for (int i = 0; i < static_cast<int>(k); i++) {
mynearest[i * 2] = FLT_MAX;
}
uint32_t pos_start = inv_asses_offsets[mycls];
uint32_t pos_finish = inv_asses_offsets[mycls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
if (sample == other_sample) {
continue;
}
float dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
for (uint32_t cls = 0; cls < d_clusters_size; cls++) {
if (cls == mycls) {
continue;
}
float cdist = cluster_distances[cls * d_clusters_size + mycls];
if (cdist != cdist) {
continue;
}
float dist = cdist - mydist - cluster_radiuses[cls];
if (dist > mndist) {
continue;
}
uint32_t pos_start = inv_asses_offsets[cls];
uint32_t pos_finish = inv_asses_offsets[cls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
}
for (int i = k - 1; i >= 0; i--) {
neighbors[(sample - offset) * k + i] = reinterpret_cast<uint32_t*>(mynearest)[1];
push_sample(k, -1, UINT32_MAX, mynearest);
}
}
template <KMCUDADistanceMetric M, typename F>
__global__ void knn_assign_gmem(
uint32_t offset, uint32_t length, uint16_t k,
const float *__restrict__ cluster_distances,
const float *__restrict__ cluster_radiuses,
const F *__restrict__ samples, const F *__restrict__ centroids,
const uint32_t *assignments, const uint32_t *inv_asses,
const uint32_t *inv_asses_offsets, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
if (sample >= length) {
return;
}
sample += offset;
volatile uint32_t mycls = assignments[sample];
volatile float mydist = METRIC<M, F>::distance_t(
samples, centroids + mycls * d_features_size, d_samples_size, sample);
float *volatile mynearest =
reinterpret_cast<float*>(neighbors) + (sample - offset) * k * 2;
volatile float mndist = FLT_MAX;
for (int i = 0; i < static_cast<int>(k); i++) {
mynearest[i * 2] = FLT_MAX;
}
uint32_t pos_start = inv_asses_offsets[mycls];
uint32_t pos_finish = inv_asses_offsets[mycls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
if (sample == other_sample) {
continue;
}
float dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
for (uint32_t cls = 0; cls < d_clusters_size; cls++) {
if (cls == mycls) {
continue;
}
float cdist = cluster_distances[cls * d_clusters_size + mycls];
if (cdist != cdist) {
continue;
}
float dist = cdist - mydist - cluster_radiuses[cls];
if (dist > mndist) {
continue;
}
pos_start = inv_asses_offsets[cls];
pos_finish = inv_asses_offsets[cls + 1];
atomicAdd(&d_dists_calced, pos_finish - pos_start);
for (uint32_t pos = pos_start; pos < pos_finish; pos++) {
uint64_t other_sample = inv_asses[pos];
dist = METRIC<M, F>::distance_tt(
samples, d_samples_size, sample, other_sample);
if (dist <= mndist) {
push_sample(k, dist, other_sample, mynearest);
mndist = mynearest[0];
}
}
}
for (int i = 0; i < k; i++) {
uint32_t imax = reinterpret_cast<uint32_t*>(mynearest)[1];
push_sample(k - i - 1, mynearest[2 * k - 2 * i - 2],
reinterpret_cast<uint32_t*>(mynearest)[2 * k - 2 * i - 1],
mynearest);
reinterpret_cast<uint32_t*>(mynearest)[2 * k - 2 * i - 1] = imax;
}
for (int i = 0; i < k; i++) {
reinterpret_cast<uint32_t*>(mynearest)[i] =
reinterpret_cast<uint32_t*>(mynearest)[2 * i + 1];
}
}
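/* Added explanatory note: the global-memory variant above keeps its scratch
   heap of (distance, index) pairs directly inside `neighbors`, so each
   sample temporarily occupies 2*k uint32_t slots, with the final k indices
   compacted into the first k slots at the end of the kernel. The two
   kernels below then repack the buffer in place to k indices per sample:
   pass 1 moves odd-numbered samples to their final location and parks
   even-numbered samples in the upper half of the buffer, and pass 2 copies
   the even-numbered samples back. Splitting the repacking this way avoids
   overwriting entries that have not been moved yet. */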
__global__ void knn_assign_gmem_deinterleave1(
uint32_t length, uint16_t k, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
if (sample >= length) {
return;
}
if (sample % 2 == 1) {
for (int i = 0; i < k; i++) {
neighbors[sample * k + i] = neighbors[sample * 2 * k + i];
}
} else {
for (int i = 0; i < k; i++) {
neighbors[(length + sample) * k + k + i] = neighbors[sample * 2 * k + i];
}
}
}
__global__ void knn_assign_gmem_deinterleave2(
uint32_t length, uint16_t k, uint32_t *neighbors) {
volatile uint64_t sample = blockIdx.x * blockDim.x + threadIdx.x;
sample *= 2;
if (sample >= length) {
return;
}
for (int i = 0; i < k; i++) {
neighbors[sample * k + i] = neighbors[(length + sample) * k + k + i];
}
}
extern "C" {
KMCUDAResult knn_cuda_setup(
uint32_t h_samples_size, uint16_t h_features_size, uint32_t h_clusters_size,
const std::vector<int> &devs, int32_t verbosity) {
FOR_EACH_DEV(
CUCH(cudaMemcpyToSymbol(d_samples_size, &h_samples_size, sizeof(h_samples_size)),
kmcudaMemoryCopyError);
CUCH(cudaMemcpyToSymbol(d_features_size, &h_features_size, sizeof(h_features_size)),
kmcudaMemoryCopyError);
CUCH(cudaMemcpyToSymbol(d_clusters_size, &h_clusters_size, sizeof(h_clusters_size)),
kmcudaMemoryCopyError);
uint64_t zero = 0;
CUCH(cudaMemcpyToSymbol(d_dists_calced, &zero, sizeof(d_dists_calced)),
kmcudaMemoryCopyError);
);
return kmcudaSuccess;
}
int knn_cuda_neighbors_mem_multiplier(uint16_t k, int dev, int verbosity) {
cudaDeviceProp props;
cudaGetDeviceProperties(&props, dev);
int shmem_size = static_cast<int>(props.sharedMemPerBlock);
int needed_shmem_size = KNN_BLOCK_SIZE_SHMEM * 2 * k * sizeof(uint32_t);
if (needed_shmem_size > shmem_size) {
INFO("device #%d: needed shmem size %d > %d => using global memory\n",
dev, needed_shmem_size, shmem_size);
return 2;
}
return 1;
}
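/* Worked example for the check above (added note, not part of the original
   source): with KNN_BLOCK_SIZE_SHMEM = 512 and k = 20, the shared-memory
   kernel needs 512 * 2 * 20 * sizeof(uint32_t) = 81920 bytes of shared
   memory per block. On a device with 48 KiB (49152 bytes) per block this
   exceeds the limit, so the function returns 2 and the caller falls back to
   knn_assign_gmem, which keeps its per-sample heap in the neighbors buffer
   and therefore needs twice the memory - presumably the reason this is
   called a memory "multiplier". */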
KMCUDAResult knn_cuda_calc(
uint16_t k, uint32_t h_samples_size, uint32_t h_clusters_size,
uint16_t h_features_size, KMCUDADistanceMetric metric,
const std::vector<int> &devs, int fp16x2, int verbosity,
const udevptrs<float> &samples, const udevptrs<float> ¢roids,
const udevptrs<uint32_t> &assignments, const udevptrs<uint32_t> &inv_asses,
const udevptrs<uint32_t> &inv_asses_offsets, udevptrs<float> *distances,
udevptrs<float>* sample_dists, udevptrs<float> *radiuses,
udevptrs<uint32_t> *neighbors) {
auto plan = distribute(h_clusters_size, h_features_size * sizeof(float), devs);
if (verbosity > 1) {
print_plan("plan_calc_radiuses", plan);
}
INFO("calculating the cluster radiuses...\n");
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
if (length == 0) {
continue;
}
dim3 block(CLUSTER_RADIUSES_BLOCK_SIZE, 1, 1);
dim3 grid(upper(h_clusters_size, block.x), 1, 1);
float *dsd;
if (h_clusters_size * h_clusters_size >= h_samples_size) {
dsd = (*distances)[devi].get();
} else {
dsd = (*sample_dists)[devi].get();
}
KERNEL_SWITCH(knn_calc_cluster_radiuses, <<<grid, block>>>(
offset, length, inv_asses[devi].get(), inv_asses_offsets[devi].get(),
reinterpret_cast<const F*>(centroids[devi].get()),
reinterpret_cast<const F*>(samples[devi].get()),
dsd, (*radiuses)[devi].get()));
);
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
FOR_OTHER_DEVS(
CUP2P(radiuses, offset, length);
);
);
if (h_clusters_size * h_clusters_size >= h_samples_size) {
CUMEMSET_ASYNC(*distances, 0, h_samples_size);
}
uint32_t dist_blocks_dim = upper(
h_clusters_size, static_cast<uint32_t>(CLUSTER_DISTANCES_BLOCK_SIZE));
uint32_t dist_blocks_n = (2 * dist_blocks_dim + 1) * (2 * dist_blocks_dim + 1) / 8;
plan = distribute(dist_blocks_n, 512, devs);
{ // align across CLUSTER_DISTANCES_BLOCK_SIZE horizontal boundaries
uint32_t align = 0;
for (auto& p : plan) {
uint32_t offset, length;
std::tie(offset, length) = p;
offset += align;
std::get<0>(p) = offset;
uint32_t n = dist_blocks_dim;
float tmp = n + 0.5;
float d = sqrt(tmp * tmp - 2 * (offset + length));
uint32_t y = tmp - d;
uint32_t x = offset + length + (n - y) * (n - y + 1) / 2 - n * (n + 1) / 2;
if (x > 0) {
align = n - y - x;
std::get<1>(p) += align;
}
}
}
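/* Added explanatory note: the centroid distance matrix is computed as a
   triangle of CLUSTER_DISTANCES_BLOCK_SIZE-wide tiles, and a linear tile
   index i is mapped back to tile coordinates (x, y) by inverting a
   triangular-number sum: with tmp = n + 0.5, y = tmp - sqrt(tmp*tmp - 2*i),
   and x follows from the remainder (the same formula appears inside
   knn_calc_cluster_distances and in the loop below). The adjustment above
   grows each device's share of tiles so that it ends exactly on a row
   boundary, which appears to be what keeps the later row-wise peer-to-peer
   copies of the distance matrix consistent. */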
if (verbosity > 1) {
print_plan("plan_calc_cluster_distances", plan);
}
INFO("calculating the centroid distance matrix...\n");
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
if (length == 0) {
continue;
}
dim3 block(CLUSTER_DISTANCES_BLOCK_SIZE, 1, 1);
dim3 grid(length, 1, 1);
KERNEL_SWITCH(knn_calc_cluster_distances, <<<grid, block>>>(
offset, reinterpret_cast<const F*>(centroids[devi].get()),
(*distances)[devi].get()));
);
FOR_EACH_DEVI(
uint32_t y_start, y_finish;
{
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
float tmp = dist_blocks_dim + 0.5;
float d = sqrt(tmp * tmp - 2 * offset);
y_start = tmp - d;
d = sqrt(tmp * tmp - 2 * (offset + length));
y_finish = tmp - d;
}
if (y_finish == y_start) {
continue;
}
uint32_t p_offset = y_start * h_clusters_size * CLUSTER_DISTANCES_BLOCK_SIZE;
uint32_t p_size = (y_finish - y_start) * h_clusters_size * CLUSTER_DISTANCES_BLOCK_SIZE;
p_size = std::min(p_size, h_clusters_size * h_clusters_size - p_offset);
FOR_OTHER_DEVS(
CUP2P(distances, p_offset, p_size);
);
);
FOR_EACH_DEVI(
dim3 block(CLUSTER_DISTANCES_BLOCK_SIZE, 1, 1);
dim3 grid(dist_blocks_n, 1, 1);
knn_mirror_cluster_distances<<<grid, block>>>((*distances)[devi].get());
);
plan = distribute(h_samples_size, h_features_size * sizeof(float), devs);
INFO("searching for the nearest neighbors...\n");
FOR_EACH_DEVI(
uint32_t offset, length;
std::tie(offset, length) = plan[devi];
if (knn_cuda_neighbors_mem_multiplier(k, devs[devi], 1) == 2) {
dim3 block(KNN_BLOCK_SIZE_GMEM, 1, 1);
dim3 grid(upper(h_samples_size, block.x), 1, 1);
KERNEL_SWITCH(knn_assign_gmem, <<<grid, block>>>(
offset, length, k, (*distances)[devi].get(), (*radiuses)[devi].get(),
reinterpret_cast<const F*>(samples[devi].get()),
reinterpret_cast<const F*>(centroids[devi].get()),
assignments[devi].get(), inv_asses[devi].get(),
inv_asses_offsets[devi].get(), (*neighbors)[devi].get()));
knn_assign_gmem_deinterleave1<<<grid, block>>>(
length, k, (*neighbors)[devi].get());
dim3 grid2(upper(h_samples_size, 2 * block.x), 1, 1);
knn_assign_gmem_deinterleave2<<<grid2, block>>>(
length, k, (*neighbors)[devi].get());
} else {
dim3 block(KNN_BLOCK_SIZE_SHMEM, 1, 1);
dim3 grid(upper(h_samples_size, block.x), 1, 1);
KERNEL_SWITCH(
knn_assign_shmem,
<<<grid, block, KNN_BLOCK_SIZE_SHMEM * 2 * k * sizeof(uint32_t)>>>(
offset, length, k, (*distances)[devi].get(), (*radiuses)[devi].get(),
reinterpret_cast<const F*>(samples[devi].get()),
reinterpret_cast<const F*>(centroids[devi].get()),
assignments[devi].get(), inv_asses[devi].get(),
inv_asses_offsets[devi].get(), (*neighbors)[devi].get()));
}
);
uint64_t dists_calced = 0;
FOR_EACH_DEV(
uint64_t h_dists_calced = 0;
CUCH(cudaMemcpyFromSymbol(&h_dists_calced, d_dists_calced, sizeof(h_dists_calced)),
kmcudaMemoryCopyError);
DEBUG("#%d dists_calced: %" PRIu64 "\n", dev, h_dists_calced);
dists_calced += h_dists_calced;
);
uint64_t max_dists_calced = static_cast<uint64_t>(h_samples_size) * h_samples_size;
INFO("calculated %f of all the distances\n", (dists_calced + .0) / max_dists_calced);
return kmcudaSuccess;
}
} // extern "C"
|
34400fbcbbc89128903ec5af3440cac282643b7e.hip | // !!! This is a file automatically generated by hipify!!!
#include "../matmpidensecupm.hpp"
using namespace Petsc::mat::cupm;
using Petsc::device::cupm::DeviceType;
static constexpr impl::MatDense_MPI_CUPM<DeviceType::CUDA> mat_cupm{};
/*MC
MATDENSECUDA - "densecuda" - A matrix type to be used for dense matrices on GPUs.
This matrix type is identical to `MATSEQDENSECUDA` when constructed with a single process
communicator, and `MATMPIDENSECUDA` otherwise.
Options Database Key:
. -mat_type densecuda - sets the matrix type to `MATDENSECUDA` during a call to
`MatSetFromOptions()`
Level: beginner
.seealso: [](ch_matrices), `Mat`, `MATSEQDENSECUDA`, `MATMPIDENSECUDA`, `MATSEQDENSEHIP`,
`MATMPIDENSEHIP`, `MATDENSE`
M*/
/*MC
MATMPIDENSECUDA - "mpidensecuda" - A matrix type to be used for distributed dense matrices on
GPUs.
Options Database Key:
. -mat_type mpidensecuda - sets the matrix type to `MATMPIDENSECUDA` during a call to
`MatSetFromOptions()`
Level: beginner
.seealso: [](ch_matrices), `Mat`, `MATDENSECUDA`, `MATMPIDENSE`, `MATSEQDENSE`,
`MATSEQDENSECUDA`, `MATSEQDENSEHIP`
M*/
PETSC_INTERN PetscErrorCode MatCreate_MPIDenseCUDA(Mat A)
{
PetscFunctionBegin;
PetscCall(mat_cupm.Create(A));
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatConvert_MPIDense_MPIDenseCUDA(Mat A, MatType type, MatReuse reuse, Mat *ret)
{
PetscFunctionBegin;
PetscCall(mat_cupm.Convert_MPIDense_MPIDenseCUPM(A, type, reuse, ret));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatCreateDenseCUDA - Creates a matrix in `MATDENSECUDA` format using CUDA.
Collective
Input Parameters:
+ comm - MPI communicator
. m - number of local rows (or `PETSC_DECIDE` to have calculated if `M` is given)
. n - number of local columns (or `PETSC_DECIDE` to have calculated if `N` is given)
. M - number of global rows (or `PETSC_DECIDE` to have calculated if `m` is given)
. N - number of global columns (or `PETSC_DECIDE` to have calculated if `n` is given)
- data - optional location of GPU matrix data. Pass `NULL` to have PETSc control matrix memory allocation.
Output Parameter:
. A - the matrix
Level: intermediate
.seealso: `MATDENSECUDA`, `MatCreate()`, `MatCreateDense()`
@*/
PetscErrorCode MatCreateDenseCUDA(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscScalar *data, Mat *A)
{
PetscFunctionBegin;
PetscCall(MatCreateDenseCUPM<DeviceType::CUDA>(comm, m, n, M, N, data, A));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDAPlaceArray - Allows one to replace the GPU array in a `MATDENSECUDA` matrix with an
array provided by the user. This is useful to avoid copying an array into a matrix.
Not Collective
Input Parameters:
+ mat - the matrix
- array - the array in column major order
Level: developer
Note:
You can return to the original array with a call to `MatDenseCUDAResetArray()`. The user is
responsible for freeing this array; it will not be freed when the matrix is destroyed. The
array must have been allocated with `hipMalloc()`.
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArray()`, `MatDenseCUDAResetArray()`,
`MatDenseCUDAReplaceArray()`
@*/
PetscErrorCode MatDenseCUDAPlaceArray(Mat mat, const PetscScalar *array)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMPlaceArray<DeviceType::CUDA>(mat, array));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDAResetArray - Resets the matrix array to the array it previously had before the call to
`MatDenseCUDAPlaceArray()`
Not Collective
Input Parameter:
. mat - the matrix
Level: developer
Note:
You can only call this after a call to `MatDenseCUDAPlaceArray()`
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArray()`, `MatDenseCUDAPlaceArray()`
@*/
PetscErrorCode MatDenseCUDAResetArray(Mat mat)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMResetArray<DeviceType::CUDA>(mat));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDAReplaceArray - Allows one to replace the GPU array in a `MATDENSECUDA` matrix
with an array provided by the user. This is useful to avoid copying an array into a matrix.
Not Collective
Input Parameters:
+ mat - the matrix
- array - the array in column major order
Level: developer
Note:
This permanently replaces the GPU array and frees the memory associated with the old GPU
array. The memory passed in CANNOT be freed by the user. It will be freed when the matrix is
destroyed. The array should respect the matrix leading dimension.
.seealso: `MatDenseCUDAGetArray()`, `MatDenseCUDAPlaceArray()`, `MatDenseCUDAResetArray()`
@*/
PetscErrorCode MatDenseCUDAReplaceArray(Mat mat, const PetscScalar *array)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMReplaceArray<DeviceType::CUDA>(mat, array));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDAGetArrayWrite - Provides write access to the CUDA buffer inside a `MATDENSECUDA`
matrix.
Not Collective
Input Parameter:
. A - the matrix
Output Parameter:
. a - the GPU array in column major order
Level: developer
Notes:
The data on the GPU may not be updated due to operations done on the CPU. If you need updated
data, use `MatDenseCUDAGetArray()`.
The array must be restored with `MatDenseCUDARestoreArrayWrite()` when no longer needed.
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArray()`, `MatDenseCUDARestoreArray()`,
`MatDenseCUDARestoreArrayWrite()`, `MatDenseCUDAGetArrayRead()`,
`MatDenseCUDARestoreArrayRead()`
@*/
PetscErrorCode MatDenseCUDAGetArrayWrite(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMGetArrayWrite<DeviceType::CUDA>(A, a));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDARestoreArrayWrite - Restore write access to the CUDA buffer inside a
`MATDENSECUDA` matrix previously obtained with `MatDenseCUDAGetArrayWrite()`.
Not Collective
Input Parameters:
+ A - the matrix
- a - the GPU array in column major order
Level: developer
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArray()`, `MatDenseCUDARestoreArray()`,
`MatDenseCUDAGetArrayWrite()`, `MatDenseCUDARestoreArrayRead()`, `MatDenseCUDAGetArrayRead()`
@*/
PetscErrorCode MatDenseCUDARestoreArrayWrite(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMRestoreArrayWrite<DeviceType::CUDA>(A, a));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDAGetArrayRead - Provides read-only access to the CUDA buffer inside a
`MATDENSECUDA` matrix. The array must be restored with `MatDenseCUDARestoreArrayRead()` when
no longer needed.
Not Collective
Input Parameter:
. A - the matrix
Output Parameter:
. a - the GPU array in column major order
Level: developer
Note:
Data may be copied to the GPU due to operations done on the CPU. If you need write only
access, use `MatDenseCUDAGetArrayWrite()`.
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArray()`, `MatDenseCUDARestoreArray()`,
`MatDenseCUDARestoreArrayWrite()`, `MatDenseCUDAGetArrayWrite()`,
`MatDenseCUDARestoreArrayRead()`
@*/
PetscErrorCode MatDenseCUDAGetArrayRead(Mat A, const PetscScalar **a)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMGetArrayRead<DeviceType::CUDA>(A, a));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDARestoreArrayRead - Restore read-only access to the CUDA buffer inside a
`MATDENSECUDA` matrix previously obtained with a call to `MatDenseCUDAGetArrayRead()`.
Not Collective
Input Parameters:
+ A - the matrix
- a - the GPU array in column major order
Level: developer
Note:
Data can be copied to the GPU due to operations done on the CPU. If you need write only
access, use `MatDenseCUDAGetArrayWrite()`.
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArray()`, `MatDenseCUDARestoreArray()`,
`MatDenseCUDARestoreArrayWrite()`, `MatDenseCUDAGetArrayWrite()`, `MatDenseCUDAGetArrayRead()`
@*/
PetscErrorCode MatDenseCUDARestoreArrayRead(Mat A, const PetscScalar **a)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMRestoreArrayRead<DeviceType::CUDA>(A, a));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDAGetArray - Provides access to the CUDA buffer inside a `MATDENSECUDA` matrix. The
array must be restored with `MatDenseCUDARestoreArray()` when no longer needed.
Not Collective
Input Parameter:
. A - the matrix
Output Parameter:
. a - the GPU array in column major order
Level: developer
Note:
Data can be copied to the GPU due to operations done on the CPU. If you need write only
access, use `MatDenseCUDAGetArrayWrite()`. For read-only access, use
`MatDenseCUDAGetArrayRead()`.
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArrayRead()`, `MatDenseCUDARestoreArray()`,
`MatDenseCUDARestoreArrayWrite()`, `MatDenseCUDAGetArrayWrite()`,
`MatDenseCUDARestoreArrayRead()`
@*/
PetscErrorCode MatDenseCUDAGetArray(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMGetArray<DeviceType::CUDA>(A, a));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDARestoreArray - Restore access to the CUDA buffer inside a `MATDENSECUDA` matrix
previously obtained with `MatDenseCUDAGetArray()`.
Not Collective
Level: developer
Input Parameters:
+ A - the matrix
- a - the GPU array in column major order
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArray()`, `MatDenseCUDARestoreArrayWrite()`,
`MatDenseCUDAGetArrayWrite()`, `MatDenseCUDARestoreArrayRead()`, `MatDenseCUDAGetArrayRead()`
@*/
PetscErrorCode MatDenseCUDARestoreArray(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMRestoreArray<DeviceType::CUDA>(A, a));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDASetPreallocation - Set the device array used for storing the matrix elements of a
`MATDENSECUDA` matrix
Collective
Input Parameters:
+ A - the matrix
- device_array - the array (or `NULL`)
Level: intermediate
.seealso: [](ch_matrices), `Mat`, `MATDENSECUDA`, `MatCreate()`, `MatCreateDenseCUDA()`,
`MatSetValues()`, `MatDenseSetLDA()`
@*/
PetscErrorCode MatDenseCUDASetPreallocation(Mat A, PetscScalar *device_array)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMSetPreallocation<DeviceType::CUDA>(A, device_array));
PetscFunctionReturn(PETSC_SUCCESS);
}
| 34400fbcbbc89128903ec5af3440cac282643b7e.cu | #include "../matmpidensecupm.hpp"
using namespace Petsc::mat::cupm;
using Petsc::device::cupm::DeviceType;
static constexpr impl::MatDense_MPI_CUPM<DeviceType::CUDA> mat_cupm{};
/*MC
MATDENSECUDA - "densecuda" - A matrix type to be used for dense matrices on GPUs.
This matrix type is identical to `MATSEQDENSECUDA` when constructed with a single process
communicator, and `MATMPIDENSECUDA` otherwise.
Options Database Key:
. -mat_type densecuda - sets the matrix type to `MATDENSECUDA` during a call to
`MatSetFromOptions()`
Level: beginner
.seealso: [](ch_matrices), `Mat`, `MATSEQDENSECUDA`, `MATMPIDENSECUDA`, `MATSEQDENSEHIP`,
`MATMPIDENSEHIP`, `MATDENSE`
M*/
/*MC
MATMPIDENSECUDA - "mpidensecuda" - A matrix type to be used for distributed dense matrices on
GPUs.
Options Database Key:
. -mat_type mpidensecuda - sets the matrix type to `MATMPIDENSECUDA` during a call to
`MatSetFromOptions()`
Level: beginner
.seealso: [](ch_matrices), `Mat`, `MATDENSECUDA`, `MATMPIDENSE`, `MATSEQDENSE`,
`MATSEQDENSECUDA`, `MATSEQDENSEHIP`
M*/
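/*
  Usage sketch (illustrative only, not part of the PETSc source): the GPU
  dense type can be requested programmatically or via the options database.
  All identifiers below are standard PETSc API; error handling is elided.

    Mat A;
    PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
    PetscCall(MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, 100, 100));
    PetscCall(MatSetType(A, MATDENSECUDA)); // or pass -mat_type densecuda
    PetscCall(MatSetUp(A));
*/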
PETSC_INTERN PetscErrorCode MatCreate_MPIDenseCUDA(Mat A)
{
PetscFunctionBegin;
PetscCall(mat_cupm.Create(A));
PetscFunctionReturn(PETSC_SUCCESS);
}
PetscErrorCode MatConvert_MPIDense_MPIDenseCUDA(Mat A, MatType type, MatReuse reuse, Mat *ret)
{
PetscFunctionBegin;
PetscCall(mat_cupm.Convert_MPIDense_MPIDenseCUPM(A, type, reuse, ret));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatCreateDenseCUDA - Creates a matrix in `MATDENSECUDA` format using CUDA.
Collective
Input Parameters:
+ comm - MPI communicator
. m - number of local rows (or `PETSC_DECIDE` to have calculated if `M` is given)
. n - number of local columns (or `PETSC_DECIDE` to have calculated if `N` is given)
. M - number of global rows (or `PETSC_DECIDE` to have calculated if `m` is given)
. N - number of global columns (or `PETSC_DECIDE` to have calculated if `n` is given)
- data - optional location of GPU matrix data. Pass `NULL` to have PETSc control matrix memory allocation.
Output Parameter:
. A - the matrix
Level: intermediate
.seealso: `MATDENSECUDA`, `MatCreate()`, `MatCreateDense()`
@*/
PetscErrorCode MatCreateDenseCUDA(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscScalar *data, Mat *A)
{
PetscFunctionBegin;
PetscCall(MatCreateDenseCUPM<DeviceType::CUDA>(comm, m, n, M, N, data, A));
PetscFunctionReturn(PETSC_SUCCESS);
}
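/*
  Usage sketch (illustrative only, not part of the PETSc source): create a
  10000 x 200 distributed dense GPU matrix and let PETSc allocate the device
  storage by passing NULL for `data`.

    Mat A;
    PetscCall(MatCreateDenseCUDA(PETSC_COMM_WORLD, PETSC_DECIDE, PETSC_DECIDE,
                                 10000, 200, NULL, &A));
*/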
/*@C
MatDenseCUDAPlaceArray - Allows one to replace the GPU array in a `MATDENSECUDA` matrix with an
array provided by the user. This is useful to avoid copying an array into a matrix.
Not Collective
Input Parameters:
+ mat - the matrix
- array - the array in column major order
Level: developer
Note:
You can return to the original array with a call to `MatDenseCUDAResetArray()`. The user is
responsible for freeing this array; it will not be freed when the matrix is destroyed. The
array must have been allocated with `cudaMalloc()`.
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArray()`, `MatDenseCUDAResetArray()`,
`MatDenseCUDAReplaceArray()`
@*/
PetscErrorCode MatDenseCUDAPlaceArray(Mat mat, const PetscScalar *array)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMPlaceArray<DeviceType::CUDA>(mat, array));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDAResetArray - Resets the matrix array to the array it previously had before the call to
`MatDenseCUDAPlaceArray()`
Not Collective
Input Parameter:
. mat - the matrix
Level: developer
Note:
You can only call this after a call to `MatDenseCUDAPlaceArray()`
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArray()`, `MatDenseCUDAPlaceArray()`
@*/
PetscErrorCode MatDenseCUDAResetArray(Mat mat)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMResetArray<DeviceType::CUDA>(mat));
PetscFunctionReturn(PETSC_SUCCESS);
}
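/*
  Usage sketch (illustrative only, not part of the PETSc source): temporarily
  back a matrix A with a user-owned device buffer and then restore the
  original storage. `user_dev_array` is a hypothetical pointer previously
  obtained from cudaMalloc() and sized for the matrix leading dimension; the
  user retains ownership of it.

    PetscCall(MatDenseCUDAPlaceArray(A, user_dev_array));
    // ... use A while it aliases user_dev_array ...
    PetscCall(MatDenseCUDAResetArray(A));
*/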
/*@C
MatDenseCUDAReplaceArray - Allows one to replace the GPU array in a `MATDENSECUDA` matrix
with an array provided by the user. This is useful to avoid copying an array into a matrix.
Not Collective
Input Parameters:
+ mat - the matrix
- array - the array in column major order
Level: developer
Note:
This permanently replaces the GPU array and frees the memory associated with the old GPU
array. The memory passed in CANNOT be freed by the user. It will be freed when the matrix is
destroyed. The array should respect the matrix leading dimension.
.seealso: `MatDenseCUDAGetArray()`, `MatDenseCUDAPlaceArray()`, `MatDenseCUDAResetArray()`
@*/
PetscErrorCode MatDenseCUDAReplaceArray(Mat mat, const PetscScalar *array)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMReplaceArray<DeviceType::CUDA>(mat, array));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDAGetArrayWrite - Provides write access to the CUDA buffer inside a `MATDENSECUDA`
matrix.
Not Collective
Input Parameter:
. A - the matrix
Output Parameter:
. a - the GPU array in column major order
Level: developer
Notes:
The data on the GPU may not be updated due to operations done on the CPU. If you need updated
data, use `MatDenseCUDAGetArray()`.
The array must be restored with `MatDenseCUDARestoreArrayWrite()` when no longer needed.
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArray()`, `MatDenseCUDARestoreArray()`,
`MatDenseCUDARestoreArrayWrite()`, `MatDenseCUDAGetArrayRead()`,
`MatDenseCUDARestoreArrayRead()`
@*/
PetscErrorCode MatDenseCUDAGetArrayWrite(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMGetArrayWrite<DeviceType::CUDA>(A, a));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDARestoreArrayWrite - Restore write access to the CUDA buffer inside a
`MATDENSECUDA` matrix previously obtained with `MatDenseCUDAGetArrayWrite()`.
Not Collective
Input Parameters:
+ A - the matrix
- a - the GPU array in column major order
Level: developer
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArray()`, `MatDenseCUDARestoreArray()`,
`MatDenseCUDAGetArrayWrite()`, `MatDenseCUDARestoreArrayRead()`, `MatDenseCUDAGetArrayRead()`
@*/
PetscErrorCode MatDenseCUDARestoreArrayWrite(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMRestoreArrayWrite<DeviceType::CUDA>(A, a));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDAGetArrayRead - Provides read-only access to the CUDA buffer inside a
`MATDENSECUDA` matrix. The array must be restored with `MatDenseCUDARestoreArrayRead()` when
no longer needed.
Not Collective
Input Parameter:
. A - the matrix
Output Parameter:
. a - the GPU array in column major order
Level: developer
Note:
Data may be copied to the GPU due to operations done on the CPU. If you need write only
access, use `MatDenseCUDAGetArrayWrite()`.
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArray()`, `MatDenseCUDARestoreArray()`,
`MatDenseCUDARestoreArrayWrite()`, `MatDenseCUDAGetArrayWrite()`,
`MatDenseCUDARestoreArrayRead()`
@*/
PetscErrorCode MatDenseCUDAGetArrayRead(Mat A, const PetscScalar **a)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMGetArrayRead<DeviceType::CUDA>(A, a));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDARestoreArrayRead - Restore read-only access to the CUDA buffer inside a
`MATDENSECUDA` matrix previously obtained with a call to `MatDenseCUDAGetArrayRead()`.
Not Collective
Input Parameters:
+ A - the matrix
- a - the GPU array in column major order
Level: developer
Note:
Data can be copied to the GPU due to operations done on the CPU. If you need write only
access, use `MatDenseCUDAGetArrayWrite()`.
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArray()`, `MatDenseCUDARestoreArray()`,
`MatDenseCUDARestoreArrayWrite()`, `MatDenseCUDAGetArrayWrite()`, `MatDenseCUDAGetArrayRead()`
@*/
PetscErrorCode MatDenseCUDARestoreArrayRead(Mat A, const PetscScalar **a)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMRestoreArrayRead<DeviceType::CUDA>(A, a));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDAGetArray - Provides access to the CUDA buffer inside a `MATDENSECUDA` matrix. The
array must be restored with `MatDenseCUDARestoreArray()` when no longer needed.
Not Collective
Input Parameter:
. A - the matrix
Output Parameter:
. a - the GPU array in column major order
Level: developer
Note:
Data can be copied to the GPU due to operations done on the CPU. If you need write only
access, use `MatDenseCUDAGetArrayWrite()`. For read-only access, use
`MatDenseCUDAGetArrayRead()`.
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArrayRead()`, `MatDenseCUDARestoreArray()`,
`MatDenseCUDARestoreArrayWrite()`, `MatDenseCUDAGetArrayWrite()`,
`MatDenseCUDARestoreArrayRead()`
@*/
PetscErrorCode MatDenseCUDAGetArray(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMGetArray<DeviceType::CUDA>(A, a));
PetscFunctionReturn(PETSC_SUCCESS);
}
/*@C
MatDenseCUDARestoreArray - Restore access to the CUDA buffer inside a `MATDENSECUDA` matrix
previously obtained with `MatDenseCUDAGetArray()`.
Not Collective
Level: developer
Input Parameters:
+ A - the matrix
- a - the GPU array in column major order
.seealso: `MATDENSECUDA`, `MatDenseCUDAGetArray()`, `MatDenseCUDARestoreArrayWrite()`,
`MatDenseCUDAGetArrayWrite()`, `MatDenseCUDARestoreArrayRead()`, `MatDenseCUDAGetArrayRead()`
@*/
PetscErrorCode MatDenseCUDARestoreArray(Mat A, PetscScalar **a)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMRestoreArray<DeviceType::CUDA>(A, a));
PetscFunctionReturn(PETSC_SUCCESS);
}
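/*
  Usage sketch (illustrative only, not part of the PETSc source): the
  Get/Restore pair brackets direct device access to the matrix storage.
  `my_kernel` is a hypothetical user kernel and `lda` would be obtained from
  MatDenseGetLDA().

    PetscScalar *d_a;
    PetscCall(MatDenseCUDAGetArray(A, &d_a));
    my_kernel<<<grid, block>>>(d_a, lda);
    PetscCall(MatDenseCUDARestoreArray(A, &d_a));
*/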
/*@C
MatDenseCUDASetPreallocation - Set the device array used for storing the matrix elements of a
`MATDENSECUDA` matrix
Collective
Input Parameters:
+ A - the matrix
- device_array - the array (or `NULL`)
Level: intermediate
.seealso: [](ch_matrices), `Mat`, `MATDENSECUDA`, `MatCreate()`, `MatCreateDenseCUDA()`,
`MatSetValues()`, `MatDenseSetLDA()`
@*/
PetscErrorCode MatDenseCUDASetPreallocation(Mat A, PetscScalar *device_array)
{
PetscFunctionBegin;
PetscCall(MatDenseCUPMSetPreallocation<DeviceType::CUDA>(A, device_array));
PetscFunctionReturn(PETSC_SUCCESS);
}
|
4a4fa7e4d62d1c1c64b179f95947accae0aca9fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _ORDERGRAPH_KERNEL_H_
#define _ORDERGRAPH_KERNEL_H_
#include <stdio.h>
#include "data45.h"
__device__ void Dincr(int *bit,int n);
__device__ void DincrS(int *bit,int n);
__device__ bool D_getState(int parN,int *sta,int time);
__device__ void D_findComb(int* comb, int l, int n);
__device__ int D_findindex(int *arr, int size);
__device__ int D_C(int n, int a);
__global__ void genScoreKernel(const int sizepernode,
float *D_localscore,
const int *D_data,
const float *D_LG)
{
int id=blockIdx.x*256+threadIdx.x;
int node,index;
bool flag;
int parent[5]={0};
int pre[NODE_N]={0};
int state[5]={0};
int i,j,parN=0,tmp,t;
int t1=0,t2=0;
float ls=0;
int Nij[STATE_N]={0};
if(id<sizepernode){
D_findComb(parent,id,NODE_N-1);
for(i=0;i<4;i++)
{
if(parent[i]>0) parN++;
}
for(node=0;node<NODE_N;node++){
j=1;
for(i=0;i<NODE_N;i++)
{
if(i!=node)pre[j++]=i;
}
for(tmp=0;tmp<parN;tmp++)
state[tmp]=0;
index=sizepernode*node+id;
//priors
t=0;
while(D_getState(parN,state,t++)){ // iterate over all joint parent-state configurations
//printf("test %u\n",id);
ls=0;
for(tmp=0;tmp<STATE_N;tmp++)
Nij[tmp]=0;
for(t1=0;t1<DATA_N;t1++){
flag=true;
for(t2=0;t2<parN;t2++){
if(D_data[t1*NODE_N+pre[parent[t2]]]!=state[t2]) {
flag=false;
break;
}
}
if(!flag) continue;
Nij[D_data[t1*NODE_N+node]]++;
}
tmp=STATE_N-1;
for(t1=0;t1<STATE_N;t1++){
ls+=D_LG[Nij[t1]];
tmp+=Nij[t1];
}
ls-=D_LG[tmp];
ls+=D_LG[STATE_N-1];
D_localscore[index]+=ls;
}
}
}
}
__global__ void computeKernel(const int taskperthr,
const int sizepernode,
const float *D_localscore,
const bool *D_parent,
const int node,
const int total,
float *D_Score,
int *D_resP)
{
extern __shared__ float lsinblock[];
const unsigned int id = blockIdx.x*256 + threadIdx.x;
const unsigned int tid = threadIdx.x;
const unsigned int bid = blockIdx.x;
int posN=1,i,index,t,tmp;
int pre[NODE_N]={0};
int parN=0;
int bestparent[4]={0},parent[5]={-1};
float bestls=-999999999999999.f,ls;
for(i=0;i<NODE_N;i++){
if(D_parent[i]==1){pre[posN++]=i;}
}
for(i=0;i<taskperthr&&((id*taskperthr+i)<total);i++){
D_findComb(parent,id*taskperthr+i,posN);
for(parN=0;parN<4;parN++){
if(parent[parN]<0) break;
if(pre[parent[parN]]>node) parent[parN]=pre[parent[parN]];
else parent[parN]=pre[parent[parN]]+1;
}
for(tmp=parN;tmp>0;tmp--){
parent[tmp]=parent[tmp-1];
}
parent[0]=0;
index=D_findindex(parent,parN);
index+=sizepernode*node;
ls=D_localscore[index];
if(ls>bestls){
bestls=ls;
for(tmp=0;tmp<4;tmp++)
bestparent[tmp]=parent[tmp+1];
}
}
lsinblock[tid]=bestls;
__syncthreads();
for(i=128;i>=1;i/=2){
if(tid<i){
if(lsinblock[tid+i]>lsinblock[tid]&&lsinblock[tid+i]<0){
lsinblock[tid]=lsinblock[tid+i];
lsinblock[tid+i]=(float)(tid+i);
}
else if(lsinblock[tid+i]<lsinblock[tid]&&lsinblock[tid]<0){
lsinblock[tid+i]=(float)tid;
}
else if(lsinblock[tid]>0&&lsinblock[tid+i]<0){
lsinblock[tid]=lsinblock[tid+i];
lsinblock[tid+i]=(float)(tid+i);
}
else if(lsinblock[tid]<0&&lsinblock[tid+i]>0){
lsinblock[tid+i]=(float)tid;
}
}
__syncthreads();
}
__syncthreads();
if(tid==0){
D_Score[bid]=lsinblock[0];
t=0;
for(i=0;i<7&&t<128&&t>=0;i++){
t=(int)lsinblock[(int)powf(2.0,i)+t];
}
lsinblock[0]=(float)t;
}
__syncthreads();
if(tid==(int)lsinblock[0]){
for(i=0;i<4;i++){
D_resP[bid*4+i]=bestparent[i];
}
}
}
__device__ void Dincr(int *bit,int n){
while(n<=NODE_N){
bit[n]++;
if(bit[n]>=2)
{
bit[n]=0;
n++;
}
else{
break;
}
}
return;
}
__device__ void DincrS(int *bit,int n){
bit[n]++;
if(bit[n]>=STATE_N)
{
bit[n]=0;
Dincr(bit,n+1);
}
return;
}
__device__ bool D_getState(int parN,int *sta,int time){
int i,j=1;
for(i=0;i<parN;i++){
j*=STATE_N;
}
j--;
if(time>j) return false;
if(time>=1)
DincrS(sta,0);
return true;
}
__device__ void D_findComb(int* comb, int l, int n)
{
const int len = 4;
if (l == 0)
{
for (int i = 0; i < len; i++)
comb[i] = -1;
return;
}
int sum = 0;
int k = 1;
while (sum < l)
sum += D_C(n,k++);
l -= sum - D_C(n,--k);
int low = 0;
int pos = 0;
while (k > 1)
{
sum = 0;
int s = 1;
while (sum < l)
sum += D_C(n-s++,k-1);
l -= sum - D_C(n-(--s),--k);
low += s;
comb[pos++] = low;
n -= s;
}
comb[pos] = low + l;
for (int i = pos+1; i < 4; i++)
comb[i] = -1;
}
__device__ int D_findindex(int *arr, int size){ // reminder: arr[0] must be 0, size must equal the array length minus 1, and indexing starts from 0
int i,j,index=0;
for(i=1;i<size;i++){
index+=D_C(NODE_N-1,i);
}
for(i=1;i<=size-1;i++){
for(j=arr[i-1]+1;j<=arr[i]-1;j++){
index+=D_C(NODE_N-1-j,size-i);
}
}
index+=arr[size]-arr[size-1];
return index;
}
__device__ int D_C(int n, int a){
int i,res=1,atmp=a;
for(i=0;i<atmp;i++){
res*=n;
n--;
}
for(i=0;i<atmp;i++){
res/=a;
a--;
}
return res;
}
#endif
| 4a4fa7e4d62d1c1c64b179f95947accae0aca9fb.cu | #ifndef _ORDERGRAPH_KERNEL_H_
#define _ORDERGRAPH_KERNEL_H_
#include <stdio.h>
#include "data45.h"
__device__ void Dincr(int *bit,int n);
__device__ void DincrS(int *bit,int n);
__device__ bool D_getState(int parN,int *sta,int time);
__device__ void D_findComb(int* comb, int l, int n);
__device__ int D_findindex(int *arr, int size);
__device__ int D_C(int n, int a);
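/* Added explanatory note: each thread of genScoreKernel handles one
   candidate parent set, identified by its linear index `id` and decoded by
   D_findComb(). For every node it accumulates a local score over all joint
   parent-state configurations. Assuming the D_LG table holds precomputed
   log-factorials (D_LG[x] = log(x!)), the per-configuration term
   sum_j log(Nij!) + log((STATE_N-1)!) - log((N + STATE_N - 1)!)
   is the familiar K2-style Bayesian score contribution, where Nij counts
   the samples with child state j under that parent configuration and N is
   their total. */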
__global__ void genScoreKernel(const int sizepernode,
float *D_localscore,
const int *D_data,
const float *D_LG)
{
int id=blockIdx.x*256+threadIdx.x;
int node,index;
bool flag;
int parent[5]={0};
int pre[NODE_N]={0};
int state[5]={0};
int i,j,parN=0,tmp,t;
int t1=0,t2=0;
float ls=0;
int Nij[STATE_N]={0};
if(id<sizepernode){
D_findComb(parent,id,NODE_N-1);
for(i=0;i<4;i++)
{
if(parent[i]>0) parN++;
}
for(node=0;node<NODE_N;node++){
j=1;
for(i=0;i<NODE_N;i++)
{
if(i!=node)pre[j++]=i;
}
for(tmp=0;tmp<parN;tmp++)
state[tmp]=0;
index=sizepernode*node+id;
//priors
t=0;
while(D_getState(parN,state,t++)){ // iterate over all joint parent-state configurations
//printf("test %u\n",id);
ls=0;
for(tmp=0;tmp<STATE_N;tmp++)
Nij[tmp]=0;
for(t1=0;t1<DATA_N;t1++){
flag=true;
for(t2=0;t2<parN;t2++){
if(D_data[t1*NODE_N+pre[parent[t2]]]!=state[t2]) {
flag=false;
break;
}
}
if(!flag) continue;
Nij[D_data[t1*NODE_N+node]]++;
}
tmp=STATE_N-1;
for(t1=0;t1<STATE_N;t1++){
ls+=D_LG[Nij[t1]];
tmp+=Nij[t1];
}
ls-=D_LG[tmp];
ls+=D_LG[STATE_N-1];
D_localscore[index]+=ls;
}
}
}
}
__global__ void computeKernel(const int taskperthr,
const int sizepernode,
const float *D_localscore,
const bool *D_parent,
const int node,
const int total,
float *D_Score,
int *D_resP)
{
extern __shared__ float lsinblock[];
const unsigned int id = blockIdx.x*256 + threadIdx.x;
const unsigned int tid = threadIdx.x;
const unsigned int bid = blockIdx.x;
int posN=1,i,index,t,tmp;
int pre[NODE_N]={0};
int parN=0;
int bestparent[4]={0},parent[5]={-1};
float bestls=-999999999999999.f,ls;
for(i=0;i<NODE_N;i++){
if(D_parent[i]==1){pre[posN++]=i;}
}
for(i=0;i<taskperthr&&((id*taskperthr+i)<total);i++){
D_findComb(parent,id*taskperthr+i,posN);
for(parN=0;parN<4;parN++){
if(parent[parN]<0) break;
if(pre[parent[parN]]>node) parent[parN]=pre[parent[parN]];
else parent[parN]=pre[parent[parN]]+1;
}
for(tmp=parN;tmp>0;tmp--){
parent[tmp]=parent[tmp-1];
}
parent[0]=0;
index=D_findindex(parent,parN);
index+=sizepernode*node;
ls=D_localscore[index];
if(ls>bestls){
bestls=ls;
for(tmp=0;tmp<4;tmp++)
bestparent[tmp]=parent[tmp+1];
}
}
lsinblock[tid]=bestls;
__syncthreads();
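// Added explanatory note: the loop below is an in-block reduction that finds
// the best (largest, and expected to be negative) score in lsinblock. A slot
// still holding a negative value carries a live score; once a slot loses a
// comparison it is overwritten with a (positive) thread index that acts as a
// breadcrumb. After the reduction, thread 0 walks those breadcrumbs (the
// powf(2.0, i) loop further down) to recover the id of the thread that
// produced the block-wide best score, stores it in lsinblock[0], and that
// thread alone writes its best parent set to D_resP.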
for(i=128;i>=1;i/=2){
if(tid<i){
if(lsinblock[tid+i]>lsinblock[tid]&&lsinblock[tid+i]<0){
lsinblock[tid]=lsinblock[tid+i];
lsinblock[tid+i]=(float)(tid+i);
}
else if(lsinblock[tid+i]<lsinblock[tid]&&lsinblock[tid]<0){
lsinblock[tid+i]=(float)tid;
}
else if(lsinblock[tid]>0&&lsinblock[tid+i]<0){
lsinblock[tid]=lsinblock[tid+i];
lsinblock[tid+i]=(float)(tid+i);
}
else if(lsinblock[tid]<0&&lsinblock[tid+i]>0){
lsinblock[tid+i]=(float)tid;
}
}
__syncthreads();
}
__syncthreads();
if(tid==0){
D_Score[bid]=lsinblock[0];
t=0;
for(i=0;i<7&&t<128&&t>=0;i++){
t=(int)lsinblock[(int)powf(2.0,i)+t];
}
lsinblock[0]=(float)t;
}
__syncthreads();
if(tid==(int)lsinblock[0]){
for(i=0;i<4;i++){
D_resP[bid*4+i]=bestparent[i];
}
}
}
__device__ void Dincr(int *bit,int n){
while(n<=NODE_N){
bit[n]++;
if(bit[n]>=2)
{
bit[n]=0;
n++;
}
else{
break;
}
}
return;
}
__device__ void DincrS(int *bit,int n){
bit[n]++;
if(bit[n]>=STATE_N)
{
bit[n]=0;
Dincr(bit,n+1);
}
return;
}
__device__ bool D_getState(int parN,int *sta,int time){
int i,j=1;
for(i=0;i<parN;i++){
j*=STATE_N;
}
j--;
if(time>j) return false;
if(time>=1)
DincrS(sta,0);
return true;
}
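/* Added explanatory note: D_findComb is a combination "unranking" routine.
   It maps the linear index l to the l-th subset of {1..n} with at most four
   elements (the maximum parent count), filling unused slots with -1.
   Subsets are enumerated by increasing size and then lexicographically,
   e.g. for n = 4: l = 0 -> {}, l = 1..4 -> {1}..{4}, l = 5 -> {1,2}. */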
__device__ void D_findComb(int* comb, int l, int n)
{
const int len = 4;
if (l == 0)
{
for (int i = 0; i < len; i++)
comb[i] = -1;
return;
}
int sum = 0;
int k = 1;
while (sum < l)
sum += D_C(n,k++);
l -= sum - D_C(n,--k);
int low = 0;
int pos = 0;
while (k > 1)
{
sum = 0;
int s = 1;
while (sum < l)
sum += D_C(n-s++,k-1);
l -= sum - D_C(n-(--s),--k);
low += s;
comb[pos++] = low;
n -= s;
}
comb[pos] = low + l;
for (int i = pos+1; i < 4; i++)
comb[i] = -1;
}
__device__ int D_findindex(int *arr, int size){ // reminder: arr[0] must be 0, size must equal the array length minus 1, and indexing starts from 0
int i,j,index=0;
for(i=1;i<size;i++){
index+=D_C(NODE_N-1,i);
}
for(i=1;i<=size-1;i++){
for(j=arr[i-1]+1;j<=arr[i]-1;j++){
index+=D_C(NODE_N-1-j,size-i);
}
}
index+=arr[size]-arr[size-1];
return index;
}
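/* Added explanatory note: D_C(n, a) computes the binomial coefficient
   C(n, a) in integer arithmetic by multiplying the a largest numerator
   factors first and then dividing by a, a-1, ..., 1. For example
   D_C(5, 2) = (5 * 4) / 2 / 1 = 10. */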
__device__ int D_C(int n, int a){
int i,res=1,atmp=a;
for(i=0;i<atmp;i++){
res*=n;
n--;
}
for(i=0;i<atmp;i++){
res/=a;
a--;
}
return res;
}
#endif
|
9d9b219a449bc539406a4f6eed984e12975caa98.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
@author Azzam Haidar
@author Ahmad Abdelfattah
*/
#include "magma_internal.h"
#define PRECISION_s
#include "gemm_template_kernel_vbatched_hip.cuh"
#include "gemm_config/sgemm_param_nn.h"
#include "gemm_config/sgemm_param_nt.h"
#include "gemm_config/sgemm_param_tn.h"
#include "gemm_config/sgemm_param_tt.h"
#define version(s,v) s ## _V_ ## v
/******************************************************************************/
extern "C" void
magmablas_sgemm_vbatched_core(
magma_trans_t transA, magma_trans_t transB,
magma_int_t* m, magma_int_t* n, magma_int_t* k,
float alpha,
float const * const * dA_array, magma_int_t* ldda,
float const * const * dB_array, magma_int_t* lddb,
float beta,
float **dC_array, magma_int_t* lddc,
magma_int_t max_m, magma_int_t max_n, magma_int_t max_k,
magma_int_t roffA, magma_int_t coffA,
magma_int_t roffB, magma_int_t coffB,
magma_int_t roffC, magma_int_t coffC,
magma_int_t spec_m, magma_int_t spec_n, magma_int_t spec_k,
magma_int_t batchCount, magma_queue_t queue )
{
if(max_m <=0 || max_n <= 0 || max_k <= 0) return;
magma_int_t shape = 0;
if (transA == MagmaNoTrans && transB == MagmaNoTrans) { shape = 0; } // nn
else if (transA == MagmaNoTrans && transB == MagmaTrans) { shape = 1; } // nt
else if (transA == MagmaNoTrans && transB == MagmaConjTrans) { shape = 2; } // nc
else if (transA == MagmaTrans && transB == MagmaNoTrans) { shape = 3; } // tn
else if (transA == MagmaTrans && transB == MagmaTrans) { shape = 4; } // tt
else if (transA == MagmaTrans && transB == MagmaConjTrans) { shape = 5; } // tc
else if (transA == MagmaConjTrans && transB == MagmaNoTrans) { shape = 6; } // cn
else if (transA == MagmaConjTrans && transB == MagmaTrans) { shape = 7; } // ct
else if (transA == MagmaConjTrans && transB == MagmaConjTrans) { shape = 8; } // cc
switch(shape)
{
case 0: // nn
{
if(max_k < 64)
{
if(max_k==8 && max_n==24)
gemm_template_vbatched_nn<float, version(NN,512), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
else if (max_n<32)
gemm_template_vbatched_nn<float, version(NN,510), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
else
gemm_template_vbatched_nn<float, version(NN,504), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
else
{
gemm_template_vbatched_nn<float, version(NN,518), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
break;
case 1: // nt
{
gemm_template_vbatched_nt<float, version(NT,734), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
break;
case 2: // nc
{
gemm_template_vbatched_nt<float, version(NT,734), 0, 1>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
break;
case 3: // tn
{
if(max_k < 64)
{
gemm_template_vbatched_tn<float, version(TN,654), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
else
{
gemm_template_vbatched_tn<float, version(TN,666), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
break;
case 6: // cn
{
if(max_k < 64)
{
gemm_template_vbatched_tn<float, version(TN,654), 1, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
else
{
gemm_template_vbatched_tn<float, version(TN,666), 1, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
break;
case 4: // tt
{
if(max_k < 128)
{
if(max_m < 128)
{
gemm_template_vbatched_tt<float, version(TT,275), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
break;
case 5: // tc
{
if(max_k < 128)
{
if(max_m < 128)
{
gemm_template_vbatched_tt<float, version(TT,275), 0, 1>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 0, 1>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 0, 1>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
break;
case 7: // ct
{
if(max_k < 128)
{
if(max_m < 128)
{
gemm_template_vbatched_tt<float, version(TT,275), 1, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 1, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 1, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
break;
case 8: // cc
{
if(max_k < 128)
{
if(max_m < 128)
{
gemm_template_vbatched_tt<float, version(TT,275), 1, 1>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 1, 1>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 1, 1>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
break;
default:; // propose something
}
}
| 9d9b219a449bc539406a4f6eed984e12975caa98.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@author Jakub Kurzak
@author Stan Tomov
@author Mark Gates
@author Azzam Haidar
@author Ahmad Abdelfattah
*/
#include "magma_internal.h"
#define PRECISION_s
#include "gemm_template_kernel_vbatched.cuh"
#include "gemm_config/sgemm_param_nn.h"
#include "gemm_config/sgemm_param_nt.h"
#include "gemm_config/sgemm_param_tn.h"
#include "gemm_config/sgemm_param_tt.h"
#define version(s,v) s ## _V_ ## v
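// Added explanatory note: version(s, v) is plain token pasting, e.g.
// version(NN, 518) expands to NN_V_518. These identifiers name the gemm
// tuning configurations pulled in from the gemm_config/sgemm_param_*.h
// headers and are passed as the template argument that selects the block
// and tile sizes for each transpose combination handled below.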
/******************************************************************************/
extern "C" void
magmablas_sgemm_vbatched_core(
magma_trans_t transA, magma_trans_t transB,
magma_int_t* m, magma_int_t* n, magma_int_t* k,
float alpha,
float const * const * dA_array, magma_int_t* ldda,
float const * const * dB_array, magma_int_t* lddb,
float beta,
float **dC_array, magma_int_t* lddc,
magma_int_t max_m, magma_int_t max_n, magma_int_t max_k,
magma_int_t roffA, magma_int_t coffA,
magma_int_t roffB, magma_int_t coffB,
magma_int_t roffC, magma_int_t coffC,
magma_int_t spec_m, magma_int_t spec_n, magma_int_t spec_k,
magma_int_t batchCount, magma_queue_t queue )
{
if(max_m <=0 || max_n <= 0 || max_k <= 0) return;
magma_int_t shape = 0;
if (transA == MagmaNoTrans && transB == MagmaNoTrans) { shape = 0; } // nn
else if (transA == MagmaNoTrans && transB == MagmaTrans) { shape = 1; } // nt
else if (transA == MagmaNoTrans && transB == MagmaConjTrans) { shape = 2; } // nc
else if (transA == MagmaTrans && transB == MagmaNoTrans) { shape = 3; } // tn
else if (transA == MagmaTrans && transB == MagmaTrans) { shape = 4; } // tt
else if (transA == MagmaTrans && transB == MagmaConjTrans) { shape = 5; } // tc
else if (transA == MagmaConjTrans && transB == MagmaNoTrans) { shape = 6; } // cn
else if (transA == MagmaConjTrans && transB == MagmaTrans) { shape = 7; } // ct
else if (transA == MagmaConjTrans && transB == MagmaConjTrans) { shape = 8; } // cc
switch(shape)
{
case 0: // nn
{
if(max_k < 64)
{
if(max_k==8 && max_n==24)
gemm_template_vbatched_nn<float, version(NN,512), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
else if (max_n<32)
gemm_template_vbatched_nn<float, version(NN,510), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
else
gemm_template_vbatched_nn<float, version(NN,504), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
else
{
gemm_template_vbatched_nn<float, version(NN,518), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
break;
case 1: // nt
{
gemm_template_vbatched_nt<float, version(NT,734), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
break;
case 2: // nc
{
gemm_template_vbatched_nt<float, version(NT,734), 0, 1>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
break;
case 3: // tn
{
if(max_k < 64)
{
gemm_template_vbatched_tn<float, version(TN,654), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
else
{
gemm_template_vbatched_tn<float, version(TN,666), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
break;
case 6: // cn
{
if(max_k < 64)
{
gemm_template_vbatched_tn<float, version(TN,654), 1, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
else
{
gemm_template_vbatched_tn<float, version(TN,666), 1, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
break;
case 4: // tt
{
if(max_k < 128)
{
if(max_m < 128)
{
gemm_template_vbatched_tt<float, version(TT,275), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 0, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
break;
case 5: // tc
{
if(max_k < 128)
{
if(max_m < 128)
{
gemm_template_vbatched_tt<float, version(TT,275), 0, 1>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 0, 1>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 0, 1>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
break;
case 7: // ct
{
if(max_k < 128)
{
if(max_m < 128)
{
gemm_template_vbatched_tt<float, version(TT,275), 1, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 1, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 1, 0>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
break;
case 8: // cc
{
if(max_k < 128)
{
if(max_m < 128)
{
gemm_template_vbatched_tt<float, version(TT,275), 1, 1>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 1, 1>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
else
{
gemm_template_vbatched_tt<float, version(TT,312), 1, 1>
(m, n, k, dA_array, ldda, dB_array, lddb, dC_array, lddc, alpha, beta, max_m, max_n, roffA, coffA, roffB, coffB, roffC, coffC, spec_m, spec_n, spec_k, batchCount, queue);
}
}
break;
default:; // propose something
}
}
|
6c991f98da49e2664d701ec5cc9a9ee89c8e7444.hip | // !!! This is a file automatically generated by hipify!!!
// PBR pt in CUDA using the Cook-Torrance model by Danny Huynh, 2017 (modified for simple animations)
#include <iostream>
#include <string>
#include "gputimer.h"
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/device_functions.h>
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#include "cutil_math.h"
#ifdef __WIN32
#include <Windows.h>
#else
#include <direct.h>
#endif
#define M_PI 3.14159265359f
#define width 1280
#define height 720
#define samples 1024
#define alpha 0.5
// Other settings
//#define CTBRDF // Uncomment to use the Cook-Torrance reflectance model
#define GLOBAL // Uncomment to use only direct lighting
#define TOTALBOUNCES 4
struct Ray {
float3 orig;
float3 dir;
__device__ Ray(float3 _orig, float3 _dir) : orig(_orig), dir(_dir) { }
};
enum Refl_t { DIFF, SPEC, REFR }; // TODO: Refractions not yet defined
struct Sphere {
float radius;
float3 pos, emis, albedo;
Refl_t refl;
__device__ float intersect_sphere(const Ray &r) const {
//float3 oc = r.orig - pos;
float3 oc = pos - r.orig;
float t, epsilon = 0.0001f;
float b = dot(r.dir, oc);
float discr = b*b - dot(r.dir, r.dir) * (dot(oc, oc) - radius*radius);
//float discr = b*b - dot(oc, oc) + radius*radius;
// if discriminant is not negative, then there is an intersection
if (discr < 0) { return 0; }
else { discr = sqrtf(discr); }
return (t = b - discr) > epsilon ? t : ((t = b + discr) > epsilon ? t : 0);
}
};
// SCENE
// { float radius, { float3 position }, { float3 emission }, { float3 colour }, refl_type }
__device__ Sphere spheres[] =
{
{ 1e5f, { 1e5f + 1.0f, 40.8f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { 0.85f, 0.35f, 0.35f }, DIFF }, //Left
{ 1e5f, { -1e5f + 99.0f, 40.8f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .35f, .35f, .85f }, DIFF}, //Right
{ 1e5f, { 50.0f, 40.8f, 1e5f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Back
{ 1e5f, { 50.0f, 40.8f, -1e5f + 600.0f }, { 0.0f, 0.0f, 0.0f }, { 1.00f, 1.00f, 1.00f }, DIFF }, //Frnt
{ 1e5f, { 50.0f, 1e5f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Botm
{ 1e5f, { 50.0f, -1e5f + 81.6f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Top
{ 16.5f, { 27.0f, 16.5f, 47.0f }, { 0.0f, 0.0f, 0.0f }, { 0.9f, 0.1f, 0.1f }, DIFF }, // small sphere 1
{ 16.5f, { 73.0f, 16.5f, 78.0f }, { 0.0f, 0.0f, 0.0f }, { 0.1f, 0.3f, 1.0f }, SPEC }, // small sphere 2
{ 600.0f, { 50.0f, 681.6f - .77f, 81.6f }, { 2.0f, 1.8f, 1.6f }, { 0.0f, 0.0f, 0.0f }, DIFF } // Light
};
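// A hypothetical extra entry would use the same { radius, { position }, { emission }, { albedo }, refl } layout,
// e.g. a small mirror ball: { 10.0f, { 50.0f, 10.0f, 100.0f }, { 0.0f, 0.0f, 0.0f }, { .9f, .9f, .9f }, SPEC }
// (illustration only). Note the direct-lighting branch assumes the light source stays at spheres[8].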
// check speeds if done inline
__device__ inline bool intersect_scene(const Ray &r, float &t, int &id) {
float n = sizeof(spheres)/sizeof(Sphere);
float d, inf = t = FLT_MAX;
for (int i = int(n) - 1; i >= 0; --i) {
if ( (d = spheres[i].intersect_sphere(r)) && d < t) {
t = d;
id = i;
}
}
return t < inf;
}
// random number generator from https://github.com/gz/rust-raytracer
__device__ static float getrandom(unsigned int *seed0, unsigned int *seed1) {
*seed0 = 36969 * ((*seed0) & 65535) + ((*seed0) >> 16); // hash the seeds using bitwise AND and bitshifts
*seed1 = 18000 * ((*seed1) & 65535) + ((*seed1) >> 16);
unsigned int ires = ((*seed0) << 16) + (*seed1);
// Convert to float
union {
float f;
unsigned int ui;
} res;
res.ui = (ires & 0x007fffff) | 0x40000000; // bitwise AND, bitwise OR
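// The exponent bits are pinned to 2^1 while the mantissa stays random, so res.f is uniform in [2, 4);
// the return below rescales that range to [0, 1).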
return (res.f - 2.f) / 2.f;
}
// For Diffuse Materials
__device__ float3 uniform_sample_hemisphere(const float &r1, const float &r2) {
// r1 = cos(theta) = y
float sintheta = sqrtf(1 - r1*r1);
float phi = 2 * M_PI * r2;
float x = sintheta * cosf(phi);
float z = sintheta * sinf(phi);
return make_float3(x, r1, z);
}
// For Specular Materials
__device__ float3 reflect_sample_hemisphere(float3 light_dir, float3 norm)
{
return light_dir - 2 * dot(light_dir, norm) * norm;
}
__device__ float reflect_coeff(float n1, float n2)
{
float f0 = ( (n1 - n2) / (n1 + n2) );
return f0 * f0;
}
// Calculates fresnel coefficient
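// (Schlick's approximation: F = F0 + (1 - F0) * (1 - cos(theta))^5, where cos(theta) = dot(l, norm)
// and F0 comes from reflect_coeff)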
__device__ float fresnel(float3 l, float3 norm, float n1, float n2)
{
float f0 = reflect_coeff(n1, n2);
return f0 + (1 - f0)*pow(1 - dot(l, norm), 5);
}
// Calculates proportion of microfacets pointing in direction of half-vector h
__device__ float microfacet_dist(float3 m, float3 n)
{
float cos_m = dot(m, n);
float tan_m = ( (1 - cos_m*cos_m) /cos_m );
float numer = alpha * alpha * max(0.0f, dot(m, n));
// Distribution of microfacets:
float angle = (alpha * alpha) + (tan_m * tan_m);
float denom = M_PI * pow(cos_m, 4) * angle * angle;
return numer / denom;
}
// Calculates proportion of microfacets that are masked or shadowed
__device__ float geometric_atten(float3 v, float3 l, float3 n)
{
float3 h = normalize(v + l);
float view = (2 * max(0.0f, dot(n, h)) * max(0.0f, dot(n, v))) / max(0.0f, dot(v, h));
float light = (2 * max(0.0f, dot(n, h)) * max(0.0f, dot(n, l))) / max(0.0f, dot(l, h));
return min(1.0f, min(view, light));
}
// Compute the Cook-Torrance BRDF
__device__ float ct_brdf(const float3 norm, float3 &l, const float3 nl, unsigned int *s1, unsigned int *s2)
{
// Sample unit hemisphere
// create 2 random numbers
float r1 = 2 * M_PI * getrandom(s1, s2); // pick random number on unit circle (radius = 1, circumference = 2*Pi) for azimuth
float r2 = getrandom(s1, s2); // pick random number for elevation
float r2s = sqrtf(r2);
float3 sampleLightDir = uniform_sample_hemisphere(r1, r2);
// Compute local orthonormal bases uvw at hitpoint to use for calculating random ray direction
float3 w = nl;
float3 u = normalize(cross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = cross(w, u);
// Check which object our new light direction hits
// get Fresnel
float F = fresnel(l, norm, 1.0f, 1.2f);
// get D
float D = microfacet_dist(l, norm);
// get G
float G = geometric_atten(v, l, norm);
float fr = (F * D * G) / (4 * dot(l, norm) * dot(v, norm));
// Set the sampled light direction as the new incident light direction
l = sampleLightDir;
return fr;
}
// Radiance function, solves rendering equation
__device__ float3 radiance(Ray &r, unsigned int *s1, unsigned int *s2)
{
//("In radiance, at x:%d y:%d \n", *s1, *s2);
float3 accucolor = make_float3(0.0f, 0.0f, 0.0f);
float3 mask = make_float3(1.0f, 1.0f, 1.0f);
// ray bounce loop (no Russian Roulette)
#ifndef GLOBAL
accucolor = make_float3(1.0f, 1.0f, 1.0f);
float3 shade = make_float3(0.1f, 0.1f, 0.1f);
for (int bounces = 0; bounces < 1; ++bounces) {
float t;
int id = 0;
if (!intersect_scene(r, t, id))
return make_float3(0.0, 0.0f, 0.0f);
const Sphere &hit = spheres[id];
float3 p = r.orig + r.dir*t;
float3 n = normalize(p - hit.pos);
//accucolor *= hit.albedo * max(dot(r.dir, n), 0.0f);
float3 nl = dot(n, r.dir) < 0 ? n : n*-1;
if (id != 8) {
accucolor *= hit.albedo * max(0.0f, dot(r.dir, -nl));
}
else {
accucolor += hit.albedo;
}
Sphere light = spheres[8];
float3 d = make_float3(light.pos.x, light.pos.y - light.radius, light.pos.z);
r.orig = p + nl*0.05f; // offset ray origin slightly to prevent self intersection
r.dir = normalize(d - p);
// Shade the area if the point is being blocked by another object
if (intersect_scene(r, t, id)) {
if (id != 8) {
// Something blocking
accucolor *= shade;
}
else {
}
}
}
#else
for (int bounces = 0; bounces < TOTALBOUNCES; ++bounces) {
float t ;
int id = 0;
// if no intersection, then return black
if (!intersect_scene(r, t, id))
return make_float3(0.0f, 0.0f, 0.0f);
// else, hit something!
const Sphere &hit = spheres[id];
float3 p = r.orig + r.dir*t;
float3 n = normalize(p - hit.pos);
float3 nl = dot(n, r.dir) < 0? n : n*-1; // Flip normal if not facing camera
// Add emission of current sphere to accumulated color
accucolor += mask * hit.emis; // First term in rendering equation sum
// create 2 random numbers
float r1 = 2 * M_PI * getrandom(s1, s2); // pick random number on unit circle (radius = 1, circumference = 2*Pi) for azimuth
float r2 = getrandom(s1, s2); // pick random number for elevation
float r2s = sqrtf(r2);
// compute local orthonormal basis uvw at hitpoint to use for calculation random ray direction
// first vector = normal at hitpoint, second vector is orthogonal to first, third vector is orthogonal to first two vectors
float3 w = nl;
float3 u = normalize(cross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = cross(w, u);
float3 d;
float diff_coeff = 0.5;
// Check object's material type
if (hit.refl == DIFF) {
// compute random ray direction on hemisphere using polar coordinates
// cosine weighted importance sampling (favours ray directions closer to normal direction)
d = normalize(u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrtf(1 - r2));
}
else if (hit.refl == SPEC) {
d = reflect_sample_hemisphere(r.dir, n);
diff_coeff = 0.2;
}
else if (hit.refl == REFR) {
// TODO
}
else { // Default at diffuse
d = normalize(u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrtf(1 - r2));
}
// new ray origin is intersection point of previous ray with scene
r.orig = p + nl*0.05f; // offset ray origin slightly to prevent self intersection
r.dir = d;
#ifdef CTBRDF
// ==============
// CT-BRDF
//
float F = fresnel(r.dir, n, 1.0f, 1.2f);
float D = microfacet_dist(r.dir, n);
float G = geometric_atten(r.dir, d, n);
float fr = (F * D * G) / (4 * dot(d, n) * dot(r.dir, n));
mask *= diff_coeff * (hit.albedo * dot(d, nl)/M_PI) + (fr * (1.0 - diff_coeff));
//mask *= hit.albedo * dot(d, nl) * (diff_coeff + fr * (1.0 - diff_coeff));
// ==============
#else
mask *= hit.albedo; // multiply with colour of object
mask *= dot(d, nl); // weigh light contribution using cosine of angle between incident light and normal
mask *= 2; // fudge factor so that we can minimize the number of iterations needed
#endif // CTBRDF or not
}
#endif // Direct vs Global Illumination
return accucolor;
}
__global__ void render_kernel(float3* output_d)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int i = clamp((height - y - 1) * width + x, (unsigned int)0, (unsigned int) (width * height - 1));
//printf("current pixel: %d\n", i);
unsigned int s1 = x;
unsigned int s2 = y;
float3 look_from = make_float3(50, 52, 295.6);
float3 look_at = normalize(make_float3(0, -0.042612, -1));
// Set camera
Ray cam(look_from, look_at);
float3 cx = make_float3(width * .5135 / height, 0.0f, 0.0f); // ray direction offset in x direction
float3 cy = normalize(cross(cx, cam.dir)) * .5135; // ray direction offset in y direction (.5135 is FOV angle)
float3 pixel_color = make_float3(0.0f);
for (int s = 0; s < samples; ++s)
{
// Compute primary ray direction
float3 d = cam.dir + cx*((.25 + x) / width - .5) + cy*((.25 + y) / height - .5);
// Calculate the pixel color at the location
pixel_color = pixel_color + radiance(Ray(cam.orig + d * 40, normalize(d)), &s1, &s2) * (1.0 / samples);
// Forced camera rays to be pushed forward to start in interior ^^
}
//printf("after radiance: %d\n", i);
// Convert 2D to 1D
output_d[i] = make_float3(clamp(pixel_color.x, 0.0f, 1.0f), clamp(pixel_color.y, 0.0f, 1.0f), clamp(pixel_color.z, 0.0f, 1.0f));
}
// Uses the bounding box to restrict where to recalculate
__global__ void render_dynamic_kernel(float3* output_d, float3* min, float3* max)
{
__shared__ float3 color[1024];
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int i = clamp((height - y - 1) * width + (blockIdx.x * blockDim.x), (unsigned int)0, (unsigned int)(width * height - 1));
//unsigned int i = clamp((blockIdx.x * blockDim.x), (unsigned int)0, (unsigned int)(1024));
//printf("current pixel: %d\n", i);
unsigned int s1 = x;
unsigned int s2 = y;
float3 look_from = make_float3(50, 52, 295.6);
float3 look_at = normalize(make_float3(0, -0.042612, -1));
// Set camera
Ray cam(look_from, look_at);
float3 cx = make_float3(width * .5135 / height, 0.0f, 0.0f); // ray direction offset in x direction
float3 cy = normalize(cross(cx, cam.dir)) * .5135; // ray direction offset in y direction (.5135 is FOV angle)
float3 pixel_color = make_float3(0.0f);
// Compute primary ray direction
float3 d = cam.dir + cx*((.25 + x) / width - .5) + cy*((.25 + y) / height - .5);
// Calculate the pixel color at the location
// TODO
unsigned int offset = 0;
color[i] = color[i] + radiance(Ray(cam.orig + d * 40, normalize(d)), &s1, &s2) * (1.0 / samples);
// Forced camera rays to be pushed forward to start in interior ^^
__syncthreads();
//printf("after radiance: %d\n", i);
//output_d[i] = make_float3(clamp(color[i].x, 0.0f, 1.0f), clamp(color[i].y, 0.0f, 1.0f), clamp(color[i].z, 0.0f, 1.0f));
output_d[i] = make_float3(0.0f, 0.0f, 0.0f);
}
// Clamp values to be in range [0.0, 1.0]
inline float clamp(float x) { return x < 0.0f? 0.0f : x > 1.0f? 1.0f : x; }
// Converts RGB float in range [0, 1] to int range [0, 255], while performing gamma correction
inline int toInt(float x) { return int(pow(clamp(x), 1 / 2.2) * 255 + .5); }
// Modifies the global scene array that's allocated on the GPU
__global__ void move_sphere_kernel(int* dynamic_idx, float3* velocity, float* delta_t)
{
spheres[(*dynamic_idx)].pos += (*velocity) * (*delta_t);
}
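// Launched with one thread (grid and block of size 1) from the render loop; it only needs to update
// the position of the single dynamic sphere stored in device global memory between frames.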
int main()
{
GpuTimer timer;
float3* output_h = new float3[3 * width * height];
float3* output_d;
// allocate memory to gpu
hipMalloc((void**)&output_d, 3 * width * height * sizeof(float3));
// specify the block and grid size for CUDA threads over SMs
dim3 block(16, 16, 1);
dim3 grid(width / block.x, height / block.y, 1);
hipProfilerStart();
/* ================= Dynamic object specifications ==================
* Variables:
* dynamic_sphere: the index of the object that will be given a velocity in the scene
* velocity: the speed at which the dynamic object is to move. A float3 (x, y, z)
* delta_t: the time lapse per frame.
*/
int dynamic_sphere = 7;
float3 velocity = make_float3(-2.0f, 2.0f, 0.0f);
float delta_t = 1.0f;
// For the move kernel
int* dynamic_idx;
float3* velocity_d;
float* delta_t_d;
hipMalloc((void**)&dynamic_idx, sizeof(int));
hipMalloc((void**)&velocity_d, sizeof(float3));
hipMalloc((void**)&delta_t_d, sizeof(float));
hipMemcpy(dynamic_idx, &dynamic_sphere, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(velocity_d, &velocity, sizeof(float3), hipMemcpyHostToDevice);
hipMemcpy(delta_t_d, &delta_t, sizeof(float), hipMemcpyHostToDevice);
// Render loop generates frames (separate image files)
int frames = 1;
for (int i = 1; i <= frames; ++i)
{
// Launch
timer.Start();
hipLaunchKernelGGL(( render_kernel) , dim3(grid), dim3(block) , 0, 0, output_d);
//render_dynamic_kernel << < dim3(width / 1024, height / 1024, 1), 1024 >> > (output_d, spheres_d, 0, 0); // NOT WORKING PROPERLY
hipDeviceSynchronize();
timer.Stop();
printf("Render Kernel %d Time: %g ms\n", i, timer.Elapsed());
// Copy the colors back to host
hipMemcpy(output_h, output_d, width * height * sizeof(float3), hipMemcpyDeviceToHost);
// Get new name for next frame
std::string number = std::to_string(i);
std::string file_name = "pt_dynamic" + number + ".ppm";
// Write to a ppm file
FILE *myFile = fopen(file_name.c_str(), "w");
fprintf(myFile, "P3\n%d %d\n%d\n", width, height, 255);
for (int i = 0; i < width * height; ++i)
{
fprintf(myFile, "%d %d %d ", toInt(output_h[i].x),
toInt(output_h[i].y),
toInt(output_h[i].z));
}
fclose(myFile);
// Move the dynamic object based on specified time step and velocity
hipLaunchKernelGGL(( move_sphere_kernel) , dim3(1), dim3(1) , 0, 0, dynamic_idx, velocity_d, delta_t_d);
}
hipProfilerStop();
// Free any allocated memory on GPU
hipFree(output_d);
hipFree(dynamic_idx);
hipFree(velocity_d);
hipFree(delta_t_d);
// Free allocated memory on CPU
delete[] output_h;
return 0;
}
| 6c991f98da49e2664d701ec5cc9a9ee89c8e7444.cu | // PBR pt in CUDA using the Cook-Torrance model by Danny Huynh, 2017 (modified for simple animations)
#include <iostream>
#include <string>
#include "gputimer.h"
#include <device_launch_parameters.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_functions.h>
#include <device_functions.h>
#include <cuda_profiler_api.h>
#include "cutil_math.h"
#ifdef __WIN32
#include <Windows.h>
#else
#include <direct.h>
#endif
#define M_PI 3.14159265359f
#define width 1280
#define height 720
#define samples 1024
#define alpha 0.5
// Other settings
//#define CTBRDF // Uncomment to use the Cook-Torrance reflectance model
#define GLOBAL // Uncomment to use only direct lighting
#define TOTALBOUNCES 4
struct Ray {
float3 orig;
float3 dir;
__device__ Ray(float3 _orig, float3 _dir) : orig(_orig), dir(_dir) { }
};
enum Refl_t { DIFF, SPEC, REFR }; // TODO: Refractions not yet defined
struct Sphere {
float radius;
float3 pos, emis, albedo;
Refl_t refl;
__device__ float intersect_sphere(const Ray &r) const {
//float3 oc = r.orig - pos;
float3 oc = pos - r.orig;
float t, epsilon = 0.0001f;
float b = dot(r.dir, oc);
float discr = b*b - dot(r.dir, r.dir) * (dot(oc, oc) - radius*radius);
//float discr = b*b - dot(oc, oc) + radius*radius;
// if discriminant is not negative, then there is an intersection
if (discr < 0) { return 0; }
else { discr = sqrtf(discr); }
return (t = b - discr) > epsilon ? t : ((t = b + discr) > epsilon ? t : 0);
}
};
// SCENE
// { float radius, { float3 position }, { float3 emission }, { float3 colour }, refl_type }
__device__ Sphere spheres[] =
{
{ 1e5f, { 1e5f + 1.0f, 40.8f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { 0.85f, 0.35f, 0.35f }, DIFF }, //Left
{ 1e5f, { -1e5f + 99.0f, 40.8f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .35f, .35f, .85f }, DIFF}, //Right
{ 1e5f, { 50.0f, 40.8f, 1e5f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Back
{ 1e5f, { 50.0f, 40.8f, -1e5f + 600.0f }, { 0.0f, 0.0f, 0.0f }, { 1.00f, 1.00f, 1.00f }, DIFF }, //Frnt
{ 1e5f, { 50.0f, 1e5f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Botm
{ 1e5f, { 50.0f, -1e5f + 81.6f, 81.6f }, { 0.0f, 0.0f, 0.0f }, { .75f, .75f, .75f }, DIFF }, //Top
{ 16.5f, { 27.0f, 16.5f, 47.0f }, { 0.0f, 0.0f, 0.0f }, { 0.9f, 0.1f, 0.1f }, DIFF }, // small sphere 1
{ 16.5f, { 73.0f, 16.5f, 78.0f }, { 0.0f, 0.0f, 0.0f }, { 0.1f, 0.3f, 1.0f }, SPEC }, // small sphere 2
{ 600.0f, { 50.0f, 681.6f - .77f, 81.6f }, { 2.0f, 1.8f, 1.6f }, { 0.0f, 0.0f, 0.0f }, DIFF } // Light
};
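// A hypothetical extra entry would use the same { radius, { position }, { emission }, { albedo }, refl } layout,
// e.g. a small mirror ball: { 10.0f, { 50.0f, 10.0f, 100.0f }, { 0.0f, 0.0f, 0.0f }, { .9f, .9f, .9f }, SPEC }
// (illustration only). Note the direct-lighting branch assumes the light source stays at spheres[8].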
// check speeds if done inline
__device__ inline bool intersect_scene(const Ray &r, float &t, int &id) {
float n = sizeof(spheres)/sizeof(Sphere);
float d, inf = t = FLT_MAX;
for (int i = int(n) - 1; i >= 0; --i) {
if ( (d = spheres[i].intersect_sphere(r)) && d < t) {
t = d;
id = i;
}
}
return t < inf;
}
// random number generator from https://github.com/gz/rust-raytracer
__device__ static float getrandom(unsigned int *seed0, unsigned int *seed1) {
*seed0 = 36969 * ((*seed0) & 65535) + ((*seed0) >> 16); // hash the seeds using bitwise AND and bitshifts
*seed1 = 18000 * ((*seed1) & 65535) + ((*seed1) >> 16);
unsigned int ires = ((*seed0) << 16) + (*seed1);
// Convert to float
union {
float f;
unsigned int ui;
} res;
res.ui = (ires & 0x007fffff) | 0x40000000; // bitwise AND, bitwise OR
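// The exponent bits are pinned to 2^1 while the mantissa stays random, so res.f is uniform in [2, 4);
// the return below rescales that range to [0, 1).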
return (res.f - 2.f) / 2.f;
}
// For Diffuse Materials
__device__ float3 uniform_sample_hemisphere(const float &r1, const float &r2) {
// r1 = cos(theta) = y
float sintheta = sqrtf(1 - r1*r1);
float phi = 2 * M_PI * r2;
float x = sintheta * cosf(phi);
float z = sintheta * sinf(phi);
return make_float3(x, r1, z);
}
// For Specular Materials
__device__ float3 reflect_sample_hemisphere(float3 light_dir, float3 norm)
{
return light_dir - 2 * dot(light_dir, norm) * norm;
}
__device__ float reflect_coeff(float n1, float n2)
{
float f0 = ( (n1 - n2) / (n1 + n2) );
return f0 * f0;
}
// Calculates fresnel coefficient
__device__ float fresnel(float3 l, float3 norm, float n1, float n2)
{
float f0 = reflect_coeff(n1, n2);
return f0 + (1 - f0)*pow(1 - dot(l, norm), 5);
}
// Calculates proportion of microfacets pointing in direction of half-vector h
__device__ float microfacet_dist(float3 m, float3 n)
{
float cos_m = dot(m, n);
float tan_m = ( (1 - cos_m*cos_m) /cos_m );
float numer = alpha * alpha * max(0.0f, dot(m, n));
// Distribution of microfacets:
float angle = (alpha * alpha) + (tan_m * tan_m);
float denom = M_PI * pow(cos_m, 4) * angle * angle;
return numer / denom;
}
// Calculates proportion of microfacets that are masked or shadowed
__device__ float geometric_atten(float3 v, float3 l, float3 n)
{
float3 h = normalize(v + l);
float view = (2 * max(0.0f, dot(n, h)) * max(0.0f, dot(n, v))) / max(0.0f, dot(v, h));
float light = (2 * max(0.0f, dot(n, h)) * max(0.0f, dot(n, l))) / max(0.0f, dot(l, h));
return min(1.0f, min(view, light));
}
// Compute the Cook-Torrance BRDF
__device__ float ct_brdf(const float3 norm, float3 &l, const float3 nl, unsigned int *s1, unsigned int *s2)
{
// Sample unit hemisphere
// create 2 random numbers
float r1 = 2 * M_PI * getrandom(s1, s2); // pick random number on unit circle (radius = 1, circumference = 2*Pi) for azimuth
float r2 = getrandom(s1, s2); // pick random number for elevation
float r2s = sqrtf(r2);
float3 sampleLightDir = uniform_sample_hemisphere(r1, r2);
// Compute local orthonormal bases uvw at hitpoint to use for calculating random ray direction
float3 w = nl;
float3 u = normalize(cross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = cross(w, u);
// Check which object our new light direction hits
// get Fresnel
float F = fresnel(l, norm, 1.0f, 1.2f);
// get D
float D = microfacet_dist(l, norm);
// get G
float G = geometric_atten(v, l, norm);
float fr = (F * D * G) / (4 * dot(l, norm) * dot(v, norm));
// Set the sampled light direction as the new incident light direction
l = sampleLightDir;
return fr;
}
// Radiance function, solves rendering equation
__device__ float3 radiance(Ray &r, unsigned int *s1, unsigned int *s2)
{
//("In radiance, at x:%d y:%d \n", *s1, *s2);
float3 accucolor = make_float3(0.0f, 0.0f, 0.0f);
float3 mask = make_float3(1.0f, 1.0f, 1.0f);
// ray bounce loop (no Russian Roulette)
#ifndef GLOBAL
accucolor = make_float3(1.0f, 1.0f, 1.0f);
float3 shade = make_float3(0.1f, 0.1f, 0.1f);
for (int bounces = 0; bounces < 1; ++bounces) {
float t;
int id = 0;
if (!intersect_scene(r, t, id))
return make_float3(0.0, 0.0f, 0.0f);
const Sphere &hit = spheres[id];
float3 p = r.orig + r.dir*t;
float3 n = normalize(p - hit.pos);
//accucolor *= hit.albedo * max(dot(r.dir, n), 0.0f);
float3 nl = dot(n, r.dir) < 0 ? n : n*-1;
if (id != 8) {
accucolor *= hit.albedo * max(0.0f, dot(r.dir, -nl));
}
else {
accucolor += hit.albedo;
}
Sphere light = spheres[8];
float3 d = make_float3(light.pos.x, light.pos.y - light.radius, light.pos.z);
r.orig = p + nl*0.05f; // offset ray origin slightly to prevent self intersection
r.dir = normalize(d - p);
// Shade the area if the point is being blocked by another object
if (intersect_scene(r, t, id)) {
if (id != 8) {
// Something blocking
accucolor *= shade;
}
else {
}
}
}
#else
for (int bounces = 0; bounces < TOTALBOUNCES; ++bounces) {
float t ;
int id = 0;
// if no intersection, then return black
if (!intersect_scene(r, t, id))
return make_float3(0.0f, 0.0f, 0.0f);
// else, hit something!
const Sphere &hit = spheres[id];
float3 p = r.orig + r.dir*t;
float3 n = normalize(p - hit.pos);
float3 nl = dot(n, r.dir) < 0? n : n*-1; // Flip normal if not facing camera
// Add emission of current sphere to accumulated color
accucolor += mask * hit.emis; // First term in rendering equation sum
// create 2 random numbers
float r1 = 2 * M_PI * getrandom(s1, s2); // pick random number on unit circle (radius = 1, circumference = 2*Pi) for azimuth
float r2 = getrandom(s1, s2); // pick random number for elevation
float r2s = sqrtf(r2);
// compute local orthonormal basis uvw at hitpoint to use for calculation random ray direction
// first vector = normal at hitpoint, second vector is orthogonal to first, third vector is orthogonal to first two vectors
float3 w = nl;
float3 u = normalize(cross((fabs(w.x) > .1 ? make_float3(0, 1, 0) : make_float3(1, 0, 0)), w));
float3 v = cross(w, u);
float3 d;
float diff_coeff = 0.5;
// Check object's material type
if (hit.refl == DIFF) {
// compute random ray direction on hemisphere using polar coordinates
// cosine weighted importance sampling (favours ray directions closer to normal direction)
d = normalize(u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrtf(1 - r2));
}
else if (hit.refl == SPEC) {
d = reflect_sample_hemisphere(r.dir, n);
diff_coeff = 0.2;
}
else if (hit.refl == REFR) {
// TODO
}
else { // Default at diffuse
d = normalize(u*cos(r1)*r2s + v*sin(r1)*r2s + w*sqrtf(1 - r2));
}
// new ray origin is intersection point of previous ray with scene
r.orig = p + nl*0.05f; // offset ray origin slightly to prevent self intersection
r.dir = d;
#ifdef CTBRDF
// ==============
// CT-BRDF
//
float F = fresnel(r.dir, n, 1.0f, 1.2f);
float D = microfacet_dist(r.dir, n);
float G = geometric_atten(r.dir, d, n);
float fr = (F * D * G) / (4 * dot(d, n) * dot(r.dir, n));
mask *= diff_coeff * (hit.albedo * dot(d, nl)/M_PI) + (fr * (1.0 - diff_coeff));
//mask *= hit.albedo * dot(d, nl) * (diff_coeff + fr * (1.0 - diff_coeff));
// ==============
#else
mask *= hit.albedo; // multiply with colour of object
mask *= dot(d, nl); // weigh light contribution using cosine of angle between incident light and normal
mask *= 2; // fudge factor so that we can minimize the number of iterations needed
#endif // CTBRDF or not
}
#endif // Direct vs Global Illumination
return accucolor;
}
__global__ void render_kernel(float3* output_d)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int i = clamp((height - y - 1) * width + x, (unsigned int)0, (unsigned int) (width * height - 1));
//printf("current pixel: %d\n", i);
unsigned int s1 = x;
unsigned int s2 = y;
float3 look_from = make_float3(50, 52, 295.6);
float3 look_at = normalize(make_float3(0, -0.042612, -1));
// Set camera
Ray cam(look_from, look_at);
float3 cx = make_float3(width * .5135 / height, 0.0f, 0.0f); // ray direction offset in x direction
float3 cy = normalize(cross(cx, cam.dir)) * .5135; // ray direction offset in y direction (.5135 is FOV angle)
float3 pixel_color = make_float3(0.0f);
for (int s = 0; s < samples; ++s)
{
// Compute primary ray direction
float3 d = cam.dir + cx*((.25 + x) / width - .5) + cy*((.25 + y) / height - .5);
// Calculate the pixel color at the location
pixel_color = pixel_color + radiance(Ray(cam.orig + d * 40, normalize(d)), &s1, &s2) * (1.0 / samples);
// Forced camera rays to be pushed forward to start in interior ^^
}
//printf("after radiance: %d\n", i);
// Convert 2D to 1D
output_d[i] = make_float3(clamp(pixel_color.x, 0.0f, 1.0f), clamp(pixel_color.y, 0.0f, 1.0f), clamp(pixel_color.z, 0.0f, 1.0f));
}
// Uses the bounding box to restrict where to recalculate
__global__ void render_dynamic_kernel(float3* output_d, float3* min, float3* max)
{
__shared__ float3 color[1024];
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int i = clamp((height - y - 1) * width + (blockIdx.x * blockDim.x), (unsigned int)0, (unsigned int)(width * height - 1));
//unsigned int i = clamp((blockIdx.x * blockDim.x), (unsigned int)0, (unsigned int)(1024));
//printf("current pixel: %d\n", i);
unsigned int s1 = x;
unsigned int s2 = y;
float3 look_from = make_float3(50, 52, 295.6);
float3 look_at = normalize(make_float3(0, -0.042612, -1));
// Set camera
Ray cam(look_from, look_at);
float3 cx = make_float3(width * .5135 / height, 0.0f, 0.0f); // ray direction offset in x direction
float3 cy = normalize(cross(cx, cam.dir)) * .5135; // ray direction offset in y direction (.5135 is FOV angle)
float3 pixel_color = make_float3(0.0f);
// Compute primary ray direction
float3 d = cam.dir + cx*((.25 + x) / width - .5) + cy*((.25 + y) / height - .5);
// Calculate the pixel color at the location
// TODO
unsigned int offset = 0;
color[i] = color[i] + radiance(Ray(cam.orig + d * 40, normalize(d)), &s1, &s2) * (1.0 / samples);
// Forced camera rays to be pushed forward to start in interior ^^
__syncthreads();
//printf("after radiance: %d\n", i);
//output_d[i] = make_float3(clamp(color[i].x, 0.0f, 1.0f), clamp(color[i].y, 0.0f, 1.0f), clamp(color[i].z, 0.0f, 1.0f));
output_d[i] = make_float3(0.0f, 0.0f, 0.0f);
}
// Clamp values to be in range [0.0, 1.0]
inline float clamp(float x) { return x < 0.0f? 0.0f : x > 1.0f? 1.0f : x; }
// Converts RGB float in range [0, 1] to int range [0, 255], while performing gamma correction
inline int toInt(float x) { return int(pow(clamp(x), 1 / 2.2) * 255 + .5); }
// Modifies the global scene array that's allocated on the GPU
__global__ void move_sphere_kernel(int* dynamic_idx, float3* velocity, float* delta_t)
{
spheres[(*dynamic_idx)].pos += (*velocity) * (*delta_t);
}
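// Launched with one thread (grid and block of size 1) from the render loop; it only needs to update
// the position of the single dynamic sphere stored in device global memory between frames.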
int main()
{
GpuTimer timer;
float3* output_h = new float3[3 * width * height];
float3* output_d;
// allocate memory to gpu
cudaMalloc((void**)&output_d, 3 * width * height * sizeof(float3));
// specify the block and grid size for CUDA threads over SMs
dim3 block(16, 16, 1);
dim3 grid(width / block.x, height / block.y, 1);
cudaProfilerStart();
/* ================= Dynamic object specifications ==================
* Variables:
* dynamic_sphere: the index of the object that will be given a velocity in the scene
* velocity: the speed at which the dynamic object is to move. A float3 (x, y, z)
* delta_t: the time lapse per frame.
*/
int dynamic_sphere = 7;
float3 velocity = make_float3(-2.0f, 2.0f, 0.0f);
float delta_t = 1.0f;
// For the move kernel
int* dynamic_idx;
float3* velocity_d;
float* delta_t_d;
cudaMalloc((void**)&dynamic_idx, sizeof(int));
cudaMalloc((void**)&velocity_d, sizeof(float3));
cudaMalloc((void**)&delta_t_d, sizeof(float));
cudaMemcpy(dynamic_idx, &dynamic_sphere, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(velocity_d, &velocity, sizeof(float3), cudaMemcpyHostToDevice);
cudaMemcpy(delta_t_d, &delta_t, sizeof(float), cudaMemcpyHostToDevice);
// Render loop generates frames (separate image files)
int frames = 1;
for (int i = 1; i <= frames; ++i)
{
// Launch
timer.Start();
render_kernel <<< grid, block >>> (output_d);
//render_dynamic_kernel << < dim3(width / 1024, height / 1024, 1), 1024 >> > (output_d, spheres_d, 0, 0); // NOT WORKING PROPERLY
cudaDeviceSynchronize();
timer.Stop();
printf("Render Kernel %d Time: %g ms\n", i, timer.Elapsed());
// Copy the colors back to host
cudaMemcpy(output_h, output_d, width * height * sizeof(float3), cudaMemcpyDeviceToHost);
// Get new name for next frame
std::string number = std::to_string(i);
std::string file_name = "pt_dynamic" + number + ".ppm";
// Write to a ppm file
FILE *myFile = fopen(file_name.c_str(), "w");
fprintf(myFile, "P3\n%d %d\n%d\n", width, height, 255);
for (int i = 0; i < width * height; ++i)
{
fprintf(myFile, "%d %d %d ", toInt(output_h[i].x),
toInt(output_h[i].y),
toInt(output_h[i].z));
}
fclose(myFile);
// Move the dynamic object based on specified time step and velocity
move_sphere_kernel <<< 1, 1 >>> (dynamic_idx, velocity_d, delta_t_d);
}
cudaProfilerStop();
// Free any allocated memory on GPU
cudaFree(output_d);
cudaFree(dynamic_idx);
cudaFree(velocity_d);
cudaFree(delta_t_d);
// Free allocated memory on CPU
delete[] output_h;
return 0;
}
|
d3d9d95d28d8c99c61fbe28d12dd9e9cdc926516.hip | // !!! This is a file automatically generated by hipify!!!
// std::system includes
#include <memory>
#include <iostream>
#include <stdio.h>
#include <time.h>
// CUDA-C includes
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
//#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
printf("test\n");
const int num_vert = 629;
char const* const inputFile = argv[1];
FILE* file = fopen(inputFile, "r");
int vert_x[num_vert];
int vert_y[num_vert];
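// Each line of the input file is expected to hold two integers (x and y vertex coordinates),
// one pair per vertex, matching the fixed num_vert = 629 entries above.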
printf("test\n");
char line[256];
int i = 0;
while (i < num_vert && fgets(line, sizeof(line), file) != NULL) {
sscanf(line, "%d %d", &vert_x[i], &vert_y[i]);
++i;
}
fclose(file);
printf("test\n");
//Sanity check for read:
srand(time(NULL));
int rand_i = rand() % num_vert;
printf("Coordinates on random index %d are (%d,%d)\n", rand_i, vert_x[rand_i], vert_y[rand_i]);
printf("Coordinates on first index are (%d,%d)\n", vert_x[0], vert_y[0]);
printf("Coordinates on last index are (%d,%d)\n", vert_x[num_vert - 1], vert_y[num_vert - 1]);
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess)
{
printf("hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
for (int dev = 0; dev < deviceCount; ++dev)
{
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("\nDevice %d: %s \n", dev, deviceProp.name);
printf("\nMaxThreadsPerBlock: %d \n", deviceProp.maxThreadsPerBlock);
printf("\nMaxThreadDim (%d,%d,%d) \n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
printf("\nMaxGridSize (%d,%d,%d) \n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
}
// finish
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
char ch;
std::cin >> ch;
exit(EXIT_SUCCESS);
} | d3d9d95d28d8c99c61fbe28d12dd9e9cdc926516.cu | // std::system includes
#include <memory>
#include <iostream>
#include <stdio.h>
#include <time.h>
// CUDA-C includes
#include <cuda.h>
#include <cuda_runtime.h>
//#include <helper_cuda.h>
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
printf("test\n");
const int num_vert = 629;
char const* const inputFile = argv[1];
FILE* file = fopen(inputFile, "r");
int vert_x[num_vert];
int vert_y[num_vert];
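// Each line of the input file is expected to hold two integers (x and y vertex coordinates),
// one pair per vertex, matching the fixed num_vert = 629 entries above.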
printf("test\n");
char line[256];
int i = 0;
while (i < num_vert && fgets(line, sizeof(line), file) != NULL) {
sscanf(line, "%d %d", &vert_x[i], &vert_y[i]);
++i;
}
fclose(file);
printf("test\n");
//Sanity check for read:
srand(time(NULL));
int rand_i = rand() % num_vert;
printf("Coordinates on random index %d are (%d,%d)\n", rand_i, vert_x[rand_i], vert_y[rand_i]);
printf("Coordinates on first index are (%d,%d)\n", vert_x[0], vert_y[0]);
printf("Coordinates on last index are (%d,%d)\n", vert_x[num_vert - 1], vert_y[num_vert - 1]);
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess)
{
printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id));
printf("Result = FAIL\n");
exit(EXIT_FAILURE);
}
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0)
{
printf("There are no available device(s) that support CUDA\n");
}
else
{
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
for (int dev = 0; dev < deviceCount; ++dev)
{
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("\nDevice %d: %s \n", dev, deviceProp.name);
printf("\nMaxThreadsPerBlock: %d \n", deviceProp.maxThreadsPerBlock);
printf("\nMaxThreadDim (%d,%d,%d) \n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]);
printf("\nMaxGridSize (%d,%d,%d) \n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
}
// finish
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
char ch;
std::cin >> ch;
exit(EXIT_SUCCESS);
} |
70250ec54c3ca4ce3a0a0674dba789e0840282e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Arman Pazouki, Wei Hu
// =============================================================================
#include <thrust/extrema.h>
#include <thrust/sort.h>
#include "chrono_fsi/physics/ChFsiForceExplicitSPH.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
//================================================================================================================================
namespace chrono {
namespace fsi {
//--------------------------------------------------------------------------------------------------------------------------------
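// calc_G_Matrix: accumulates the outer products -r_ij * grad W_ij * V_j over the neighbouring cells
// and inverts the resulting 3x3 matrix analytically, producing the kernel-gradient correction tensor
// G_i; if the matrix is near-singular (|det| <= 0.01) the identity is used instead.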
__device__ __inline__ void calc_G_Matrix(Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* G_i,
uint* cellStart,
uint* cellEnd,
uint* indexOfIndex) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers - numObjectsD.numBoundaryMarkers)
return;
uint index = indexOfIndex[id];
Real3 posRadA = mR3(sortedPosRad[index]);
Real h_i = sortedPosRad[index].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
// This is the elements of inverse of G
Real mGi[9] = {0.0};
// examine neighbouring cells
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
Real dd = rij.x * rij.x + rij.y * rij.y + rij.z * rij.z;
if (dd > SqRadii || sortedRhoPreMu[j].w < -1.5)
continue;
Real3 grad_i_wij = GradWh(rij, h_i);
Real3 grw_vj = grad_i_wij * paramsD.volume0;
mGi[0] -= rij.x * grw_vj.x;
mGi[1] -= rij.x * grw_vj.y;
mGi[2] -= rij.x * grw_vj.z;
mGi[3] -= rij.y * grw_vj.x;
mGi[4] -= rij.y * grw_vj.y;
mGi[5] -= rij.y * grw_vj.z;
mGi[6] -= rij.z * grw_vj.x;
mGi[7] -= rij.z * grw_vj.y;
mGi[8] -= rij.z * grw_vj.z;
}
}
}
Real Det = (mGi[0] * mGi[4] * mGi[8] - mGi[0] * mGi[5] * mGi[7] -
mGi[1] * mGi[3] * mGi[8] + mGi[1] * mGi[5] * mGi[6] +
mGi[2] * mGi[3] * mGi[7] - mGi[2] * mGi[4] * mGi[6]);
if (abs(Det) > 0.01) {
Real OneOverDet = 1.0 / Det;
G_i[0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) * OneOverDet;
G_i[1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) * OneOverDet;
G_i[2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) * OneOverDet;
G_i[3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) * OneOverDet;
G_i[4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) * OneOverDet;
G_i[5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) * OneOverDet;
G_i[6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) * OneOverDet;
G_i[7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) * OneOverDet;
G_i[8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) * OneOverDet;
} else {
for (int i = 0; i < 9; i++) {
G_i[i] = 0.0;
}
G_i[0] = 1;
G_i[4] = 1;
G_i[8] = 1;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ __inline__ void calc_A_Matrix(Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* A_i,
Real* G_i,
uint* cellStart,
uint* cellEnd,
uint* indexOfIndex) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers - numObjectsD.numBoundaryMarkers)
return;
uint index = indexOfIndex[id];
Real3 posRadA = mR3(sortedPosRad[index]);
Real h_i = sortedPosRad[index].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
// examine neighbouring cells
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
Real dd = rij.x * rij.x + rij.y * rij.y + rij.z * rij.z;
if (dd > SqRadii || sortedRhoPreMu[j].w < -1.5)
continue;
Real h_j = sortedPosRad[j].w;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_ij = GradWh(rij, h_ij);
Real V_j = paramsD.markerMass / paramsD.rho0;
Real com_part = 0;
com_part = (G_i[0] * grad_ij.x +
G_i[1] * grad_ij.y + G_i[2] * grad_ij.z) * V_j;
A_i[0] += rij.x * rij.x * com_part; // 111
A_i[1] += rij.x * rij.y * com_part; // 112
A_i[2] += rij.x * rij.z * com_part; // 113
A_i[3] += rij.y * rij.x * com_part; // 121
A_i[4] += rij.y * rij.y * com_part; // 122
A_i[5] += rij.y * rij.z * com_part; // 123
A_i[6] += rij.z * rij.x * com_part; // 131
A_i[7] += rij.z * rij.y * com_part; // 132
A_i[8] += rij.z * rij.z * com_part; // 133
com_part = (G_i[3] * grad_ij.x +
G_i[4] * grad_ij.y + G_i[5] * grad_ij.z) * V_j;
A_i[9] += rij.x * rij.x * com_part; // 211
A_i[10] += rij.x * rij.y * com_part; // 212
A_i[11] += rij.x * rij.z * com_part; // 213
A_i[12] += rij.y * rij.x * com_part; // 221
A_i[13] += rij.y * rij.y * com_part; // 222
A_i[14] += rij.y * rij.z * com_part; // 223
A_i[15] += rij.z * rij.x * com_part; // 231
A_i[16] += rij.z * rij.y * com_part; // 232
A_i[17] += rij.z * rij.z * com_part; // 233
com_part = (G_i[6] * grad_ij.x +
G_i[7] * grad_ij.y + G_i[8] * grad_ij.z) * V_j;
A_i[18] += rij.x * rij.x * com_part; // 311
A_i[19] += rij.x * rij.y * com_part; // 312
A_i[20] += rij.x * rij.z * com_part; // 313
A_i[21] += rij.y * rij.x * com_part; // 321
A_i[22] += rij.y * rij.y * com_part; // 322
A_i[23] += rij.y * rij.z * com_part; // 323
A_i[24] += rij.z * rij.x * com_part; // 331
A_i[25] += rij.z * rij.y * com_part; // 332
A_i[26] += rij.z * rij.z * com_part; // 333
}
}
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ __inline__ void calc_L_Matrix(Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* A_i,
Real* L_i,
Real* G_i,
uint* cellStart,
uint* cellEnd,
uint* indexOfIndex) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers - numObjectsD.numBoundaryMarkers)
return;
uint index = indexOfIndex[id];
Real3 posRadA = mR3(sortedPosRad[index]);
Real h_i = sortedPosRad[index].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
Real B[36] = {0.0};
Real L[6] = {0.0};
// get address in grid
int3 gridPos = calcGridPos(posRadA);
// examine neighbouring cells
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
// get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
Real dd = rij.x * rij.x + rij.y * rij.y + rij.z * rij.z;
if (dd > SqRadii || sortedRhoPreMu[j].w < -1.5)
continue;
Real d = length(rij);
Real3 eij = rij / d;
Real h_j = sortedPosRad[j].w;
// Real m_j = paramsD.markerMass;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_ij = GradWh(rij, h_ij);
Real V_j = paramsD.markerMass / paramsD.rho0;
Real com_part = 0;
// mn=11
Real XX = (eij.x * grad_ij.x);
Real XY = (eij.x * grad_ij.y + eij.y * grad_ij.x);
Real XZ = (eij.x * grad_ij.z + eij.z * grad_ij.x);
Real YY = (eij.y * grad_ij.y);
Real YZ = (eij.y * grad_ij.z + eij.z * grad_ij.y);
Real ZZ = (eij.z * grad_ij.z);
com_part = (A_i[0] * eij.x + A_i[9] * eij.y +
A_i[18] * eij.z + rij.x * eij.x) * V_j;
B[6 * 0 + 0] += com_part * XX; // 11
B[6 * 0 + 1] += com_part * XY; // 12
B[6 * 0 + 2] += com_part * XZ; // 13
B[6 * 0 + 3] += com_part * YY; // 14
B[6 * 0 + 4] += com_part * YZ; // 15
B[6 * 0 + 5] += com_part * ZZ; // 15
// mn=12
com_part = (A_i[1] * eij.x + A_i[10] * eij.y +
A_i[19] * eij.z + rij.x * eij.y) * V_j;
B[6 * 1 + 0] += com_part * XX; // 21
B[6 * 1 + 1] += com_part * XY; // 22
B[6 * 1 + 2] += com_part * XZ; // 23
B[6 * 1 + 3] += com_part * YY; // 24
B[6 * 1 + 4] += com_part * YZ; // 25
B[6 * 1 + 5] += com_part * ZZ; // 25
// mn=13
com_part = (A_i[2] * eij.x + A_i[11] * eij.y +
A_i[20] * eij.z + rij.x * eij.z) * V_j;
B[6 * 2 + 0] += com_part * XX; // 31
B[6 * 2 + 1] += com_part * XY; // 32
B[6 * 2 + 2] += com_part * XZ; // 33
B[6 * 2 + 3] += com_part * YY; // 34
B[6 * 2 + 4] += com_part * YZ; // 35
B[6 * 2 + 5] += com_part * ZZ; // 36
// Note that we skip mn=21 since it is similar to mn=12
// mn=22
com_part = (A_i[4] * eij.x + A_i[13] * eij.y +
A_i[22] * eij.z + rij.y * eij.y) * V_j;
B[6 * 3 + 0] += com_part * XX; // 41
B[6 * 3 + 1] += com_part * XY; // 42
B[6 * 3 + 2] += com_part * XZ; // 43
B[6 * 3 + 3] += com_part * YY; // 44
B[6 * 3 + 4] += com_part * YZ; // 45
B[6 * 3 + 5] += com_part * ZZ; // 46
// mn=23
com_part = (A_i[5] * eij.x + A_i[14] * eij.y +
A_i[23] * eij.z + rij.y * eij.z) * V_j;
B[6 * 4 + 0] += com_part * XX; // 51
B[6 * 4 + 1] += com_part * XY; // 52
B[6 * 4 + 2] += com_part * XZ; // 53
B[6 * 4 + 3] += com_part * YY; // 54
B[6 * 4 + 4] += com_part * YZ; // 55
B[6 * 4 + 5] += com_part * ZZ; // 56
// mn=33
com_part = (A_i[8] * eij.x + A_i[17] * eij.y +
A_i[26] * eij.z + rij.z * eij.z) * V_j;
B[6 * 5 + 0] += com_part * XX; // 61
B[6 * 5 + 1] += com_part * XY; // 62
B[6 * 5 + 2] += com_part * XZ; // 63
B[6 * 5 + 3] += com_part * YY; // 64
B[6 * 5 + 4] += com_part * YZ; // 65
B[6 * 5 + 5] += com_part * ZZ; // 66
}
}
}
inv6xdelta_mn(B, L);
L_i[0] = L[0];
L_i[1] = L[1];
L_i[2] = L[2];
L_i[3] = L[1];
L_i[4] = L[3];
L_i[5] = L[4];
L_i[6] = L[2];
L_i[7] = L[4];
L_i[8] = L[5];
// Real Det = (L_i[0] * L_i[4] * L_i[8] - L_i[0] * L_i[5] * L_i[7] - L_i[1] * L_i[3] * L_i[8] +
// L_i[1] * L_i[5] * L_i[6] + L_i[2] * L_i[3] * L_i[7] - L_i[2] * L_i[4] * L_i[6]);
// if (abs(Det) < 0.01) {
// for (int i = 0; i < 9; i++) {
// L_i[0 * 9 + i] = 0.0;
// L_i[0 * 9 + 0] = 1;
// L_i[0 * 9 + 4] = 1;
// L_i[0 * 9 + 8] = 1;
// }
// }
// printf("L Det %f\n", Det);
}
//--------------------------------------------------------------------------------------------------------------------------------
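// calIndexOfIndex: records each sorted marker's index and flags boundary (BCE) markers with
// identityOfIndex = 1 (all other markers get 0), based on where the original marker index falls
// in the global numbering.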
__global__ void calIndexOfIndex(uint* indexOfIndex,
uint* identityOfIndex,
uint* gridMarkerIndex) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers)
return;
indexOfIndex[id] = id;
if (gridMarkerIndex[id] >= numObjectsD.numFluidMarkers &&
gridMarkerIndex[id] < numObjectsD.numFluidMarkers + numObjectsD.numBoundaryMarkers) {
identityOfIndex[id] = 1;
} else {
identityOfIndex[id] = 0;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
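// Shear_Stress_Rate: hypoelastic update of the stress tensor for each non-boundary marker. The rate
// combines 2*G times the deviatoric strain rate, a bulk contribution K*tr(e), and spin (tau*w - w*tau)
// terms, i.e. a Jaumann-type co-rotational stress rate, evaluated with kernel gradients corrected by G_i.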
__global__ void Shear_Stress_Rate(uint* indexOfIndex,
Real4* sortedPosRad,
Real4* sortedRhoPreMu,
Real3* sortedVelMas,
Real3* velMas_ModifiedBCE,
Real4* rhoPreMu_ModifiedBCE,
Real3* sortedTauXxYyZz,
Real3* sortedTauXyXzYz,
Real3* sortedDerivTauXxYyZz,
Real3* sortedDerivTauXyXzYz,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers - numObjectsD.numBoundaryMarkers)
return;
uint index = indexOfIndex[id];
if (sortedRhoPreMu[index].w > -0.5)
return;
Real3 posRadA = mR3(sortedPosRad[index]);
Real3 velMasA = sortedVelMas[index];
Real hA = sortedPosRad[index].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
Real tauxx = sortedTauXxYyZz[index].x;
Real tauyy = sortedTauXxYyZz[index].y;
Real tauzz = sortedTauXxYyZz[index].z;
Real tauxy = sortedTauXyXzYz[index].x;
Real tauxz = sortedTauXyXzYz[index].y;
Real tauyz = sortedTauXyXzYz[index].z;
Real tauzx = tauxz;
Real tauzy = tauyz;
Real tauyx = tauxy;
Real dTauxx = 0.0;
Real dTauyy = 0.0;
Real dTauzz = 0.0;
Real dTauxy = 0.0;
Real dTauxz = 0.0;
Real dTauyz = 0.0;
Real G_i[9] = {0.0};
calc_G_Matrix(sortedPosRad, sortedVelMas, sortedRhoPreMu, G_i, cellStart,
cellEnd, indexOfIndex);
// get address in grid
int3 gridPos = calcGridPos(posRadA);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real dd = dist3.x * dist3.x + dist3.y * dist3.y + dist3.z * dist3.z;
if (dd > SqRadii)
continue;
Real3 velMasB = sortedVelMas[j];
Real4 rhoPresMuB = sortedRhoPreMu[j];
if (rhoPresMuB.w > -0.5) {
int bceIndexB = gridMarkerIndex[j] - numObjectsD.numFluidMarkers;
if (!(bceIndexB >= 0 && bceIndexB <
numObjectsD.numBoundaryMarkers + numObjectsD.numRigidMarkers + numObjectsD.numFlexMarkers)) {
printf("Error! bceIndex out of bound, collideCell !\n");
}
rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB];
velMasB = velMas_ModifiedBCE[bceIndexB];
velMasB = 2.0 * velMasB - velMasA;
}
Real rhoB = rhoPresMuB.x;
Real mB = paramsD.markerMass;
Real3 gradW = GradWh(dist3, hA);
Real3 gradW_new;
gradW_new.x = G_i[0] * gradW.x + G_i[1] * gradW.y + G_i[2] * gradW.z;
gradW_new.y = G_i[3] * gradW.x + G_i[4] * gradW.y + G_i[5] * gradW.z;
gradW_new.z = G_i[6] * gradW.x + G_i[7] * gradW.y + G_i[8] * gradW.z;
gradW = gradW_new;
// start to calculate the rate
Real Gm = paramsD.G_shear; // shear modulus of the material
Real half_mB_over_rhoB = 0.5 * (mB / rhoB);
Real3 vAB = velMasA - velMasB;
Real3 vAB_h = (velMasA - velMasB) * half_mB_over_rhoB;
// entries of strain rate tensor
Real exx = -2.0 * vAB_h.x * gradW.x;
Real eyy = -2.0 * vAB_h.y * gradW.y;
Real ezz = -2.0 * vAB_h.z * gradW.z;
Real exy = -vAB_h.x * gradW.y - vAB_h.y * gradW.x;
Real exz = -vAB_h.x * gradW.z - vAB_h.z * gradW.x;
Real eyz = -vAB_h.y * gradW.z - vAB_h.z * gradW.y;
// entries of rotation rate (spin) tensor
// Real wxx = 0.0;
// Real wyy = 0.0;
// Real wzz = 0.0;
Real wxy = -vAB_h.x * gradW.y + vAB_h.y * gradW.x;
Real wxz = -vAB_h.x * gradW.z + vAB_h.z * gradW.x;
Real wyz = -vAB_h.y * gradW.z + vAB_h.z * gradW.y;
Real wyx = -wxy;
// Real wzx = -wxz;
Real wzy = -wyz;
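                        // Hypoelastic constitutive update: dtau/dt = 2G*(e - e_dia*I) + K_bulk*e_dia*I
                        // plus the co-rotational (Jaumann) terms built from the spin tensor w,
                        // where e_dia = tr(e)/3; all terms are accumulated pairwise below.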
Real edia = 1.0 / 3.0 * (exx + eyy + ezz);
Real twoGm = 2.0 * Gm;
Real K_edia = paramsD.K_bulk * 1.0 * edia;
dTauxx += twoGm * (exx - edia) + 2.0 * (tauxy * wxy + tauxz * wxz) + K_edia;
dTauyy += twoGm * (eyy - edia) - 2.0 * (tauyx * wxy - tauyz * wyz) + K_edia;
dTauzz += twoGm * (ezz - edia) - 2.0 * (tauzx * wxz + tauzy * wyz) + K_edia;
dTauxy += twoGm * exy - (tauxx * wxy + tauxz * wzy) + (wxy * tauyy + wxz * tauzy);
dTauxz += twoGm * exz - (tauxx * wxz + tauxy * wyz) + (wxy * tauyz + wxz * tauzz);
dTauyz += twoGm * eyz - (tauyx * wxz + tauyy * wyz) + (wyx * tauxz + wyz * tauzz);
}
}
}
}
}
sortedDerivTauXxYyZz[index] = mR3(dTauxx, dTauyy, dTauzz);
sortedDerivTauXyXzYz[index] = mR3(dTauxy, dTauxz, dTauyz);
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void calcRho_kernel(Real4* sortedPosRad,
Real4* sortedRhoPreMu,
Real4* sortedRhoPreMu_old,
uint* cellStart,
uint* cellEnd,
int density_reinit,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
if (sortedRhoPreMu[index].w > -0.5 && sortedRhoPreMu[index].w < 0.5)
return;
sortedRhoPreMu_old[index].y =
Eos(sortedRhoPreMu_old[index].x, sortedRhoPreMu_old[index].w);
Real3 posRadA = mR3(sortedPosRad[index]);
Real h_i = sortedPosRad[index].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
Real sum_mW = 0;
Real sum_mW_rho = 0.0000001;
Real sum_W = 0.0;
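    // Shepard-filtered density: rho_i = sum_j(m_j W_ij) / sum_j(m_j W_ij / rho_j), which
    // damps the density noise accumulated by integrating the continuity equation.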
// get address in grid
int3 gridPos = calcGridPos(posRadA);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) {
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real dd = dist3.x * dist3.x + dist3.y * dist3.y + dist3.z * dist3.z;
if (dd > SqRadii)
continue;
if (sortedRhoPreMu_old[j].w > -1.5 && sortedRhoPreMu_old[j].w < -0.5) {
Real h_j = sortedPosRad[j].w;
Real m_j = paramsD.markerMass;
Real d = length(dist3);
Real W3 = W3h(d, 0.5 * (h_j + h_i));
sum_mW += m_j * W3;
sum_W += W3;
sum_mW_rho += m_j * W3 / sortedRhoPreMu_old[j].x;
}
}
}
}
}
}
// sortedRhoPreMu[index].x = sum_mW;
if ((density_reinit == 0) &&
(sortedRhoPreMu[index].w > -1.5) && (sortedRhoPreMu[index].w < -0.5))
sortedRhoPreMu[index].x = sum_mW / sum_mW_rho;
if ((sortedRhoPreMu[index].x > 3 * paramsD.rho0 ||
sortedRhoPreMu[index].x < 0.01 * paramsD.rho0) &&
(sortedRhoPreMu[index].w > -1.5) && (sortedRhoPreMu[index].w < -0.5))
printf("(calcRho_kernel)density marker %d, sum_mW=%f, sum_W=%f, h_i=%f\n",
index, sum_mW, sum_W, h_i);
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void calcKernelSupport(Real4* sortedPosRad,
Real4* sortedRhoPreMu,
Real3* sortedKernelSupport,
uint* cellStart,
uint* cellEnd,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
Real h_i = sortedPosRad[index].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
Real3 posRadA = mR3(sortedPosRad[index]);
Real W0 = W3h(0, h_i);
Real sum_W_all = W0;
Real sum_W_identical = W0;
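    // The ratio sum_W_identical / sum_W_all (chi) is used later (e.g. in NS_SSR) to
    // estimate the distance of a marker from the fluid-solid interface for the ADAMI
    // boundary treatment.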
// get address in grid
int3 gridPos = calcGridPos(posRadA);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) {
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real dd = dist3.x*dist3.x + dist3.y*dist3.y + dist3.z*dist3.z;
if (dd > SqRadii)
continue;
Real d = length(dist3);
Real h_j = sortedPosRad[j].w;
Real W3 = W3h(d, 0.5 * (h_j + h_i));
sum_W_all += W3;
if (abs(sortedRhoPreMu[index].w - sortedRhoPreMu[j].w) < 0.001) {
sum_W_identical += W3;
}
}
}
}
}
}
sortedKernelSupport[index].x = sum_W_all;
sortedKernelSupport[index].y = sum_W_identical;
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ __inline__ void modifyPressure(Real4& rhoPresMuB, const Real3& dist3Alpha) {
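    // Apply the prescribed pressure jump (deltaPress) when the neighbor is reached across
    // a periodic boundary, so that a constant driving pressure difference can be imposed
    // in each periodic direction.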
// body force in x direction
rhoPresMuB.y = (dist3Alpha.x > 0.5 * paramsD.boxDims.x) ?
(rhoPresMuB.y - paramsD.deltaPress.x) : rhoPresMuB.y;
rhoPresMuB.y = (dist3Alpha.x < -0.5 * paramsD.boxDims.x) ?
(rhoPresMuB.y + paramsD.deltaPress.x) : rhoPresMuB.y;
    // body force in y direction
rhoPresMuB.y = (dist3Alpha.y > 0.5 * paramsD.boxDims.y) ?
(rhoPresMuB.y - paramsD.deltaPress.y) : rhoPresMuB.y;
rhoPresMuB.y = (dist3Alpha.y < -0.5 * paramsD.boxDims.y) ?
(rhoPresMuB.y + paramsD.deltaPress.y) : rhoPresMuB.y;
    // body force in z direction
rhoPresMuB.y = (dist3Alpha.z > 0.5 * paramsD.boxDims.z) ?
(rhoPresMuB.y - paramsD.deltaPress.z) : rhoPresMuB.y;
rhoPresMuB.y = (dist3Alpha.z < -0.5 * paramsD.boxDims.z) ?
(rhoPresMuB.y + paramsD.deltaPress.z) : rhoPresMuB.y;
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real3 CubicSolve(Real aa, Real bb, Real cc, Real dd) {
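    // Closed-form roots of a*x^3 + b*x^2 + c*x + d = 0. Degenerate or near-zero
    // coefficients and the complex-root case return (0, 0, 0); the three-real-root
    // case uses the trigonometric (Viete) formulation.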
Real disc, q, r, dum1, dum2, term1, r13;
    if (aa == 0) {
        return mR3(0, 0, 0);
    }
    bb /= aa;
    cc /= aa;
    dd /= aa;
if (abs(bb) < 1e-9) {
return mR3(0, 0, 0);
}
if (abs(cc) < 1e-9) {
return mR3(0, 0, 0);
}
if (abs(dd) < 1e-9) {
return mR3(0, 0, 0);
}
q = (3.0 * cc - (bb * bb)) / 9.0;
r = -(27.0 * dd) + bb * (9.0 * cc - 2.0 * (bb * bb));
r /= 54.0;
disc = q * q * q + r * r;
term1 = (bb / 3.0);
/* dataForm.x1Im.value = 0; //The first root is always real.
if (disc > 0) { // one root real, two are complex
s = r + Math.sqrt(disc);
s = ((s < 0) ? -Math.pow(-s, (1.0/3.0)) : Math.pow(s, (1.0/3.0)));
t = r - Math.sqrt(disc);
t = ((t < 0) ? -Math.pow(-t, (1.0/3.0)) : Math.pow(t, (1.0/3.0)));
dataForm.x1Re.value = -term1 + s + t;
term1 += (s + t)/2.0;
dataForm.x3Re.value = dataForm.x2Re.value = -term1;
term1 = Math.sqrt(3.0)*(-t + s)/2;
dataForm.x2Im.value = term1;
dataForm.x3Im.value = -term1;
return;
}
// End if (disc > 0)
// The remaining options are all real
dataForm.x3Im.value = dataForm.x2Im.value = 0;
if (disc == 0){ // All roots real, at least two are equal.
r13 = ((r < 0) ? -Math.pow(-r,(1.0/3.0)) : Math.pow(r,(1.0/3.0)));
dataForm.x1Re.value = -term1 + 2.0*r13;
dataForm.x3Re.value = dataForm.x2Re.value = -(r13 + term1);
return;
} // End if (disc == 0)
*/
Real xRex, xRey, xRez;
// have complex root
if (disc > 0) {
xRex = 0.0;
xRey = 0.0;
xRez = 0.0;
return mR3(xRex, xRey, xRez);
}
// All roots real, at least two are equal.
if (disc == 0) {
if (r < 0) {
r13 = pow(-r, (1.0 / 3.0));
} else {
r13 = pow(r, (1.0 / 3.0));
}
xRex = -term1 + 2.0 * r13;
xRey = -(r13 + term1);
xRez = xRey;
return mR3(xRex, xRey, xRez);
}
// All roots are real and unequal (to get here, q < 0)
q = -q;
dum1 = q * q * q;
dum2 = r / (sqrt(dum1 + 1.0e-9));
if ((dum2 >= 0) && (dum2 <= 1)) {
dum1 = acos(dum2);
} else {
xRex = 0.0;
xRey = 0.0;
xRez = 0.0;
return mR3(xRex, xRey, xRez);
}
r13 = 2.0 * sqrt(q);
xRex = -term1 + r13 * cos(dum1 / 3.0);
xRey = -term1 + r13 * cos((dum1 + 2.0 * 3.1415926) / 3.0);
xRez = -term1 + r13 * cos((dum1 + 4.0 * 3.1415926) / 3.0);
return mR3(xRex, xRey, xRez);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real3 CubicEigen(Real4 c1, Real4 c2, Real4 c3) {
Real a = c1.x;
Real b = c1.y;
Real c = c1.z;
Real d = c1.w;
Real l = c2.x;
Real m = c2.y;
Real n = c2.z;
Real k = c2.w;
Real p = c3.x;
Real q = c3.y;
Real r = c3.z;
Real s = c3.w;
Real D = (a * m * r + b * p * n + c * l * q) - (a * n * q + b * l * r + c * m * p) + 1.0e-9;
Real x = ((b * r * k + c * m * s + d * n * q) - (b * n * s + c * q * k + d * m * r)) / D;
Real y = ((a * n * s + c * p * k + d * l * r) - (a * r * k + c * l * s + d * n * p)) / D;
Real z = ((a * q * k + b * l * s + d * m * p) - (a * m * s + b * p * k + d * l * q)) / D;
b = b + 1.0e-9;
x = 1.0e0;
z = (-l + a * m / b) / (n - c * m / b);
y = (-a - c * z) / b;
Real R = sqrt(x * x + y * y + z * z);
x = x / R;
y = y / R;
z = z / R;
// if(abs(D) < 1){
// return mR3(0,0,0);
// }
// if(abs(m) < 0.1){
// x=0;
// y=1;
// z=0;
// return mR3(x,y,z);
// }
// else{
// y=0;
// if(abs(c) > 0.1){
// x=1;
// z=-a/c;
// return mR3(x,y,z);
// }
// if(abs(a) > 0.1){
// z=1;
// x=-c/a;
// return mR3(x,y,z);
// }
// }
return mR3(x, y, z);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real4 DifVelocityRho(float G_i[9],
Real3 dist3,
Real d,
Real4 posRadA,
Real4 posRadB,
Real3 velMasA,
Real3 velMasB,
Real4 rhoPresMuA,
Real4 rhoPresMuB,
Real multViscosity) {
if (rhoPresMuA.w > -0.5 && rhoPresMuB.w > -0.5)
return mR4(0.0);
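    // WCSPH pairwise interaction: the return value packs the acceleration contribution
    // of particle B on particle A (x, y, z) and the continuity-equation term (w).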
Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5);
    // Continuity equation
Real derivRho = paramsD.markerMass * dot(velMasA - velMasB, gradW);
// Viscosity
Real rAB_Dot_GradWh = dot(dist3, gradW);
Real rAB_Dot_GradWh_OverDist =
rAB_Dot_GradWh / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML);
Real3 derivV = -paramsD.markerMass * (rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) +
rhoPresMuB.y / (rhoPresMuB.x * rhoPresMuB.x)) * gradW +
paramsD.markerMass * (8.0f * multViscosity) * paramsD.mu0 *
rAB_Dot_GradWh_OverDist * (velMasA - velMasB) / square(rhoPresMuA.x + rhoPresMuB.x);
// Artificial viscosity
Real vAB_Dot_rAB = dot(velMasA - velMasB, dist3);
    // artificial viscosity is active; change (1 == 1) to (1 == 0) below to disable it
if ((vAB_Dot_rAB < 0.0) && (1 == 1)) {
Real alpha = paramsD.Ar_vis_alpha;
Real c_ab = paramsD.Cs;
Real rho = 0.5f * (rhoPresMuA.x * rhoPresMuB.x);
Real nu = -alpha * paramsD.HSML * c_ab / rho;
Real derivM1 = -paramsD.markerMass * (nu * vAB_Dot_rAB / (d * d +
paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML));
derivV.x += derivM1 * gradW.x;
derivV.y += derivM1 * gradW.y;
derivV.z += derivM1 * gradW.z;
}
return mR4(derivV, derivRho);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real4 DifVelocityRho_ElasticSPH(Real W_ini_inv,
Real W_AB,
Real3 gradW,
Real3 dist3,
Real d,
Real invd,
Real4 posRadA,
Real4 posRadB,
Real3 velMasA_in,
Real3 velMasB_in,
Real4 rhoPresMuA,
Real4 rhoPresMuB,
Real3 tauXxYyZz_A_in,
Real3 tauXyXzYz_A_in,
Real3 tauXxYyZz_B_in,
Real3 tauXyXzYz_B_in) {
if (rhoPresMuA.w > -0.5 && rhoPresMuB.w > -0.5)
return mR4(0.0);
Real3 velMasA = velMasA_in;
Real3 velMasB = velMasB_in;
Real3 tauXxYyZz_A = tauXxYyZz_A_in;
Real3 tauXxYyZz_B = tauXxYyZz_B_in;
Real3 tauXyXzYz_A = tauXyXzYz_A_in;
Real3 tauXyXzYz_B = tauXyXzYz_B_in;
/*if (rhoPresMuA.w < -0.5 && rhoPresMuB.w > -0.5) {
tauXxYyZz_B = tauXxYyZz_A;
tauXyXzYz_B = tauXyXzYz_A;
// velMasB = 2.0*velMasB - velMasA; // noslip BC
}
if (rhoPresMuA.w > -0.5 && rhoPresMuB.w < -0.5) {
tauXxYyZz_A = tauXxYyZz_B;
tauXyXzYz_A = tauXyXzYz_B;
// velMasA = 2.0*velMasA - velMasB; // noslip BC
}*/
Real Mass = paramsD.markerMass;
Real MassOverRho = Mass * paramsD.invrho0 * paramsD.invrho0;
Real3 MA_gradW = gradW * MassOverRho;
Real derivVx = (tauXxYyZz_A.x + tauXxYyZz_B.x) * MA_gradW.x +
(tauXyXzYz_A.x + tauXyXzYz_B.x) * MA_gradW.y +
(tauXyXzYz_A.y + tauXyXzYz_B.y) * MA_gradW.z;
Real derivVy = (tauXyXzYz_A.x + tauXyXzYz_B.x) * MA_gradW.x +
(tauXxYyZz_A.y + tauXxYyZz_B.y) * MA_gradW.y +
(tauXyXzYz_A.z + tauXyXzYz_B.z) * MA_gradW.z;
Real derivVz = (tauXyXzYz_A.y + tauXyXzYz_B.y) * MA_gradW.x +
(tauXyXzYz_A.z + tauXyXzYz_B.z) * MA_gradW.y +
(tauXxYyZz_A.z + tauXxYyZz_B.z) * MA_gradW.z;
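    // The three components above discretize the stress divergence (1/rho) * div(sigma)
    // with the symmetric (sigma_A + sigma_B) / rho0^2 form; the full stress is stored as
    // diagonal (tauXxYyZz) and off-diagonal (tauXyXzYz) entries.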
// TODO: Visco-plastic model
// Real vel = length(velMasA);
// if(vel > 0.3){
// Real rAB_Dot_GradWh = dot(dist3, gradW);
// Real rAB_Dot_GradWh_OverDist = rAB_Dot_GradWh / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML *
// paramsD.HSML); Real3 derivV = - paramsD.markerMass *(rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) +
// rhoPresMuB.y / (rhoPresMuB.x * rhoPresMuB.x)) * gradW
// + paramsD.markerMass * (8.0f * multViscosity) * paramsD.mu_fric_s
// * pow(rhoPresMuA.x + rhoPresMuB.x, Real(-2)) * rAB_Dot_GradWh_OverDist * (velMasA - velMasB);
// derivVx = derivV.x;
// derivVy = derivV.y;
// derivVz = derivV.z;
// }
// Artificial viscosity
Real vAB_rAB = dot(velMasA - velMasB, dist3);
// if (vAB_rAB < 0.0) {
Real nu = -paramsD.Ar_vis_alpha * paramsD.HSML * paramsD.Cs * paramsD.invrho0;
Real derivM1 = -Mass * (nu * vAB_rAB * (invd * invd)); //+ paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML
derivVx += derivM1 * gradW.x;
derivVy += derivM1 * gradW.y;
derivVz += derivM1 * gradW.z;
// }
    // Artificial pressure to handle the tensile instability issue.
    // A complete artificial stress treatment should be implemented in the future.
/*if (paramsD.Coh_coeff > 1e-5) {
Real Pa = -1.0 / 3.0 * (tauXxYyZz_A.x + tauXxYyZz_A.y + tauXxYyZz_A.z);
if (Pa < 0.0) {
Real Pb = -1.0 / 3.0 * (tauXxYyZz_B.x + tauXxYyZz_B.y + tauXxYyZz_B.z);
Real epsi = 0.5;
Real Ra = Pa * epsi * paramsD.invrho0 * paramsD.invrho0;
Real Rb = Pb * epsi * paramsD.invrho0 * paramsD.invrho0;
Real fAB = W_AB * W_ini_inv;
Real small_F = Mass * pow(fAB, 3.0) * (Ra + Rb);
derivVx += small_F * gradW.x;
derivVy += small_F * gradW.y;
derivVz += small_F * gradW.z;
}
}*/
    // TODO: Damping force
// if (1 == 0) {
// Real xi0 = paramsD.Vis_Dam;
// Real E0 = paramsD.E_young;
// Real h0 = paramsD.HSML;
// Real Cd = xi0 * sqrt(E0 / (rhoA * h0 * h0));
// derivVx -= Cd * velMasA.x;
// derivVy -= Cd * velMasA.y;
// derivVz -= Cd * velMasA.z;
// }
// Real derivRho = Mass * dot(vel_XSPH_A - vel_XSPH_B, gradW);
return mR4(derivVx, derivVy, derivVz, 0.0);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real3 GradientOperator(float G_i[9],
Real3 dist3,
Real4 posRadA,
Real4 posRadB,
Real fA,
Real fB,
Real4 rhoPresMuA,
Real4 rhoPresMuB) {
Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5);
Real3 gradW_new;
gradW_new.x = G_i[0] * gradW.x + G_i[1] * gradW.y + G_i[2] * gradW.z;
gradW_new.y = G_i[3] * gradW.x + G_i[4] * gradW.y + G_i[5] * gradW.z;
gradW_new.z = G_i[6] * gradW.x + G_i[7] * gradW.y + G_i[8] * gradW.z;
Real Vol = paramsD.markerMass / rhoPresMuB.x;
Real fji = fB - fA;
Real Gra_ij_x = fji * gradW_new.x * Vol;
Real Gra_ij_y = fji * gradW_new.y * Vol;
Real Gra_ij_z = fji * gradW_new.z * Vol;
return mR3(Gra_ij_x, Gra_ij_y, Gra_ij_z);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real4 LaplacianOperator(float G_i[9],
float L_i[9],
Real3 dist3,
Real4 posRadA,
Real4 posRadB,
Real fA,
Real fB,
Real4 rhoPresMuA,
Real4 rhoPresMuB) {
Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5);
Real d = length(dist3);
Real3 eij = dist3 / d;
Real Vol = paramsD.markerMass / rhoPresMuB.x;
Real fij = fA - fB;
Real ex_Gwx = eij.x * gradW.x;
Real ex_Gwy = eij.x * gradW.y;
Real ex_Gwz = eij.x * gradW.z;
Real ey_Gwx = eij.y * gradW.x;
Real ey_Gwy = eij.y * gradW.y;
Real ey_Gwz = eij.y * gradW.z;
Real ez_Gwx = eij.z * gradW.x;
Real ez_Gwy = eij.z * gradW.y;
Real ez_Gwz = eij.z * gradW.z;
Real Part1 = L_i[0] * ex_Gwx + L_i[1] * ex_Gwy + L_i[2] * ex_Gwz +
L_i[3] * ey_Gwx + L_i[4] * ey_Gwy + L_i[5] * ey_Gwz +
L_i[6] * ez_Gwx + L_i[7] * ez_Gwy + L_i[8] * ez_Gwz;
Real Part2 = fij / d * Vol;
Real3 Part3 = mR3(-eij.x, -eij.y, -eij.z) * Vol;
return mR4(2.0 * Part1 * Part2, Part3.x * (2.0 * Part1),
Part3.y * (2.0 * Part1), Part3.z * (2.0 * Part1));
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void EOS(Real4* sortedRhoPreMu, volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
sortedRhoPreMu[index].y = Eos(sortedRhoPreMu[index].x, sortedRhoPreMu[index].w);
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void Navier_Stokes(uint* indexOfIndex,
Real4* sortedDerivVelRho,
Real3* sortedXSPHandShift,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* velMas_ModifiedBCE,
Real4* rhoPreMu_ModifiedBCE,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd,
volatile bool* isErrorD) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers - numObjectsD.numBoundaryMarkers)
return;
uint index = indexOfIndex[id];
// Do nothing for fixed wall BCE particles
if (sortedRhoPreMu[index].w > -0.5 && sortedRhoPreMu[index].w < 0.5) {
sortedDerivVelRho[index] = mR4(0.0);
return;
}
Real3 posRadA = mR3(sortedPosRad[index]);
Real3 velMasA = sortedVelMas[index];
Real4 rhoPresMuA = sortedRhoPreMu[index];
Real4 derivVelRho = mR4(0.0);
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
Real G_i[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
Real L_i[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
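    // G_i corrects the kernel gradient (first-order consistency) and L_i corrects the
    // Laplacian operator; both remain the identity unless the consistent operators are enabled.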
if (paramsD.USE_Consistent_G)
calc_G_Matrix(sortedPosRad, sortedVelMas, sortedRhoPreMu, G_i, cellStart,
cellEnd, indexOfIndex);
if (paramsD.USE_Consistent_L) {
Real A_i[27] = {0.0};
calc_A_Matrix(sortedPosRad, sortedVelMas, sortedRhoPreMu, A_i, G_i, cellStart,
cellEnd, indexOfIndex);
calc_L_Matrix(sortedPosRad, sortedVelMas, sortedRhoPreMu, A_i, L_i, G_i, cellStart,
cellEnd, indexOfIndex);
}
float Gi[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
float Li[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
Gi[0] = G_i[0];
Gi[1] = G_i[1];
Gi[2] = G_i[2];
Gi[3] = G_i[3];
Gi[4] = G_i[4];
Gi[5] = G_i[5];
Gi[6] = G_i[6];
Gi[7] = G_i[7];
Gi[8] = G_i[8];
Li[0] = L_i[0];
Li[1] = L_i[1];
Li[2] = L_i[2];
Li[3] = L_i[3];
Li[4] = L_i[4];
Li[5] = L_i[5];
Li[6] = L_i[6];
Li[7] = L_i[7];
Li[8] = L_i[8];
Real3 preGra = mR3(0.0);
Real3 velxGra = mR3(0.0);
Real3 velyGra = mR3(0.0);
Real3 velzGra = mR3(0.0);
Real4 velxLap = mR4(0.0);
Real4 velyLap = mR4(0.0);
Real4 velzLap = mR4(0.0);
Real vA = length(velMasA);
Real vAdT = vA * paramsD.dT;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
Real3 inner_sum = mR3(0.0);
Real sum_w_i = W3h(0.0, sortedPosRad[index].w) * paramsD.volume0;
for (int x = -1; x <= 1; x++) {
for (int y = -1; y <= 1; y++) {
for (int z = -1; z <= 1; z++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real dd = dist3.x * dist3.x + dist3.y * dist3.y + dist3.z * dist3.z;
if (dd > SqRadii)
continue;
Real4 rhoPresMuB = sortedRhoPreMu[j];
// no rigid-rigid force
if (rhoPresMuA.w > -0.5 && rhoPresMuB.w > -0.5)
continue;
Real d = length(dist3);
// modifyPressure(rhoPresMuB, dist3Alpha);
// if (!(isfinite(rhoPresMuB.x) && isfinite(rhoPresMuB.y) && isfinite(rhoPresMuB.z))) {
// printf("Error! particle rhoPresMuB is NAN: thrown from modifyPressure !\n");
// }
Real3 velMasB = sortedVelMas[j];
if (rhoPresMuB.w > -0.5) {
int bceIndexB = gridMarkerIndex[j] - numObjectsD.numFluidMarkers;
if (!(bceIndexB >= 0 && bceIndexB <
numObjectsD.numAllMarkers - numObjectsD.numFluidMarkers)) {
printf("Error! bceIndex out of bound, collideCell !\n");
}
rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB];
velMasB = velMas_ModifiedBCE[bceIndexB];
}
Real multViscosit = 1;
derivVelRho += DifVelocityRho(Gi, dist3, d, sortedPosRad[index], sortedPosRad[j],
velMasA, velMasB, rhoPresMuA, rhoPresMuB, multViscosit);
preGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
-rhoPresMuA.y, rhoPresMuB.y, rhoPresMuA, rhoPresMuB);
velxGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.x, velMasB.x, rhoPresMuA, rhoPresMuB);
velyGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.y, velMasB.y, rhoPresMuA, rhoPresMuB);
velzGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.z, velMasB.z, rhoPresMuA, rhoPresMuB);
velxLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.x, velMasB.x, rhoPresMuA, rhoPresMuB);
velyLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.y, velMasB.y, rhoPresMuA, rhoPresMuB);
velzLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.z, velMasB.z, rhoPresMuA, rhoPresMuB);
if (d > paramsD.HSML * 1.0e-9)
sum_w_i = sum_w_i + W3h(d, sortedPosRad[index].w) * paramsD.volume0;
}
}
}
}
}
Real nu = paramsD.mu0 / paramsD.rho0;
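    // dv/dt below combines the corrected pressure gradient (divided by density) with a
    // viscous term assembled from the corrected Laplacian of each velocity component;
    // drho/dt is the continuity equation written with the trace of the velocity gradient.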
Real dvxdt = -preGra.x / rhoPresMuA.x + (velxLap.x + velxGra.x * velxLap.y +
velxGra.y * velxLap.z + velxGra.z * velxLap.w) * nu;
Real dvydt = -preGra.y / rhoPresMuA.x + (velyLap.x + velyGra.x * velyLap.y +
velyGra.y * velyLap.z + velyGra.z * velyLap.w) * nu;
Real dvzdt = -preGra.z / rhoPresMuA.x + (velzLap.x + velzGra.x * velzLap.y +
velzGra.y * velzLap.z + velzGra.z * velzLap.w) * nu;
Real drhodt = -paramsD.rho0 * (velxGra.x + velyGra.y + velzGra.z);
Real Det_G = (Gi[0] * Gi[4] * Gi[8] - Gi[0] * Gi[5] * Gi[7] - Gi[1] * Gi[3] * Gi[8] +
Gi[1] * Gi[5] * Gi[6] + Gi[2] * Gi[3] * Gi[7] - Gi[2] * Gi[4] * Gi[6]);
Real Det_L = (Li[0] * Li[4] * Li[8] - Li[0] * Li[5] * Li[7] - Li[1] * Li[3] * Li[8] +
Li[1] * Li[5] * Li[6] + Li[2] * Li[3] * Li[7] - Li[2] * Li[4] * Li[6]);
if (rhoPresMuA.w > -1.5 && rhoPresMuA.w < -0.5) {
if (Det_G > 0.9 && Det_G < 1.1 && Det_L > 0.9 && Det_L < 1.1 && sum_w_i > 0.9) {
derivVelRho = mR4(dvxdt, dvydt, dvzdt, drhodt);
}
}
if (!(isfinite(derivVelRho.x) && isfinite(derivVelRho.y) && isfinite(derivVelRho.z))) {
printf("Error! particle derivVel is NAN: thrown from ChFsiForceExplicitSPH.cu, collideD !\n");
*isErrorD = true;
}
if (!(isfinite(derivVelRho.w))) {
printf("Error! particle derivRho is NAN: thrown from ChFsiForceExplicitSPH.cu, collideD !\n");
*isErrorD = true;
}
// add gravity and other body force to fluid markers
if (rhoPresMuA.w > -1.5 && rhoPresMuA.w < -0.5) {
Real3 totalFluidBodyForce3 = paramsD.bodyForce3 + paramsD.gravity;
derivVelRho += mR4(totalFluidBodyForce3);
}
sortedDerivVelRho[index] = derivVelRho;
Real det_r_max = 0.05 * vAdT;
Real det_r_A = length(inner_sum);
if (det_r_A < det_r_max) {
sortedXSPHandShift[index] = inner_sum;
} else {
sortedXSPHandShift[index] = inner_sum * det_r_max / (det_r_A + 1e-9);
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void NS_SSR(uint* activityIdentifierD,
Real4* sortedDerivVelRho,
Real3* sortedDerivTauXxYyZz,
Real3* sortedDerivTauXyXzYz,
Real3* sortedXSPHandShift,
Real3* sortedKernelSupport,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* velMas_ModifiedBCE,
Real4* rhoPreMu_ModifiedBCE,
Real3* tauXxYyZz_ModifiedBCE,
Real3* tauXyXzYz_ModifiedBCE,
Real3* sortedTauXxYyZz,
Real3* sortedTauXyXzYz,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd,
uint* mapOriginalToSorted,
uint* sortedFreeSurfaceIdD,
volatile bool* isErrorD) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers)
return;
// no need to do anything if it is not an active particle
uint activity = activityIdentifierD[id];
if (activity == 0)
return;
// map original to sorted
uint index = mapOriginalToSorted[id];
if (sortedRhoPreMu[index].w > -0.5 && sortedRhoPreMu[index].w < 0.5)
return;
Real hA = sortedPosRad[index].w;
Real3 posRadA = mR3(sortedPosRad[index]);
Real3 velMasA = sortedVelMas[index];
Real4 rhoPresMuA = sortedRhoPreMu[index];
Real3 TauXxYyZzA = sortedTauXxYyZz[index];
Real3 TauXyXzYzA = sortedTauXyXzYz[index];
Real4 derivVelRho = mR4(0.0);
Real3 deltaV = mR3(0.0);
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
uint j_list[150];
uint j_num = 0;
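    // Cache neighbor indices in a fixed-size local array; this assumes no particle has
    // more than 150 neighbors inside the support radius.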
// Get address in grid
int3 gridPos = calcGridPos(posRadA);
// Find the neighbor particle list
for (int x = -1; x <= 1; x++) {
for (int y = -1; y <= 1; y++) {
for (int z = -1; z <= 1; z++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real dd = dist3.x * dist3.x + dist3.y * dist3.y + dist3.z * dist3.z;
if (dd < SqRadii) {
j_list[j_num] = j;
j_num++;
}
}
}
}
}
}
Real tauxx = sortedTauXxYyZz[index].x;
Real tauyy = sortedTauXxYyZz[index].y;
Real tauzz = sortedTauXxYyZz[index].z;
Real tauxy = sortedTauXyXzYz[index].x;
Real tauxz = sortedTauXyXzYz[index].y;
Real tauyz = sortedTauXyXzYz[index].z;
Real dTauxx = 0.0;
Real dTauyy = 0.0;
Real dTauzz = 0.0;
Real dTauxy = 0.0;
Real dTauxz = 0.0;
Real dTauyz = 0.0;
// Calculate the correction matrix for gradient operator
Real G_i[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
if (paramsD.USE_Consistent_G) {
Real mGi[9] = {0.0};
for (uint n = 0; n < j_num; n++) {
uint j = j_list[n];
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
Real3 grad_i_wij = GradWh(rij, hA);
Real3 grw_vj = grad_i_wij * paramsD.volume0;
mGi[0] -= rij.x * grw_vj.x;
mGi[1] -= rij.x * grw_vj.y;
mGi[2] -= rij.x * grw_vj.z;
mGi[3] -= rij.y * grw_vj.x;
mGi[4] -= rij.y * grw_vj.y;
mGi[5] -= rij.y * grw_vj.z;
mGi[6] -= rij.z * grw_vj.x;
mGi[7] -= rij.z * grw_vj.y;
mGi[8] -= rij.z * grw_vj.z;
}
Real Det = (mGi[0] * mGi[4] * mGi[8] - mGi[0] * mGi[5] * mGi[7] -
mGi[1] * mGi[3] * mGi[8] + mGi[1] * mGi[5] * mGi[6] +
mGi[2] * mGi[3] * mGi[7] - mGi[2] * mGi[4] * mGi[6]);
if (abs(Det) > 0.01) {
Real OneOverDet = 1.0 / Det;
G_i[0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) * OneOverDet;
G_i[1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) * OneOverDet;
G_i[2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) * OneOverDet;
G_i[3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) * OneOverDet;
G_i[4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) * OneOverDet;
G_i[5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) * OneOverDet;
G_i[6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) * OneOverDet;
G_i[7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) * OneOverDet;
G_i[8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) * OneOverDet;
}
}
Real radii = paramsD.INITSPACE * 1.241; // 1.129;//1.241
Real invRadii = 1.0 / 1.241 * paramsD.INV_INIT; // 1.0 / radii
Real vA = length(velMasA);
Real vAdT = vA * paramsD.dT;
Real bs_vAdT = paramsD.beta_shifting * vAdT;
Real3 inner_sum = mR3(0.0);
Real sum_w_i = W3h(0.0, hA) * paramsD.volume0;
Real w_ini_inv = 1.0 / W3h(paramsD.INITSPACE, hA);
int N_ = 1;
int N_s = 0;
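    // The neighbor loop below accumulates (i) the acceleration from DifVelocityRho_ElasticSPH,
    // (ii) the stress rate for granular/elastic markers, (iii) a kernel sum used for free
    // surface detection, and (iv) the XSPH and shifting contributions.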
// Get the interaction from neighbor particles
for (uint n = 0; n < j_num; n++) {
uint j = j_list[n];
Real4 rhoPresMuB = sortedRhoPreMu[j];
if (rhoPresMuA.w > -0.5 && rhoPresMuB.w > -0.5)
continue; // No BCE-BCE interaction
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real d = length(dist3);
Real invd = 1.0 / d;
Real3 velMasB = sortedVelMas[j];
Real3 TauXxYyZzB = sortedTauXxYyZz[j];
Real3 TauXyXzYzB = sortedTauXyXzYz[j];
if (rhoPresMuB.w > -0.5) {
int bceIndexB = gridMarkerIndex[j] - numObjectsD.numFluidMarkers;
rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB];
velMasB = velMas_ModifiedBCE[bceIndexB];
TauXxYyZzB = tauXxYyZz_ModifiedBCE[bceIndexB];
TauXyXzYzB = tauXyXzYz_ModifiedBCE[bceIndexB];
            // Extrapolate the velocity of the solid marker from neighboring fluid particles
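            // ADAMI-type treatment: the extrapolated velocity is scaled by the ratio of the
            // estimated wall distances dB/dA (capped at 0.5), computed from the kernel-support
            // ratios chi stored in sortedKernelSupport.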
if(rhoPresMuB.w > 0.5 && paramsD.bceType == BceVersion::ADAMI){
velMasB = sortedVelMas[j];
Real chi_A = sortedKernelSupport[index].y / sortedKernelSupport[index].x;
Real chi_B = sortedKernelSupport[j].y / sortedKernelSupport[j].x;
Real dA = SuppRadii * (2.0 * chi_A - 1.0);
if (dA < 0.0)
dA = 0.01 * SuppRadii;
Real dB = SuppRadii * (2.0 * chi_B - 1.0);
if (dB < 0.0)
dB = 0.01 * SuppRadii;
Real dAB = dB / dA;
if (dAB > 0.5)
dAB = 0.5;
Real3 velMasB_new = dAB * (velMasB - velMasA) + velMasB;
velMasB = velMasB_new;
}
if(rhoPresMuB.w < 0.5 && paramsD.bceTypeWall == BceVersion::ADAMI){
velMasB = sortedVelMas[j];
Real chi_A = sortedKernelSupport[index].y / sortedKernelSupport[index].x;
Real chi_B = sortedKernelSupport[j].y / sortedKernelSupport[j].x;
Real dA = SuppRadii * (2.0 * chi_A - 1.0);
if (dA < 0.0)
dA = 0.01 * SuppRadii;
Real dB = SuppRadii * (2.0 * chi_B - 1.0);
if (dB < 0.0)
dB = 0.01 * SuppRadii;
Real dAB = dB / dA;
if (dAB > 0.5)
dAB = 0.5;
Real3 velMasB_new = dAB * (velMasB - velMasA) + velMasB;
velMasB = velMasB_new;
}
}
// Correct the kernel function gradient
Real w_AB = W3h(d, hA);
Real3 gradW = GradWh(dist3, hA);
if (paramsD.USE_Consistent_G) {
Real3 gradW_new;
gradW_new.x = G_i[0] * gradW.x + G_i[1] * gradW.y + G_i[2] * gradW.z;
gradW_new.y = G_i[3] * gradW.x + G_i[4] * gradW.y + G_i[5] * gradW.z;
gradW_new.z = G_i[6] * gradW.x + G_i[7] * gradW.y + G_i[8] * gradW.z;
gradW = gradW_new;
}
// Calculate dv/dt
derivVelRho += DifVelocityRho_ElasticSPH(w_ini_inv, w_AB, gradW, dist3, d, invd,
sortedPosRad[index], sortedPosRad[j], velMasA, velMasB, rhoPresMuA,
rhoPresMuB, TauXxYyZzA, TauXyXzYzA, TauXxYyZzB, TauXyXzYzB);
// Calculate dsigma/dt
if (sortedRhoPreMu[index].w < -0.5) {
// start to calculate the stress rate
Real3 vAB = velMasA - velMasB;
Real3 vAB_h = 0.5 * vAB * paramsD.volume0;
// entries of strain rate tensor
Real exx = -2.0 * vAB_h.x * gradW.x;
Real eyy = -2.0 * vAB_h.y * gradW.y;
Real ezz = -2.0 * vAB_h.z * gradW.z;
Real exy = -vAB_h.x * gradW.y - vAB_h.y * gradW.x;
Real exz = -vAB_h.x * gradW.z - vAB_h.z * gradW.x;
Real eyz = -vAB_h.y * gradW.z - vAB_h.z * gradW.y;
// entries of rotation rate (spin) tensor
Real wxy = -vAB_h.x * gradW.y + vAB_h.y * gradW.x;
Real wxz = -vAB_h.x * gradW.z + vAB_h.z * gradW.x;
Real wyz = -vAB_h.y * gradW.z + vAB_h.z * gradW.y;
Real edia = 0.3333333333333 * (exx + eyy + ezz);
Real twoG = 2.0 * paramsD.G_shear;
Real K_edia = paramsD.K_bulk * 1.0 * edia;
dTauxx += twoG * (exx - edia) + 2.0 * (tauxy * wxy + tauxz * wxz) + K_edia;
dTauyy += twoG * (eyy - edia) - 2.0 * (tauxy * wxy - tauyz * wyz) + K_edia;
dTauzz += twoG * (ezz - edia) - 2.0 * (tauxz * wxz + tauyz * wyz) + K_edia;
dTauxy += twoG * exy - (tauxx * wxy - tauxz * wyz) + (wxy * tauyy + wxz * tauyz);
dTauxz += twoG * exz - (tauxx * wxz + tauxy * wyz) + (wxy * tauyz + wxz * tauzz);
dTauyz += twoG * eyz - (tauxy * wxz + tauyy * wyz) - (wxy * tauxz - wyz * tauzz);
}
        // Accumulate the kernel-function sum and the XSPH term
if (d > paramsD.HSML * 1.0e-9) {
Real Wab = W3h(d, hA);
// Integration of the kernel function
sum_w_i += Wab * paramsD.volume0;
// XSPH
if (rhoPresMuB.w > -1.5 && rhoPresMuB.w < -0.5)
deltaV += paramsD.volume0 * (velMasB - velMasA) * Wab;
N_ = N_ + 1;
}
// Find particles that have contact with this particle
if (d < 1.25 * radii && rhoPresMuB.w < -0.5) {
Real Pen = (radii - d) * invRadii;
Real3 r_0 = bs_vAdT * invd * dist3;
Real3 r_s = r_0 * Pen;
if (d < 1.0 * radii) {
inner_sum += 3.0 * r_s;
} else if (d < 1.1 * radii) {
inner_sum += 1.0 * r_s;
} else {
inner_sum += 0.1 * 1.0 * (-r_0);
}
N_s = N_s + 1;
}
}
    // Flag particles that do not have enough neighbors as free-surface particles (granular material only, for now)
if (sum_w_i < paramsD.C_Wi) {
sortedFreeSurfaceIdD[index] = 1;
} else {
sortedFreeSurfaceIdD[index] = 0;
}
// Calculate the shifting vector
Real det_r_max = 0.05 * vAdT;
Real det_r_A = length(inner_sum);
if (det_r_A < det_r_max) {
sortedXSPHandShift[index] = inner_sum;
} else {
sortedXSPHandShift[index] = inner_sum * det_r_max / (det_r_A + 1e-9);
}
// Add the XSPH term into the shifting vector
sortedXSPHandShift[index] += paramsD.EPS_XSPH * deltaV * paramsD.dT;
// Get the shifting velocity
sortedXSPHandShift[index] = sortedXSPHandShift[index] * paramsD.INV_dT;
// Add gravity and other body force to fluid markers
if (rhoPresMuA.w > -1.5 && rhoPresMuA.w < -0.5) {
Real3 totalFluidBodyForce3 = paramsD.bodyForce3 + paramsD.gravity;
derivVelRho += mR4(totalFluidBodyForce3, 0.0);
}
sortedDerivVelRho[index] = derivVelRho;
sortedDerivTauXxYyZz[index] = mR3(dTauxx, dTauyy, dTauzz);
sortedDerivTauXyXzYz[index] = mR3(dTauxy, dTauxz, dTauyz);
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void CalcVel_XSPH_D(uint* indexOfIndex,
Real3* vel_XSPH_Sorted_D,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* sortedXSPHandShift,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd,
volatile bool* isErrorD) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers - numObjectsD.numBoundaryMarkers)
return;
uint index = indexOfIndex[id];
Real4 rhoPreMuA = sortedRhoPreMu[index];
Real3 velMasA = sortedVelMas[index];
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
Real3 posRadA = mR3(sortedPosRad[index]);
Real3 deltaV = mR3(0);
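    // XSPH correction: deltaV accumulates a kernel-weighted average of the velocity of
    // neighboring fluid particles relative to this one; it is blended into the advection
    // velocity with the EPS_XSPH coefficient.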
// get address in grid
int3 gridPos = calcGridPos(posRadA);
Real3 inner_sum = mR3(0.0);
// Real mi_bar = 0.0, r0 = 0.0;
Real3 dV = mR3(0.0f);
// examine neighbouring cells
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) { // check not colliding with self
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real dd = dist3.x * dist3.x + dist3.y * dist3.y + dist3.z * dist3.z;
if (dd > SqRadii)
continue;
Real4 rhoPresMuB = sortedRhoPreMu[j];
if (rhoPresMuB.w > -0.5 || rhoPresMuB.w < -1.5)
continue;
Real3 velMasB = sortedVelMas[j];
Real rho_bar = 0.5 * (rhoPreMuA.x + rhoPresMuB.x);
Real d = length(dist3);
deltaV += paramsD.markerMass * (velMasB - velMasA) *
W3h(d, paramsD.HSML) / rho_bar;
}
}
}
}
}
vel_XSPH_Sorted_D[index] =
paramsD.EPS_XSPH * deltaV + sortedXSPHandShift[index] * paramsD.INV_dT;
if (!(isfinite(vel_XSPH_Sorted_D[index].x) &&
isfinite(vel_XSPH_Sorted_D[index].y) && isfinite(vel_XSPH_Sorted_D[index].z))) {
printf("Error! particle vXSPH is NAN: thrown from ChFsiForceExplicitSPH.cu, CalcVel_XSPH_D !\n");
*isErrorD = true;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void CopySortedToOriginal_D(Real4* sortedDerivVelRho,
Real3* sortedDerivTauXxYyZz,
Real3* sortedDerivTauXyXzYz,
Real4* originalDerivVelRho,
Real3* originalDerivTauXxYyZz,
Real3* originalDerivTauXyXzYz,
uint* gridMarkerIndex,
uint* activityIdentifierD,
uint* mapOriginalToSorted,
uint* originalFreeSurfaceId,
uint* sortedFreeSurfaceId) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers)
return;
// Check the activity of this particle
uint activity = activityIdentifierD[id];
if (activity == 0)
return;
uint index = mapOriginalToSorted[id];
originalDerivVelRho[id] = sortedDerivVelRho[index];
if (paramsD.elastic_SPH) {
originalDerivTauXxYyZz[id] = sortedDerivTauXxYyZz[index];
originalDerivTauXyXzYz[id] = sortedDerivTauXyXzYz[index];
originalFreeSurfaceId[id] = sortedFreeSurfaceId[index];
}
return;
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void CopySortedToOriginal_XSPH_D(Real3* sortedXSPH,
Real3* originalXSPH,
uint* gridMarkerIndex,
uint* activityIdentifierD,
uint* mapOriginalToSorted) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers)
return;
// Check the activity of this particle
uint activity = activityIdentifierD[id];
if (activity == 0)
return;
uint index = mapOriginalToSorted[id];
originalXSPH[id] = sortedXSPH[index];
}
//--------------------------------------------------------------------------------------------------------------------------------
ChFsiForceExplicitSPH::ChFsiForceExplicitSPH(std::shared_ptr<ChBce> otherBceWorker,
std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD,
std::shared_ptr<ProximityDataD> otherMarkersProximityD,
std::shared_ptr<FsiGeneralData> otherFsiGeneralData,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<ChCounters> otherNumObjects,
bool verb)
: ChFsiForce(otherBceWorker,
otherSortedSphMarkersD,
otherMarkersProximityD,
otherFsiGeneralData,
otherParamsH,
otherNumObjects,
verb) {
CopyParams_NumberOfObjects(paramsH, numObjectsH);
density_initialization = 0;
}
ChFsiForceExplicitSPH::~ChFsiForceExplicitSPH() {}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::Initialize() {
ChFsiForce::Initialize();
hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters));
hipMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams));
hipDeviceSynchronize();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::ForceSPH(std::shared_ptr<SphMarkerDataD> otherSphMarkersD,
std::shared_ptr<FsiBodiesDataD> otherFsiBodiesD,
std::shared_ptr<FsiMeshDataD> otherFsiMeshD) {
sphMarkersD = otherSphMarkersD;
fsiCollisionSystem->ArrangeData(sphMarkersD);
bceWorker->ModifyBceVelocityPressureStress(
sphMarkersD, otherFsiBodiesD, otherFsiMeshD);
CollideWrapper();
CalculateXSPH_velocity();
// AddGravityToFluid();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::CollideWrapper() {
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
hipMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
//------------------------------------------------------------------------
// thread per particle
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
uint numBlocks1, numThreads1;
computeGridSize((int)numObjectsH->numAllMarkers -
(int)numObjectsH->numBoundaryMarkers, 256, numBlocks1, numThreads1);
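    // Two launch configurations: one over all markers, and a second one that excludes the
    // fixed wall boundary (BCE) markers for kernels that loop only over fluid/solid markers.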
// Execute the kernel
thrust::device_vector<Real4> sortedDerivVelRho(numObjectsH->numAllMarkers);
thrust::device_vector<Real3> sortedDerivTauXxYyZz(numObjectsH->numAllMarkers);
thrust::device_vector<Real3> sortedDerivTauXyXzYz(numObjectsH->numAllMarkers);
thrust::device_vector<Real3> sortedKernelSupport(numObjectsH->numAllMarkers);
thrust::device_vector<uint> sortedFreeSurfaceId(numObjectsH->numAllMarkers);
sortedXSPHandShift.resize(numObjectsH->numAllMarkers);
// Calculate the kernel support of each particle
if (paramsH->bceTypeWall == BceVersion::ADAMI || paramsH->bceType == BceVersion::ADAMI){
hipLaunchKernelGGL(( calcKernelSupport), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD),
mR3CAST(sortedKernelSupport), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "calcKernelSupport");
}
// Re-Initialize the density after several time steps
if (density_initialization >= paramsH->densityReinit) {
thrust::device_vector<Real4> rhoPresMuD_old = sortedSphMarkersD->rhoPresMuD;
printf("Re-initializing density after %d steps.\n", paramsH->densityReinit);
hipLaunchKernelGGL(( calcRho_kernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD),
mR4CAST(rhoPresMuD_old), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), density_initialization, isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "calcRho_kernel");
density_initialization = 0;
}
density_initialization++;
// Execute the kernel
if (paramsH->elastic_SPH) { // For granular material
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
        // Execute Navier_Stokes and Shear_Stress_Rate combined in a single kernel (NS_SSR)
hipLaunchKernelGGL(( NS_SSR), dim3(numBlocks), dim3(numThreads), 0, 0,
U1CAST(fsiGeneralData->activityIdentifierD), mR4CAST(sortedDerivVelRho),
mR3CAST(sortedDerivTauXxYyZz), mR3CAST(sortedDerivTauXyXzYz), mR3CAST(sortedXSPHandShift),
mR3CAST(sortedKernelSupport), mR4CAST(sortedSphMarkersD->posRadD),
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD),
mR3CAST(bceWorker->velMas_ModifiedBCE), mR4CAST(bceWorker->rhoPreMu_ModifiedBCE),
mR3CAST(bceWorker->tauXxYyZz_ModifiedBCE), mR3CAST(bceWorker->tauXyXzYz_ModifiedBCE),
mR3CAST(sortedSphMarkersD->tauXxYyZzD), mR3CAST(sortedSphMarkersD->tauXyXzYzD),
U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), U1CAST(markersProximityD->mapOriginalToSorted),
U1CAST(sortedFreeSurfaceId), isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "Navier_Stokes and Shear_Stress_Rate");
} else { // For fluid
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
        // Build the list of particle indices, excluding wall boundary (BCE) particles
thrust::device_vector<uint> indexOfIndex(numObjectsH->numAllMarkers);
thrust::device_vector<uint> identityOfIndex(numObjectsH->numAllMarkers);
hipLaunchKernelGGL(( calIndexOfIndex), dim3(numBlocks), dim3(numThreads), 0, 0,
U1CAST(indexOfIndex), U1CAST(identityOfIndex), U1CAST(markersProximityD->gridMarkerIndexD));
thrust::remove_if(indexOfIndex.begin(), indexOfIndex.end(),
identityOfIndex.begin(), thrust::identity<int>());
// execute the kernel
hipLaunchKernelGGL(( Navier_Stokes), dim3(numBlocks1), dim3(numThreads1), 0, 0,
U1CAST(indexOfIndex), mR4CAST(sortedDerivVelRho), mR3CAST(sortedXSPHandShift),
mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(bceWorker->velMas_ModifiedBCE),
mR4CAST(bceWorker->rhoPreMu_ModifiedBCE), U1CAST(markersProximityD->gridMarkerIndexD),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "Navier_Stokes");
}
// Launch a kernel to copy data from sorted arrays to original arrays.
// This is faster than using thrust::sort_by_key()
hipLaunchKernelGGL(( CopySortedToOriginal_D), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sortedDerivVelRho), mR3CAST(sortedDerivTauXxYyZz), mR3CAST(sortedDerivTauXyXzYz),
mR4CAST(fsiGeneralData->derivVelRhoD), mR3CAST(fsiGeneralData->derivTauXxYyZzD),
mR3CAST(fsiGeneralData->derivTauXyXzYzD), U1CAST(markersProximityD->gridMarkerIndexD),
U1CAST(fsiGeneralData->activityIdentifierD), U1CAST(markersProximityD->mapOriginalToSorted),
U1CAST(fsiGeneralData->freeSurfaceIdD), U1CAST(sortedFreeSurfaceId));
sortedDerivVelRho.clear();
sortedDerivTauXxYyZz.clear();
sortedDerivTauXyXzYz.clear();
sortedKernelSupport.clear();
sortedFreeSurfaceId.clear();
hipFree(isErrorD);
free(isErrorH);
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::CalculateXSPH_velocity() {
// Calculate vel_XSPH
if (vel_XSPH_Sorted_D.size() != numObjectsH->numAllMarkers) {
printf("vel_XSPH_Sorted_D.size() %zd numObjectsH->numAllMarkers %zd \n",
vel_XSPH_Sorted_D.size(), numObjectsH->numAllMarkers);
throw std::runtime_error(
"Error! size error vel_XSPH_Sorted_D Thrown from "
"CalculateXSPH_velocity!\n");
}
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
hipMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
// thread per particle
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
//------------------------------------------------------------------------
if (paramsH->elastic_SPH) {
        // The XSPH term is already included in the shifting vector
hipLaunchKernelGGL(( CopySortedToOriginal_XSPH_D), dim3(numBlocks), dim3(numThreads), 0, 0,
mR3CAST(sortedXSPHandShift), mR3CAST(fsiGeneralData->vel_XSPH_D),
U1CAST(markersProximityD->gridMarkerIndexD),
U1CAST(fsiGeneralData->activityIdentifierD),
U1CAST(markersProximityD->mapOriginalToSorted));
} else {
uint numBlocks1, numThreads1;
computeGridSize((int)numObjectsH->numAllMarkers -
(int)numObjectsH->numBoundaryMarkers, 256, numBlocks1, numThreads1);
thrust::fill(vel_XSPH_Sorted_D.begin(), vel_XSPH_Sorted_D.end(), mR3(0.0));
        // Build the list of particle indices, excluding wall boundary (BCE) particles
thrust::device_vector<uint> indexOfIndex(numObjectsH->numAllMarkers);
thrust::device_vector<uint> identityOfIndex(numObjectsH->numAllMarkers);
hipLaunchKernelGGL(( calIndexOfIndex), dim3(numBlocks), dim3(numThreads), 0, 0,
U1CAST(indexOfIndex), U1CAST(identityOfIndex),
U1CAST(markersProximityD->gridMarkerIndexD));
thrust::remove_if(indexOfIndex.begin(), indexOfIndex.end(),
identityOfIndex.begin(), thrust::identity<int>());
// Execute the kernel
hipLaunchKernelGGL(( CalcVel_XSPH_D), dim3(numBlocks1), dim3(numThreads1), 0, 0,
U1CAST(indexOfIndex), mR3CAST(vel_XSPH_Sorted_D),
mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(sortedXSPHandShift),
U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "CalcVel_XSPH_D");
hipLaunchKernelGGL(( CopySortedToOriginal_XSPH_D), dim3(numBlocks), dim3(numThreads), 0, 0,
mR3CAST(vel_XSPH_Sorted_D), mR3CAST(fsiGeneralData->vel_XSPH_D),
U1CAST(markersProximityD->gridMarkerIndexD),
U1CAST(fsiGeneralData->activityIdentifierD),
U1CAST(markersProximityD->mapOriginalToSorted));
}
if (density_initialization % paramsH->densityReinit == 0)
CopySortedToOriginal_NonInvasive_R4(sphMarkersD->rhoPresMuD,
sortedSphMarkersD->rhoPresMuD, markersProximityD->gridMarkerIndexD);
hipFree(isErrorD);
free(isErrorH);
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::AddGravityToFluid() {
// add gravity to fluid markers
/* Add outside forces. Don't add gravity to rigids, BCE, and boundaries, it is
* added in ChSystem */
Real3 totalFluidBodyForce3 = paramsH->bodyForce3 + paramsH->gravity;
thrust::device_vector<Real4> bodyForceD(numObjectsH->numAllMarkers);
thrust::fill(bodyForceD.begin(), bodyForceD.end(), mR4(totalFluidBodyForce3));
thrust::transform(
fsiGeneralData->derivVelRhoD.begin() + fsiGeneralData->referenceArray[0].x,
fsiGeneralData->derivVelRhoD.begin() + fsiGeneralData->referenceArray[0].y, bodyForceD.begin(),
fsiGeneralData->derivVelRhoD.begin() + fsiGeneralData->referenceArray[0].x, thrust::plus<Real4>());
bodyForceD.clear();
}
} // namespace fsi
} // namespace chrono
//================================================================================================================================
| 70250ec54c3ca4ce3a0a0674dba789e0840282e8.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Arman Pazouki, Wei Hu
// =============================================================================
#include <thrust/extrema.h>
#include <thrust/sort.h>
#include "chrono_fsi/physics/ChFsiForceExplicitSPH.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
//================================================================================================================================
namespace chrono {
namespace fsi {
//--------------------------------------------------------------------------------------------------------------------------------
__device__ __inline__ void calc_G_Matrix(Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* G_i,
uint* cellStart,
uint* cellEnd,
uint* indexOfIndex) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers - numObjectsD.numBoundaryMarkers)
return;
uint index = indexOfIndex[id];
Real3 posRadA = mR3(sortedPosRad[index]);
Real h_i = sortedPosRad[index].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
    // mGi holds the elements of the inverse of the correction matrix G
Real mGi[9] = {0.0};
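    // mGi accumulates -sum_j (r_ij outer grad W_ij) * V_j over the neighbors; inverting it
    // yields the kernel-gradient renormalization (first-order consistency correction) G_i.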
// examine neighbouring cells
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
                // get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
Real dd = rij.x * rij.x + rij.y * rij.y + rij.z * rij.z;
if (dd > SqRadii || sortedRhoPreMu[j].w < -1.5)
continue;
Real3 grad_i_wij = GradWh(rij, h_i);
Real3 grw_vj = grad_i_wij * paramsD.volume0;
mGi[0] -= rij.x * grw_vj.x;
mGi[1] -= rij.x * grw_vj.y;
mGi[2] -= rij.x * grw_vj.z;
mGi[3] -= rij.y * grw_vj.x;
mGi[4] -= rij.y * grw_vj.y;
mGi[5] -= rij.y * grw_vj.z;
mGi[6] -= rij.z * grw_vj.x;
mGi[7] -= rij.z * grw_vj.y;
mGi[8] -= rij.z * grw_vj.z;
}
}
}
Real Det = (mGi[0] * mGi[4] * mGi[8] - mGi[0] * mGi[5] * mGi[7] -
mGi[1] * mGi[3] * mGi[8] + mGi[1] * mGi[5] * mGi[6] +
mGi[2] * mGi[3] * mGi[7] - mGi[2] * mGi[4] * mGi[6]);
if (abs(Det) > 0.01) {
Real OneOverDet = 1.0 / Det;
G_i[0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) * OneOverDet;
G_i[1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) * OneOverDet;
G_i[2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) * OneOverDet;
G_i[3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) * OneOverDet;
G_i[4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) * OneOverDet;
G_i[5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) * OneOverDet;
G_i[6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) * OneOverDet;
G_i[7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) * OneOverDet;
G_i[8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) * OneOverDet;
} else {
for (int i = 0; i < 9; i++) {
G_i[i] = 0.0;
}
G_i[0] = 1;
G_i[4] = 1;
G_i[8] = 1;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ __inline__ void calc_A_Matrix(Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* A_i,
Real* G_i,
uint* cellStart,
uint* cellEnd,
uint* indexOfIndex) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers - numObjectsD.numBoundaryMarkers)
return;
uint index = indexOfIndex[id];
Real3 posRadA = mR3(sortedPosRad[index]);
Real h_i = sortedPosRad[index].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
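    // A_i stores the third-order tensor A[k][m][n] = sum_j (G_i * grad W_ij)_k * r_m * r_n * V_j,
    // needed to build the correction matrix for the consistent Laplacian operator.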
// get address in grid
int3 gridPos = calcGridPos(posRadA);
// examine neighbouring cells
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
                // get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
Real dd = rij.x * rij.x + rij.y * rij.y + rij.z * rij.z;
if (dd > SqRadii || sortedRhoPreMu[j].w < -1.5)
continue;
Real h_j = sortedPosRad[j].w;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_ij = GradWh(rij, h_ij);
Real V_j = paramsD.markerMass / paramsD.rho0;
Real com_part = 0;
com_part = (G_i[0] * grad_ij.x +
G_i[1] * grad_ij.y + G_i[2] * grad_ij.z) * V_j;
A_i[0] += rij.x * rij.x * com_part; // 111
A_i[1] += rij.x * rij.y * com_part; // 112
A_i[2] += rij.x * rij.z * com_part; // 113
A_i[3] += rij.y * rij.x * com_part; // 121
A_i[4] += rij.y * rij.y * com_part; // 122
A_i[5] += rij.y * rij.z * com_part; // 123
A_i[6] += rij.z * rij.x * com_part; // 131
A_i[7] += rij.z * rij.y * com_part; // 132
A_i[8] += rij.z * rij.z * com_part; // 133
com_part = (G_i[3] * grad_ij.x +
G_i[4] * grad_ij.y + G_i[5] * grad_ij.z) * V_j;
A_i[9] += rij.x * rij.x * com_part; // 211
A_i[10] += rij.x * rij.y * com_part; // 212
A_i[11] += rij.x * rij.z * com_part; // 213
A_i[12] += rij.y * rij.x * com_part; // 221
A_i[13] += rij.y * rij.y * com_part; // 222
A_i[14] += rij.y * rij.z * com_part; // 223
A_i[15] += rij.z * rij.x * com_part; // 231
A_i[16] += rij.z * rij.y * com_part; // 232
A_i[17] += rij.z * rij.z * com_part; // 233
com_part = (G_i[6] * grad_ij.x +
G_i[7] * grad_ij.y + G_i[8] * grad_ij.z) * V_j;
A_i[18] += rij.x * rij.x * com_part; // 311
A_i[19] += rij.x * rij.y * com_part; // 312
A_i[20] += rij.x * rij.z * com_part; // 313
A_i[21] += rij.y * rij.x * com_part; // 321
A_i[22] += rij.y * rij.y * com_part; // 322
A_i[23] += rij.y * rij.z * com_part; // 323
A_i[24] += rij.z * rij.x * com_part; // 331
A_i[25] += rij.z * rij.y * com_part; // 332
A_i[26] += rij.z * rij.z * com_part; // 333
}
}
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ __inline__ void calc_L_Matrix(Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* A_i,
Real* L_i,
Real* G_i,
uint* cellStart,
uint* cellEnd,
uint* indexOfIndex) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers - numObjectsD.numBoundaryMarkers)
return;
uint index = indexOfIndex[id];
Real3 posRadA = mR3(sortedPosRad[index]);
Real h_i = sortedPosRad[index].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
Real B[36] = {0.0};
Real L[6] = {0.0};
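    // B is the 6x6 system assembled from A_i and the pair geometry; inv6xdelta_mn then
    // extracts the six independent entries of the symmetric Laplacian correction matrix L_i.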
// get address in grid
int3 gridPos = calcGridPos(posRadA);
// examine neighbouring cells
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
                // get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
Real dd = rij.x * rij.x + rij.y * rij.y + rij.z * rij.z;
if (dd > SqRadii || sortedRhoPreMu[j].w < -1.5)
continue;
Real d = length(rij);
Real3 eij = rij / d;
Real h_j = sortedPosRad[j].w;
// Real m_j = paramsD.markerMass;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_ij = GradWh(rij, h_ij);
Real V_j = paramsD.markerMass / paramsD.rho0;
Real com_part = 0;
// mn=11
Real XX = (eij.x * grad_ij.x);
Real XY = (eij.x * grad_ij.y + eij.y * grad_ij.x);
Real XZ = (eij.x * grad_ij.z + eij.z * grad_ij.x);
Real YY = (eij.y * grad_ij.y);
Real YZ = (eij.y * grad_ij.z + eij.z * grad_ij.y);
Real ZZ = (eij.z * grad_ij.z);
com_part = (A_i[0] * eij.x + A_i[9] * eij.y +
A_i[18] * eij.z + rij.x * eij.x) * V_j;
B[6 * 0 + 0] += com_part * XX; // 11
B[6 * 0 + 1] += com_part * XY; // 12
B[6 * 0 + 2] += com_part * XZ; // 13
B[6 * 0 + 3] += com_part * YY; // 14
B[6 * 0 + 4] += com_part * YZ; // 15
                    B[6 * 0 + 5] += com_part * ZZ;  // 16
// mn=12
com_part = (A_i[1] * eij.x + A_i[10] * eij.y +
A_i[19] * eij.z + rij.x * eij.y) * V_j;
B[6 * 1 + 0] += com_part * XX; // 21
B[6 * 1 + 1] += com_part * XY; // 22
B[6 * 1 + 2] += com_part * XZ; // 23
B[6 * 1 + 3] += com_part * YY; // 24
B[6 * 1 + 4] += com_part * YZ; // 25
                    B[6 * 1 + 5] += com_part * ZZ;  // 26
// mn=13
com_part = (A_i[2] * eij.x + A_i[11] * eij.y +
A_i[20] * eij.z + rij.x * eij.z) * V_j;
B[6 * 2 + 0] += com_part * XX; // 31
B[6 * 2 + 1] += com_part * XY; // 32
B[6 * 2 + 2] += com_part * XZ; // 33
B[6 * 2 + 3] += com_part * YY; // 34
B[6 * 2 + 4] += com_part * YZ; // 35
B[6 * 2 + 5] += com_part * ZZ; // 36
// Note that we skip mn=21 since it is similar to mn=12
// mn=22
com_part = (A_i[4] * eij.x + A_i[13] * eij.y +
A_i[22] * eij.z + rij.y * eij.y) * V_j;
B[6 * 3 + 0] += com_part * XX; // 41
B[6 * 3 + 1] += com_part * XY; // 42
B[6 * 3 + 2] += com_part * XZ; // 43
B[6 * 3 + 3] += com_part * YY; // 44
B[6 * 3 + 4] += com_part * YZ; // 45
B[6 * 3 + 5] += com_part * ZZ; // 46
// mn=23
com_part = (A_i[5] * eij.x + A_i[14] * eij.y +
A_i[23] * eij.z + rij.y * eij.z) * V_j;
B[6 * 4 + 0] += com_part * XX; // 51
B[6 * 4 + 1] += com_part * XY; // 52
B[6 * 4 + 2] += com_part * XZ; // 53
B[6 * 4 + 3] += com_part * YY; // 54
B[6 * 4 + 4] += com_part * YZ; // 55
B[6 * 4 + 5] += com_part * ZZ; // 56
// mn=33
com_part = (A_i[8] * eij.x + A_i[17] * eij.y +
A_i[26] * eij.z + rij.z * eij.z) * V_j;
B[6 * 5 + 0] += com_part * XX; // 61
B[6 * 5 + 1] += com_part * XY; // 62
B[6 * 5 + 2] += com_part * XZ; // 63
B[6 * 5 + 3] += com_part * YY; // 64
B[6 * 5 + 4] += com_part * YZ; // 65
B[6 * 5 + 5] += com_part * ZZ; // 66
}
}
}
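    // inv6xdelta_mn is expected to solve the 6x6 system stored in B for the 6 independent
    // entries of the (symmetric) Laplacian correction matrix; they are expanded into the
    // full 3x3 L_i below.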
inv6xdelta_mn(B, L);
L_i[0] = L[0];
L_i[1] = L[1];
L_i[2] = L[2];
L_i[3] = L[1];
L_i[4] = L[3];
L_i[5] = L[4];
L_i[6] = L[2];
L_i[7] = L[4];
L_i[8] = L[5];
// Real Det = (L_i[0] * L_i[4] * L_i[8] - L_i[0] * L_i[5] * L_i[7] - L_i[1] * L_i[3] * L_i[8] +
// L_i[1] * L_i[5] * L_i[6] + L_i[2] * L_i[3] * L_i[7] - L_i[2] * L_i[4] * L_i[6]);
// if (abs(Det) < 0.01) {
// for (int i = 0; i < 9; i++) {
// L_i[0 * 9 + i] = 0.0;
// L_i[0 * 9 + 0] = 1;
// L_i[0 * 9 + 4] = 1;
// L_i[0 * 9 + 8] = 1;
// }
// }
// printf("L Det %f\n", Det);
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void calIndexOfIndex(uint* indexOfIndex,
uint* identityOfIndex,
uint* gridMarkerIndex) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers)
return;
indexOfIndex[id] = id;
if (gridMarkerIndex[id] >= numObjectsD.numFluidMarkers &&
gridMarkerIndex[id] < numObjectsD.numFluidMarkers + numObjectsD.numBoundaryMarkers) {
identityOfIndex[id] = 1;
} else {
identityOfIndex[id] = 0;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void Shear_Stress_Rate(uint* indexOfIndex,
Real4* sortedPosRad,
Real4* sortedRhoPreMu,
Real3* sortedVelMas,
Real3* velMas_ModifiedBCE,
Real4* rhoPreMu_ModifiedBCE,
Real3* sortedTauXxYyZz,
Real3* sortedTauXyXzYz,
Real3* sortedDerivTauXxYyZz,
Real3* sortedDerivTauXyXzYz,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers - numObjectsD.numBoundaryMarkers)
return;
uint index = indexOfIndex[id];
if (sortedRhoPreMu[index].w > -0.5)
return;
Real3 posRadA = mR3(sortedPosRad[index]);
Real3 velMasA = sortedVelMas[index];
Real hA = sortedPosRad[index].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
Real tauxx = sortedTauXxYyZz[index].x;
Real tauyy = sortedTauXxYyZz[index].y;
Real tauzz = sortedTauXxYyZz[index].z;
Real tauxy = sortedTauXyXzYz[index].x;
Real tauxz = sortedTauXyXzYz[index].y;
Real tauyz = sortedTauXyXzYz[index].z;
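    // the stress tensor is symmetric, so mirror the off-diagonal components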
Real tauzx = tauxz;
Real tauzy = tauyz;
Real tauyx = tauxy;
Real dTauxx = 0.0;
Real dTauyy = 0.0;
Real dTauzz = 0.0;
Real dTauxy = 0.0;
Real dTauxz = 0.0;
Real dTauyz = 0.0;
Real G_i[9] = {0.0};
calc_G_Matrix(sortedPosRad, sortedVelMas, sortedRhoPreMu, G_i, cellStart,
cellEnd, indexOfIndex);
// get address in grid
int3 gridPos = calcGridPos(posRadA);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real dd = dist3.x * dist3.x + dist3.y * dist3.y + dist3.z * dist3.z;
if (dd > SqRadii)
continue;
Real3 velMasB = sortedVelMas[j];
Real4 rhoPresMuB = sortedRhoPreMu[j];
if (rhoPresMuB.w > -0.5) {
int bceIndexB = gridMarkerIndex[j] - numObjectsD.numFluidMarkers;
if (!(bceIndexB >= 0 && bceIndexB <
numObjectsD.numBoundaryMarkers + numObjectsD.numRigidMarkers + numObjectsD.numFlexMarkers)) {
printf("Error! bceIndex out of bound, collideCell !\n");
}
rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB];
velMasB = velMas_ModifiedBCE[bceIndexB];
velMasB = 2.0 * velMasB - velMasA;
}
Real rhoB = rhoPresMuB.x;
Real mB = paramsD.markerMass;
Real3 gradW = GradWh(dist3, hA);
Real3 gradW_new;
gradW_new.x = G_i[0] * gradW.x + G_i[1] * gradW.y + G_i[2] * gradW.z;
gradW_new.y = G_i[3] * gradW.x + G_i[4] * gradW.y + G_i[5] * gradW.z;
gradW_new.z = G_i[6] * gradW.x + G_i[7] * gradW.y + G_i[8] * gradW.z;
gradW = gradW_new;
// start to calculate the rate
Real Gm = paramsD.G_shear; // shear modulus of the material
Real half_mB_over_rhoB = 0.5 * (mB / rhoB);
Real3 vAB = velMasA - velMasB;
Real3 vAB_h = (velMasA - velMasB) * half_mB_over_rhoB;
// entries of strain rate tensor
Real exx = -2.0 * vAB_h.x * gradW.x;
Real eyy = -2.0 * vAB_h.y * gradW.y;
Real ezz = -2.0 * vAB_h.z * gradW.z;
Real exy = -vAB_h.x * gradW.y - vAB_h.y * gradW.x;
Real exz = -vAB_h.x * gradW.z - vAB_h.z * gradW.x;
Real eyz = -vAB_h.y * gradW.z - vAB_h.z * gradW.y;
// entries of rotation rate (spin) tensor
// Real wxx = 0.0;
// Real wyy = 0.0;
// Real wzz = 0.0;
Real wxy = -vAB_h.x * gradW.y + vAB_h.y * gradW.x;
Real wxz = -vAB_h.x * gradW.z + vAB_h.z * gradW.x;
Real wyz = -vAB_h.y * gradW.z + vAB_h.z * gradW.y;
Real wyx = -wxy;
// Real wzx = -wxz;
Real wzy = -wyz;
Real edia = 1.0 / 3.0 * (exx + eyy + ezz);
Real twoGm = 2.0 * Gm;
Real K_edia = paramsD.K_bulk * 1.0 * edia;
dTauxx += twoGm * (exx - edia) + 2.0 * (tauxy * wxy + tauxz * wxz) + K_edia;
dTauyy += twoGm * (eyy - edia) - 2.0 * (tauyx * wxy - tauyz * wyz) + K_edia;
dTauzz += twoGm * (ezz - edia) - 2.0 * (tauzx * wxz + tauzy * wyz) + K_edia;
dTauxy += twoGm * exy - (tauxx * wxy + tauxz * wzy) + (wxy * tauyy + wxz * tauzy);
dTauxz += twoGm * exz - (tauxx * wxz + tauxy * wyz) + (wxy * tauyz + wxz * tauzz);
dTauyz += twoGm * eyz - (tauyx * wxz + tauyy * wyz) + (wyx * tauxz + wyz * tauzz);
}
}
}
}
}
sortedDerivTauXxYyZz[index] = mR3(dTauxx, dTauyy, dTauzz);
sortedDerivTauXyXzYz[index] = mR3(dTauxy, dTauxz, dTauyz);
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void calcRho_kernel(Real4* sortedPosRad,
Real4* sortedRhoPreMu,
Real4* sortedRhoPreMu_old,
uint* cellStart,
uint* cellEnd,
int density_reinit,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
if (sortedRhoPreMu[index].w > -0.5 && sortedRhoPreMu[index].w < 0.5)
return;
sortedRhoPreMu_old[index].y =
Eos(sortedRhoPreMu_old[index].x, sortedRhoPreMu_old[index].w);
Real3 posRadA = mR3(sortedPosRad[index]);
Real h_i = sortedPosRad[index].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
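    // density re-initialization accumulators; sum_mW_rho starts at a tiny epsilon to
    // avoid division by zero when no valid neighbors are found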
Real sum_mW = 0;
Real sum_mW_rho = 0.0000001;
Real sum_W = 0.0;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) {
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real dd = dist3.x * dist3.x + dist3.y * dist3.y + dist3.z * dist3.z;
if (dd > SqRadii)
continue;
if (sortedRhoPreMu_old[j].w > -1.5 && sortedRhoPreMu_old[j].w < -0.5) {
Real h_j = sortedPosRad[j].w;
Real m_j = paramsD.markerMass;
Real d = length(dist3);
Real W3 = W3h(d, 0.5 * (h_j + h_i));
sum_mW += m_j * W3;
sum_W += W3;
sum_mW_rho += m_j * W3 / sortedRhoPreMu_old[j].x;
}
}
}
}
}
}
// sortedRhoPreMu[index].x = sum_mW;
if ((density_reinit == 0) &&
(sortedRhoPreMu[index].w > -1.5) && (sortedRhoPreMu[index].w < -0.5))
sortedRhoPreMu[index].x = sum_mW / sum_mW_rho;
if ((sortedRhoPreMu[index].x > 3 * paramsD.rho0 ||
sortedRhoPreMu[index].x < 0.01 * paramsD.rho0) &&
(sortedRhoPreMu[index].w > -1.5) && (sortedRhoPreMu[index].w < -0.5))
printf("(calcRho_kernel)density marker %d, sum_mW=%f, sum_W=%f, h_i=%f\n",
index, sum_mW, sum_W, h_i);
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void calcKernelSupport(Real4* sortedPosRad,
Real4* sortedRhoPreMu,
Real3* sortedKernelSupport,
uint* cellStart,
uint* cellEnd,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
Real h_i = sortedPosRad[index].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
Real3 posRadA = mR3(sortedPosRad[index]);
Real W0 = W3h(0, h_i);
Real sum_W_all = W0;
Real sum_W_identical = W0;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) {
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real dd = dist3.x*dist3.x + dist3.y*dist3.y + dist3.z*dist3.z;
if (dd > SqRadii)
continue;
Real d = length(dist3);
Real h_j = sortedPosRad[j].w;
Real W3 = W3h(d, 0.5 * (h_j + h_i));
sum_W_all += W3;
if (abs(sortedRhoPreMu[index].w - sortedRhoPreMu[j].w) < 0.001) {
sum_W_identical += W3;
}
}
}
}
}
}
sortedKernelSupport[index].x = sum_W_all;
sortedKernelSupport[index].y = sum_W_identical;
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ __inline__ void modifyPressure(Real4& rhoPresMuB, const Real3& dist3Alpha) {
// body force in x direction
rhoPresMuB.y = (dist3Alpha.x > 0.5 * paramsD.boxDims.x) ?
(rhoPresMuB.y - paramsD.deltaPress.x) : rhoPresMuB.y;
rhoPresMuB.y = (dist3Alpha.x < -0.5 * paramsD.boxDims.x) ?
(rhoPresMuB.y + paramsD.deltaPress.x) : rhoPresMuB.y;
    // body force in y direction
rhoPresMuB.y = (dist3Alpha.y > 0.5 * paramsD.boxDims.y) ?
(rhoPresMuB.y - paramsD.deltaPress.y) : rhoPresMuB.y;
rhoPresMuB.y = (dist3Alpha.y < -0.5 * paramsD.boxDims.y) ?
(rhoPresMuB.y + paramsD.deltaPress.y) : rhoPresMuB.y;
    // body force in z direction
rhoPresMuB.y = (dist3Alpha.z > 0.5 * paramsD.boxDims.z) ?
(rhoPresMuB.y - paramsD.deltaPress.z) : rhoPresMuB.y;
rhoPresMuB.y = (dist3Alpha.z < -0.5 * paramsD.boxDims.z) ?
(rhoPresMuB.y + paramsD.deltaPress.z) : rhoPresMuB.y;
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real3 CubicSolve(Real aa, Real bb, Real cc, Real dd) {
Real disc, q, r, dum1, dum2, term1, r13;
    // guard against a zero leading coefficient before normalizing by it
    if (aa == 0) {
        return mR3(0, 0, 0);
    }
    bb /= aa;
    cc /= aa;
    dd /= aa;
    if (abs(bb) < 1e-9) {
        return mR3(0, 0, 0);
    }
    if (abs(cc) < 1e-9) {
        return mR3(0, 0, 0);
    }
    if (abs(dd) < 1e-9) {
        return mR3(0, 0, 0);
    }
q = (3.0 * cc - (bb * bb)) / 9.0;
r = -(27.0 * dd) + bb * (9.0 * cc - 2.0 * (bb * bb));
r /= 54.0;
disc = q * q * q + r * r;
term1 = (bb / 3.0);
/* dataForm.x1Im.value = 0; //The first root is always real.
if (disc > 0) { // one root real, two are complex
s = r + Math.sqrt(disc);
s = ((s < 0) ? -Math.pow(-s, (1.0/3.0)) : Math.pow(s, (1.0/3.0)));
t = r - Math.sqrt(disc);
t = ((t < 0) ? -Math.pow(-t, (1.0/3.0)) : Math.pow(t, (1.0/3.0)));
dataForm.x1Re.value = -term1 + s + t;
term1 += (s + t)/2.0;
dataForm.x3Re.value = dataForm.x2Re.value = -term1;
term1 = Math.sqrt(3.0)*(-t + s)/2;
dataForm.x2Im.value = term1;
dataForm.x3Im.value = -term1;
return;
}
// End if (disc > 0)
// The remaining options are all real
dataForm.x3Im.value = dataForm.x2Im.value = 0;
if (disc == 0){ // All roots real, at least two are equal.
r13 = ((r < 0) ? -Math.pow(-r,(1.0/3.0)) : Math.pow(r,(1.0/3.0)));
dataForm.x1Re.value = -term1 + 2.0*r13;
dataForm.x3Re.value = dataForm.x2Re.value = -(r13 + term1);
return;
} // End if (disc == 0)
*/
Real xRex, xRey, xRez;
// have complex root
if (disc > 0) {
xRex = 0.0;
xRey = 0.0;
xRez = 0.0;
return mR3(xRex, xRey, xRez);
}
// All roots real, at least two are equal.
if (disc == 0) {
if (r < 0) {
r13 = pow(-r, (1.0 / 3.0));
} else {
r13 = pow(r, (1.0 / 3.0));
}
xRex = -term1 + 2.0 * r13;
xRey = -(r13 + term1);
xRez = xRey;
return mR3(xRex, xRey, xRez);
}
// All roots are real and unequal (to get here, q < 0)
q = -q;
dum1 = q * q * q;
dum2 = r / (sqrt(dum1 + 1.0e-9));
if ((dum2 >= 0) && (dum2 <= 1)) {
dum1 = acos(dum2);
} else {
xRex = 0.0;
xRey = 0.0;
xRez = 0.0;
return mR3(xRex, xRey, xRez);
}
r13 = 2.0 * sqrt(q);
xRex = -term1 + r13 * cos(dum1 / 3.0);
xRey = -term1 + r13 * cos((dum1 + 2.0 * 3.1415926) / 3.0);
xRez = -term1 + r13 * cos((dum1 + 4.0 * 3.1415926) / 3.0);
return mR3(xRex, xRey, xRez);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real3 CubicEigen(Real4 c1, Real4 c2, Real4 c3) {
Real a = c1.x;
Real b = c1.y;
Real c = c1.z;
Real d = c1.w;
Real l = c2.x;
Real m = c2.y;
Real n = c2.z;
Real k = c2.w;
Real p = c3.x;
Real q = c3.y;
Real r = c3.z;
Real s = c3.w;
Real D = (a * m * r + b * p * n + c * l * q) - (a * n * q + b * l * r + c * m * p) + 1.0e-9;
Real x = ((b * r * k + c * m * s + d * n * q) - (b * n * s + c * q * k + d * m * r)) / D;
Real y = ((a * n * s + c * p * k + d * l * r) - (a * r * k + c * l * s + d * n * p)) / D;
Real z = ((a * q * k + b * l * s + d * m * p) - (a * m * s + b * p * k + d * l * q)) / D;
b = b + 1.0e-9;
x = 1.0e0;
z = (-l + a * m / b) / (n - c * m / b);
y = (-a - c * z) / b;
Real R = sqrt(x * x + y * y + z * z);
x = x / R;
y = y / R;
z = z / R;
// if(abs(D) < 1){
// return mR3(0,0,0);
// }
// if(abs(m) < 0.1){
// x=0;
// y=1;
// z=0;
// return mR3(x,y,z);
// }
// else{
// y=0;
// if(abs(c) > 0.1){
// x=1;
// z=-a/c;
// return mR3(x,y,z);
// }
// if(abs(a) > 0.1){
// z=1;
// x=-c/a;
// return mR3(x,y,z);
// }
// }
return mR3(x, y, z);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real4 DifVelocityRho(float G_i[9],
Real3 dist3,
Real d,
Real4 posRadA,
Real4 posRadB,
Real3 velMasA,
Real3 velMasB,
Real4 rhoPresMuA,
Real4 rhoPresMuB,
Real multViscosity) {
if (rhoPresMuA.w > -0.5 && rhoPresMuB.w > -0.5)
return mR4(0.0);
Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5);
    // Continuity equation
Real derivRho = paramsD.markerMass * dot(velMasA - velMasB, gradW);
// Viscosity
Real rAB_Dot_GradWh = dot(dist3, gradW);
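    // the epsMinMarkersDis * HSML^2 term regularizes the denominator for nearly coincident particle pairs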
Real rAB_Dot_GradWh_OverDist =
rAB_Dot_GradWh / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML);
Real3 derivV = -paramsD.markerMass * (rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) +
rhoPresMuB.y / (rhoPresMuB.x * rhoPresMuB.x)) * gradW +
paramsD.markerMass * (8.0f * multViscosity) * paramsD.mu0 *
rAB_Dot_GradWh_OverDist * (velMasA - velMasB) / square(rhoPresMuA.x + rhoPresMuB.x);
// Artificial viscosity
Real vAB_Dot_rAB = dot(velMasA - velMasB, dist3);
    // the (1 == 1) factor acts as a manual switch; set it to (1 == 0) to disable artificial viscosity
if ((vAB_Dot_rAB < 0.0) && (1 == 1)) {
Real alpha = paramsD.Ar_vis_alpha;
Real c_ab = paramsD.Cs;
        Real rho = 0.5f * (rhoPresMuA.x + rhoPresMuB.x);  // mean density of the pair
Real nu = -alpha * paramsD.HSML * c_ab / rho;
Real derivM1 = -paramsD.markerMass * (nu * vAB_Dot_rAB / (d * d +
paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML));
derivV.x += derivM1 * gradW.x;
derivV.y += derivM1 * gradW.y;
derivV.z += derivM1 * gradW.z;
}
return mR4(derivV, derivRho);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real4 DifVelocityRho_ElasticSPH(Real W_ini_inv,
Real W_AB,
Real3 gradW,
Real3 dist3,
Real d,
Real invd,
Real4 posRadA,
Real4 posRadB,
Real3 velMasA_in,
Real3 velMasB_in,
Real4 rhoPresMuA,
Real4 rhoPresMuB,
Real3 tauXxYyZz_A_in,
Real3 tauXyXzYz_A_in,
Real3 tauXxYyZz_B_in,
Real3 tauXyXzYz_B_in) {
if (rhoPresMuA.w > -0.5 && rhoPresMuB.w > -0.5)
return mR4(0.0);
Real3 velMasA = velMasA_in;
Real3 velMasB = velMasB_in;
Real3 tauXxYyZz_A = tauXxYyZz_A_in;
Real3 tauXxYyZz_B = tauXxYyZz_B_in;
Real3 tauXyXzYz_A = tauXyXzYz_A_in;
Real3 tauXyXzYz_B = tauXyXzYz_B_in;
/*if (rhoPresMuA.w < -0.5 && rhoPresMuB.w > -0.5) {
tauXxYyZz_B = tauXxYyZz_A;
tauXyXzYz_B = tauXyXzYz_A;
// velMasB = 2.0*velMasB - velMasA; // noslip BC
}
if (rhoPresMuA.w > -0.5 && rhoPresMuB.w < -0.5) {
tauXxYyZz_A = tauXxYyZz_B;
tauXyXzYz_A = tauXyXzYz_B;
// velMasA = 2.0*velMasA - velMasB; // noslip BC
}*/
Real Mass = paramsD.markerMass;
Real MassOverRho = Mass * paramsD.invrho0 * paramsD.invrho0;
Real3 MA_gradW = gradW * MassOverRho;
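    // acceleration from the divergence of the total stress (pairwise symmetric A+B form), weighted by m / rho0^2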
Real derivVx = (tauXxYyZz_A.x + tauXxYyZz_B.x) * MA_gradW.x +
(tauXyXzYz_A.x + tauXyXzYz_B.x) * MA_gradW.y +
(tauXyXzYz_A.y + tauXyXzYz_B.y) * MA_gradW.z;
Real derivVy = (tauXyXzYz_A.x + tauXyXzYz_B.x) * MA_gradW.x +
(tauXxYyZz_A.y + tauXxYyZz_B.y) * MA_gradW.y +
(tauXyXzYz_A.z + tauXyXzYz_B.z) * MA_gradW.z;
Real derivVz = (tauXyXzYz_A.y + tauXyXzYz_B.y) * MA_gradW.x +
(tauXyXzYz_A.z + tauXyXzYz_B.z) * MA_gradW.y +
(tauXxYyZz_A.z + tauXxYyZz_B.z) * MA_gradW.z;
// TODO: Visco-plastic model
// Real vel = length(velMasA);
// if(vel > 0.3){
// Real rAB_Dot_GradWh = dot(dist3, gradW);
// Real rAB_Dot_GradWh_OverDist = rAB_Dot_GradWh / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML *
// paramsD.HSML); Real3 derivV = - paramsD.markerMass *(rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) +
// rhoPresMuB.y / (rhoPresMuB.x * rhoPresMuB.x)) * gradW
// + paramsD.markerMass * (8.0f * multViscosity) * paramsD.mu_fric_s
// * pow(rhoPresMuA.x + rhoPresMuB.x, Real(-2)) * rAB_Dot_GradWh_OverDist * (velMasA - velMasB);
// derivVx = derivV.x;
// derivVy = derivV.y;
// derivVz = derivV.z;
// }
// Artificial viscosity
Real vAB_rAB = dot(velMasA - velMasB, dist3);
// if (vAB_rAB < 0.0) {
Real nu = -paramsD.Ar_vis_alpha * paramsD.HSML * paramsD.Cs * paramsD.invrho0;
Real derivM1 = -Mass * (nu * vAB_rAB * (invd * invd)); //+ paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML
derivVx += derivM1 * gradW.x;
derivVy += derivM1 * gradW.y;
derivVz += derivM1 * gradW.z;
// }
    // Artificial pressure to handle the tensile instability issue.
    // A complete artificial stress treatment should be implemented in the future.
/*if (paramsD.Coh_coeff > 1e-5) {
Real Pa = -1.0 / 3.0 * (tauXxYyZz_A.x + tauXxYyZz_A.y + tauXxYyZz_A.z);
if (Pa < 0.0) {
Real Pb = -1.0 / 3.0 * (tauXxYyZz_B.x + tauXxYyZz_B.y + tauXxYyZz_B.z);
Real epsi = 0.5;
Real Ra = Pa * epsi * paramsD.invrho0 * paramsD.invrho0;
Real Rb = Pb * epsi * paramsD.invrho0 * paramsD.invrho0;
Real fAB = W_AB * W_ini_inv;
Real small_F = Mass * pow(fAB, 3.0) * (Ra + Rb);
derivVx += small_F * gradW.x;
derivVy += small_F * gradW.y;
derivVz += small_F * gradW.z;
}
}*/
    // TODO: Damping force
// if (1 == 0) {
// Real xi0 = paramsD.Vis_Dam;
// Real E0 = paramsD.E_young;
// Real h0 = paramsD.HSML;
// Real Cd = xi0 * sqrt(E0 / (rhoA * h0 * h0));
// derivVx -= Cd * velMasA.x;
// derivVy -= Cd * velMasA.y;
// derivVz -= Cd * velMasA.z;
// }
// Real derivRho = Mass * dot(vel_XSPH_A - vel_XSPH_B, gradW);
return mR4(derivVx, derivVy, derivVz, 0.0);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real3 GradientOperator(float G_i[9],
Real3 dist3,
Real4 posRadA,
Real4 posRadB,
Real fA,
Real fB,
Real4 rhoPresMuA,
Real4 rhoPresMuB) {
Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5);
Real3 gradW_new;
gradW_new.x = G_i[0] * gradW.x + G_i[1] * gradW.y + G_i[2] * gradW.z;
gradW_new.y = G_i[3] * gradW.x + G_i[4] * gradW.y + G_i[5] * gradW.z;
gradW_new.z = G_i[6] * gradW.x + G_i[7] * gradW.y + G_i[8] * gradW.z;
Real Vol = paramsD.markerMass / rhoPresMuB.x;
Real fji = fB - fA;
Real Gra_ij_x = fji * gradW_new.x * Vol;
Real Gra_ij_y = fji * gradW_new.y * Vol;
Real Gra_ij_z = fji * gradW_new.z * Vol;
return mR3(Gra_ij_x, Gra_ij_y, Gra_ij_z);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real4 LaplacianOperator(float G_i[9],
float L_i[9],
Real3 dist3,
Real4 posRadA,
Real4 posRadB,
Real fA,
Real fB,
Real4 rhoPresMuA,
Real4 rhoPresMuB) {
Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5);
Real d = length(dist3);
Real3 eij = dist3 / d;
Real Vol = paramsD.markerMass / rhoPresMuB.x;
Real fij = fA - fB;
Real ex_Gwx = eij.x * gradW.x;
Real ex_Gwy = eij.x * gradW.y;
Real ex_Gwz = eij.x * gradW.z;
Real ey_Gwx = eij.y * gradW.x;
Real ey_Gwy = eij.y * gradW.y;
Real ey_Gwz = eij.y * gradW.z;
Real ez_Gwx = eij.z * gradW.x;
Real ez_Gwy = eij.z * gradW.y;
Real ez_Gwz = eij.z * gradW.z;
Real Part1 = L_i[0] * ex_Gwx + L_i[1] * ex_Gwy + L_i[2] * ex_Gwz +
L_i[3] * ey_Gwx + L_i[4] * ey_Gwy + L_i[5] * ey_Gwz +
L_i[6] * ez_Gwx + L_i[7] * ez_Gwy + L_i[8] * ez_Gwz;
Real Part2 = fij / d * Vol;
Real3 Part3 = mR3(-eij.x, -eij.y, -eij.z) * Vol;
return mR4(2.0 * Part1 * Part2, Part3.x * (2.0 * Part1),
Part3.y * (2.0 * Part1), Part3.z * (2.0 * Part1));
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void EOS(Real4* sortedRhoPreMu, volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numObjectsD.numAllMarkers)
return;
sortedRhoPreMu[index].y = Eos(sortedRhoPreMu[index].x, sortedRhoPreMu[index].w);
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void Navier_Stokes(uint* indexOfIndex,
Real4* sortedDerivVelRho,
Real3* sortedXSPHandShift,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* velMas_ModifiedBCE,
Real4* rhoPreMu_ModifiedBCE,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd,
volatile bool* isErrorD) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers - numObjectsD.numBoundaryMarkers)
return;
uint index = indexOfIndex[id];
// Do nothing for fixed wall BCE particles
if (sortedRhoPreMu[index].w > -0.5 && sortedRhoPreMu[index].w < 0.5) {
sortedDerivVelRho[index] = mR4(0.0);
return;
}
Real3 posRadA = mR3(sortedPosRad[index]);
Real3 velMasA = sortedVelMas[index];
Real4 rhoPresMuA = sortedRhoPreMu[index];
Real4 derivVelRho = mR4(0.0);
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
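    // correction matrices default to identity and are overwritten below only if consistent discretization is enabled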
Real G_i[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
Real L_i[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
if (paramsD.USE_Consistent_G)
calc_G_Matrix(sortedPosRad, sortedVelMas, sortedRhoPreMu, G_i, cellStart,
cellEnd, indexOfIndex);
if (paramsD.USE_Consistent_L) {
Real A_i[27] = {0.0};
calc_A_Matrix(sortedPosRad, sortedVelMas, sortedRhoPreMu, A_i, G_i, cellStart,
cellEnd, indexOfIndex);
calc_L_Matrix(sortedPosRad, sortedVelMas, sortedRhoPreMu, A_i, L_i, G_i, cellStart,
cellEnd, indexOfIndex);
}
float Gi[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
float Li[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
Gi[0] = G_i[0];
Gi[1] = G_i[1];
Gi[2] = G_i[2];
Gi[3] = G_i[3];
Gi[4] = G_i[4];
Gi[5] = G_i[5];
Gi[6] = G_i[6];
Gi[7] = G_i[7];
Gi[8] = G_i[8];
Li[0] = L_i[0];
Li[1] = L_i[1];
Li[2] = L_i[2];
Li[3] = L_i[3];
Li[4] = L_i[4];
Li[5] = L_i[5];
Li[6] = L_i[6];
Li[7] = L_i[7];
Li[8] = L_i[8];
Real3 preGra = mR3(0.0);
Real3 velxGra = mR3(0.0);
Real3 velyGra = mR3(0.0);
Real3 velzGra = mR3(0.0);
Real4 velxLap = mR4(0.0);
Real4 velyLap = mR4(0.0);
Real4 velzLap = mR4(0.0);
Real vA = length(velMasA);
Real vAdT = vA * paramsD.dT;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
Real3 inner_sum = mR3(0.0);
Real sum_w_i = W3h(0.0, sortedPosRad[index].w) * paramsD.volume0;
for (int x = -1; x <= 1; x++) {
for (int y = -1; y <= 1; y++) {
for (int z = -1; z <= 1; z++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real dd = dist3.x * dist3.x + dist3.y * dist3.y + dist3.z * dist3.z;
if (dd > SqRadii)
continue;
Real4 rhoPresMuB = sortedRhoPreMu[j];
// no rigid-rigid force
if (rhoPresMuA.w > -0.5 && rhoPresMuB.w > -0.5)
continue;
Real d = length(dist3);
// modifyPressure(rhoPresMuB, dist3Alpha);
// if (!(isfinite(rhoPresMuB.x) && isfinite(rhoPresMuB.y) && isfinite(rhoPresMuB.z))) {
// printf("Error! particle rhoPresMuB is NAN: thrown from modifyPressure !\n");
// }
Real3 velMasB = sortedVelMas[j];
if (rhoPresMuB.w > -0.5) {
int bceIndexB = gridMarkerIndex[j] - numObjectsD.numFluidMarkers;
if (!(bceIndexB >= 0 && bceIndexB <
numObjectsD.numAllMarkers - numObjectsD.numFluidMarkers)) {
printf("Error! bceIndex out of bound, collideCell !\n");
}
rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB];
velMasB = velMas_ModifiedBCE[bceIndexB];
}
Real multViscosit = 1;
derivVelRho += DifVelocityRho(Gi, dist3, d, sortedPosRad[index], sortedPosRad[j],
velMasA, velMasB, rhoPresMuA, rhoPresMuB, multViscosit);
preGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
-rhoPresMuA.y, rhoPresMuB.y, rhoPresMuA, rhoPresMuB);
velxGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.x, velMasB.x, rhoPresMuA, rhoPresMuB);
velyGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.y, velMasB.y, rhoPresMuA, rhoPresMuB);
velzGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.z, velMasB.z, rhoPresMuA, rhoPresMuB);
velxLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.x, velMasB.x, rhoPresMuA, rhoPresMuB);
velyLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.y, velMasB.y, rhoPresMuA, rhoPresMuB);
velzLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.z, velMasB.z, rhoPresMuA, rhoPresMuB);
if (d > paramsD.HSML * 1.0e-9)
sum_w_i = sum_w_i + W3h(d, sortedPosRad[index].w) * paramsD.volume0;
}
}
}
}
}
Real nu = paramsD.mu0 / paramsD.rho0;
Real dvxdt = -preGra.x / rhoPresMuA.x + (velxLap.x + velxGra.x * velxLap.y +
velxGra.y * velxLap.z + velxGra.z * velxLap.w) * nu;
Real dvydt = -preGra.y / rhoPresMuA.x + (velyLap.x + velyGra.x * velyLap.y +
velyGra.y * velyLap.z + velyGra.z * velyLap.w) * nu;
Real dvzdt = -preGra.z / rhoPresMuA.x + (velzLap.x + velzGra.x * velzLap.y +
velzGra.y * velzLap.z + velzGra.z * velzLap.w) * nu;
Real drhodt = -paramsD.rho0 * (velxGra.x + velyGra.y + velzGra.z);
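    // accept the consistent-operator update only for fluid markers whose correction matrices are
    // well conditioned (determinants near 1) and whose kernel support is nearly complete (sum_w_i > 0.9)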
Real Det_G = (Gi[0] * Gi[4] * Gi[8] - Gi[0] * Gi[5] * Gi[7] - Gi[1] * Gi[3] * Gi[8] +
Gi[1] * Gi[5] * Gi[6] + Gi[2] * Gi[3] * Gi[7] - Gi[2] * Gi[4] * Gi[6]);
Real Det_L = (Li[0] * Li[4] * Li[8] - Li[0] * Li[5] * Li[7] - Li[1] * Li[3] * Li[8] +
Li[1] * Li[5] * Li[6] + Li[2] * Li[3] * Li[7] - Li[2] * Li[4] * Li[6]);
if (rhoPresMuA.w > -1.5 && rhoPresMuA.w < -0.5) {
if (Det_G > 0.9 && Det_G < 1.1 && Det_L > 0.9 && Det_L < 1.1 && sum_w_i > 0.9) {
derivVelRho = mR4(dvxdt, dvydt, dvzdt, drhodt);
}
}
if (!(isfinite(derivVelRho.x) && isfinite(derivVelRho.y) && isfinite(derivVelRho.z))) {
printf("Error! particle derivVel is NAN: thrown from ChFsiForceExplicitSPH.cu, collideD !\n");
*isErrorD = true;
}
if (!(isfinite(derivVelRho.w))) {
printf("Error! particle derivRho is NAN: thrown from ChFsiForceExplicitSPH.cu, collideD !\n");
*isErrorD = true;
}
// add gravity and other body force to fluid markers
if (rhoPresMuA.w > -1.5 && rhoPresMuA.w < -0.5) {
Real3 totalFluidBodyForce3 = paramsD.bodyForce3 + paramsD.gravity;
derivVelRho += mR4(totalFluidBodyForce3);
}
sortedDerivVelRho[index] = derivVelRho;
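    // limit the particle-shifting displacement to 5% of the distance traveled in one time step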
Real det_r_max = 0.05 * vAdT;
Real det_r_A = length(inner_sum);
if (det_r_A < det_r_max) {
sortedXSPHandShift[index] = inner_sum;
} else {
sortedXSPHandShift[index] = inner_sum * det_r_max / (det_r_A + 1e-9);
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void NS_SSR(uint* activityIdentifierD,
Real4* sortedDerivVelRho,
Real3* sortedDerivTauXxYyZz,
Real3* sortedDerivTauXyXzYz,
Real3* sortedXSPHandShift,
Real3* sortedKernelSupport,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* velMas_ModifiedBCE,
Real4* rhoPreMu_ModifiedBCE,
Real3* tauXxYyZz_ModifiedBCE,
Real3* tauXyXzYz_ModifiedBCE,
Real3* sortedTauXxYyZz,
Real3* sortedTauXyXzYz,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd,
uint* mapOriginalToSorted,
uint* sortedFreeSurfaceIdD,
volatile bool* isErrorD) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers)
return;
// no need to do anything if it is not an active particle
uint activity = activityIdentifierD[id];
if (activity == 0)
return;
// map original to sorted
uint index = mapOriginalToSorted[id];
if (sortedRhoPreMu[index].w > -0.5 && sortedRhoPreMu[index].w < 0.5)
return;
Real hA = sortedPosRad[index].w;
Real3 posRadA = mR3(sortedPosRad[index]);
Real3 velMasA = sortedVelMas[index];
Real4 rhoPresMuA = sortedRhoPreMu[index];
Real3 TauXxYyZzA = sortedTauXxYyZz[index];
Real3 TauXyXzYzA = sortedTauXyXzYz[index];
Real4 derivVelRho = mR4(0.0);
Real3 deltaV = mR3(0.0);
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
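    // fixed-size cache of neighbor indices; note there is no overflow check, so at most
    // 150 neighbors per particle are assumed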
uint j_list[150];
uint j_num = 0;
// Get address in grid
int3 gridPos = calcGridPos(posRadA);
// Find the neighbor particle list
for (int x = -1; x <= 1; x++) {
for (int y = -1; y <= 1; y++) {
for (int z = -1; z <= 1; z++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real dd = dist3.x * dist3.x + dist3.y * dist3.y + dist3.z * dist3.z;
if (dd < SqRadii) {
j_list[j_num] = j;
j_num++;
}
}
}
}
}
}
Real tauxx = sortedTauXxYyZz[index].x;
Real tauyy = sortedTauXxYyZz[index].y;
Real tauzz = sortedTauXxYyZz[index].z;
Real tauxy = sortedTauXyXzYz[index].x;
Real tauxz = sortedTauXyXzYz[index].y;
Real tauyz = sortedTauXyXzYz[index].z;
Real dTauxx = 0.0;
Real dTauyy = 0.0;
Real dTauzz = 0.0;
Real dTauxy = 0.0;
Real dTauxz = 0.0;
Real dTauyz = 0.0;
// Calculate the correction matrix for gradient operator
Real G_i[9] = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
if (paramsD.USE_Consistent_G) {
Real mGi[9] = {0.0};
for (uint n = 0; n < j_num; n++) {
uint j = j_list[n];
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
Real3 grad_i_wij = GradWh(rij, hA);
Real3 grw_vj = grad_i_wij * paramsD.volume0;
mGi[0] -= rij.x * grw_vj.x;
mGi[1] -= rij.x * grw_vj.y;
mGi[2] -= rij.x * grw_vj.z;
mGi[3] -= rij.y * grw_vj.x;
mGi[4] -= rij.y * grw_vj.y;
mGi[5] -= rij.y * grw_vj.z;
mGi[6] -= rij.z * grw_vj.x;
mGi[7] -= rij.z * grw_vj.y;
mGi[8] -= rij.z * grw_vj.z;
}
Real Det = (mGi[0] * mGi[4] * mGi[8] - mGi[0] * mGi[5] * mGi[7] -
mGi[1] * mGi[3] * mGi[8] + mGi[1] * mGi[5] * mGi[6] +
mGi[2] * mGi[3] * mGi[7] - mGi[2] * mGi[4] * mGi[6]);
if (abs(Det) > 0.01) {
Real OneOverDet = 1.0 / Det;
G_i[0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) * OneOverDet;
G_i[1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) * OneOverDet;
G_i[2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) * OneOverDet;
G_i[3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) * OneOverDet;
G_i[4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) * OneOverDet;
G_i[5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) * OneOverDet;
G_i[6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) * OneOverDet;
G_i[7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) * OneOverDet;
G_i[8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) * OneOverDet;
}
}
Real radii = paramsD.INITSPACE * 1.241; // 1.129;//1.241
Real invRadii = 1.0 / 1.241 * paramsD.INV_INIT; // 1.0 / radii
Real vA = length(velMasA);
Real vAdT = vA * paramsD.dT;
Real bs_vAdT = paramsD.beta_shifting * vAdT;
Real3 inner_sum = mR3(0.0);
Real sum_w_i = W3h(0.0, hA) * paramsD.volume0;
Real w_ini_inv = 1.0 / W3h(paramsD.INITSPACE, hA);
int N_ = 1;
int N_s = 0;
// Get the interaction from neighbor particles
for (uint n = 0; n < j_num; n++) {
uint j = j_list[n];
Real4 rhoPresMuB = sortedRhoPreMu[j];
if (rhoPresMuA.w > -0.5 && rhoPresMuB.w > -0.5)
continue; // No BCE-BCE interaction
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real d = length(dist3);
Real invd = 1.0 / d;
Real3 velMasB = sortedVelMas[j];
Real3 TauXxYyZzB = sortedTauXxYyZz[j];
Real3 TauXyXzYzB = sortedTauXyXzYz[j];
if (rhoPresMuB.w > -0.5) {
int bceIndexB = gridMarkerIndex[j] - numObjectsD.numFluidMarkers;
rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB];
velMasB = velMas_ModifiedBCE[bceIndexB];
TauXxYyZzB = tauXxYyZz_ModifiedBCE[bceIndexB];
TauXyXzYzB = tauXyXzYz_ModifiedBCE[bceIndexB];
// Extrapolated from velocity of fluid particle
if(rhoPresMuB.w > 0.5 && paramsD.bceType == BceVersion::ADAMI){
velMasB = sortedVelMas[j];
Real chi_A = sortedKernelSupport[index].y / sortedKernelSupport[index].x;
Real chi_B = sortedKernelSupport[j].y / sortedKernelSupport[j].x;
Real dA = SuppRadii * (2.0 * chi_A - 1.0);
if (dA < 0.0)
dA = 0.01 * SuppRadii;
Real dB = SuppRadii * (2.0 * chi_B - 1.0);
if (dB < 0.0)
dB = 0.01 * SuppRadii;
Real dAB = dB / dA;
if (dAB > 0.5)
dAB = 0.5;
Real3 velMasB_new = dAB * (velMasB - velMasA) + velMasB;
velMasB = velMasB_new;
}
if(rhoPresMuB.w < 0.5 && paramsD.bceTypeWall == BceVersion::ADAMI){
velMasB = sortedVelMas[j];
Real chi_A = sortedKernelSupport[index].y / sortedKernelSupport[index].x;
Real chi_B = sortedKernelSupport[j].y / sortedKernelSupport[j].x;
Real dA = SuppRadii * (2.0 * chi_A - 1.0);
if (dA < 0.0)
dA = 0.01 * SuppRadii;
Real dB = SuppRadii * (2.0 * chi_B - 1.0);
if (dB < 0.0)
dB = 0.01 * SuppRadii;
Real dAB = dB / dA;
if (dAB > 0.5)
dAB = 0.5;
Real3 velMasB_new = dAB * (velMasB - velMasA) + velMasB;
velMasB = velMasB_new;
}
}
// Correct the kernel function gradient
Real w_AB = W3h(d, hA);
Real3 gradW = GradWh(dist3, hA);
if (paramsD.USE_Consistent_G) {
Real3 gradW_new;
gradW_new.x = G_i[0] * gradW.x + G_i[1] * gradW.y + G_i[2] * gradW.z;
gradW_new.y = G_i[3] * gradW.x + G_i[4] * gradW.y + G_i[5] * gradW.z;
gradW_new.z = G_i[6] * gradW.x + G_i[7] * gradW.y + G_i[8] * gradW.z;
gradW = gradW_new;
}
// Calculate dv/dt
derivVelRho += DifVelocityRho_ElasticSPH(w_ini_inv, w_AB, gradW, dist3, d, invd,
sortedPosRad[index], sortedPosRad[j], velMasA, velMasB, rhoPresMuA,
rhoPresMuB, TauXxYyZzA, TauXyXzYzA, TauXxYyZzB, TauXyXzYzB);
// Calculate dsigma/dt
if (sortedRhoPreMu[index].w < -0.5) {
// start to calculate the stress rate
Real3 vAB = velMasA - velMasB;
Real3 vAB_h = 0.5 * vAB * paramsD.volume0;
// entries of strain rate tensor
Real exx = -2.0 * vAB_h.x * gradW.x;
Real eyy = -2.0 * vAB_h.y * gradW.y;
Real ezz = -2.0 * vAB_h.z * gradW.z;
Real exy = -vAB_h.x * gradW.y - vAB_h.y * gradW.x;
Real exz = -vAB_h.x * gradW.z - vAB_h.z * gradW.x;
Real eyz = -vAB_h.y * gradW.z - vAB_h.z * gradW.y;
// entries of rotation rate (spin) tensor
Real wxy = -vAB_h.x * gradW.y + vAB_h.y * gradW.x;
Real wxz = -vAB_h.x * gradW.z + vAB_h.z * gradW.x;
Real wyz = -vAB_h.y * gradW.z + vAB_h.z * gradW.y;
Real edia = 0.3333333333333 * (exx + eyy + ezz);
Real twoG = 2.0 * paramsD.G_shear;
Real K_edia = paramsD.K_bulk * 1.0 * edia;
dTauxx += twoG * (exx - edia) + 2.0 * (tauxy * wxy + tauxz * wxz) + K_edia;
dTauyy += twoG * (eyy - edia) - 2.0 * (tauxy * wxy - tauyz * wyz) + K_edia;
dTauzz += twoG * (ezz - edia) - 2.0 * (tauxz * wxz + tauyz * wyz) + K_edia;
dTauxy += twoG * exy - (tauxx * wxy - tauxz * wyz) + (wxy * tauyy + wxz * tauyz);
dTauxz += twoG * exz - (tauxx * wxz + tauxy * wyz) + (wxy * tauyz + wxz * tauzz);
dTauyz += twoG * eyz - (tauxy * wxz + tauyy * wyz) - (wxy * tauxz - wyz * tauzz);
}
// Do integration for the kernel function, calculate the XSPH term
if (d > paramsD.HSML * 1.0e-9) {
Real Wab = W3h(d, hA);
// Integration of the kernel function
sum_w_i += Wab * paramsD.volume0;
// XSPH
if (rhoPresMuB.w > -1.5 && rhoPresMuB.w < -0.5)
deltaV += paramsD.volume0 * (velMasB - velMasA) * Wab;
N_ = N_ + 1;
}
// Find particles that have contact with this particle
if (d < 1.25 * radii && rhoPresMuB.w < -0.5) {
Real Pen = (radii - d) * invRadii;
Real3 r_0 = bs_vAdT * invd * dist3;
Real3 r_s = r_0 * Pen;
if (d < 1.0 * radii) {
inner_sum += 3.0 * r_s;
} else if (d < 1.1 * radii) {
inner_sum += 1.0 * r_s;
} else {
inner_sum += 0.1 * 1.0 * (-r_0);
}
N_s = N_s + 1;
}
}
    // Flag particles that do not have enough neighbors (currently used only for the granular model)
if (sum_w_i < paramsD.C_Wi) {
sortedFreeSurfaceIdD[index] = 1;
} else {
sortedFreeSurfaceIdD[index] = 0;
}
// Calculate the shifting vector
Real det_r_max = 0.05 * vAdT;
Real det_r_A = length(inner_sum);
if (det_r_A < det_r_max) {
sortedXSPHandShift[index] = inner_sum;
} else {
sortedXSPHandShift[index] = inner_sum * det_r_max / (det_r_A + 1e-9);
}
// Add the XSPH term into the shifting vector
sortedXSPHandShift[index] += paramsD.EPS_XSPH * deltaV * paramsD.dT;
// Get the shifting velocity
sortedXSPHandShift[index] = sortedXSPHandShift[index] * paramsD.INV_dT;
// Add gravity and other body force to fluid markers
if (rhoPresMuA.w > -1.5 && rhoPresMuA.w < -0.5) {
Real3 totalFluidBodyForce3 = paramsD.bodyForce3 + paramsD.gravity;
derivVelRho += mR4(totalFluidBodyForce3, 0.0);
}
sortedDerivVelRho[index] = derivVelRho;
sortedDerivTauXxYyZz[index] = mR3(dTauxx, dTauyy, dTauzz);
sortedDerivTauXyXzYz[index] = mR3(dTauxy, dTauxz, dTauyz);
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void CalcVel_XSPH_D(uint* indexOfIndex,
Real3* vel_XSPH_Sorted_D,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* sortedXSPHandShift,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd,
volatile bool* isErrorD) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers - numObjectsD.numBoundaryMarkers)
return;
uint index = indexOfIndex[id];
Real4 rhoPreMuA = sortedRhoPreMu[index];
Real3 velMasA = sortedVelMas[index];
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
Real3 posRadA = mR3(sortedPosRad[index]);
Real3 deltaV = mR3(0);
// get address in grid
int3 gridPos = calcGridPos(posRadA);
Real3 inner_sum = mR3(0.0);
// Real mi_bar = 0.0, r0 = 0.0;
Real3 dV = mR3(0.0f);
// examine neighbouring cells
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) { // check not colliding with self
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real dd = dist3.x * dist3.x + dist3.y * dist3.y + dist3.z * dist3.z;
if (dd > SqRadii)
continue;
Real4 rhoPresMuB = sortedRhoPreMu[j];
if (rhoPresMuB.w > -0.5 || rhoPresMuB.w < -1.5)
continue;
Real3 velMasB = sortedVelMas[j];
Real rho_bar = 0.5 * (rhoPreMuA.x + rhoPresMuB.x);
Real d = length(dist3);
deltaV += paramsD.markerMass * (velMasB - velMasA) *
W3h(d, paramsD.HSML) / rho_bar;
}
}
}
}
}
vel_XSPH_Sorted_D[index] =
paramsD.EPS_XSPH * deltaV + sortedXSPHandShift[index] * paramsD.INV_dT;
if (!(isfinite(vel_XSPH_Sorted_D[index].x) &&
isfinite(vel_XSPH_Sorted_D[index].y) && isfinite(vel_XSPH_Sorted_D[index].z))) {
printf("Error! particle vXSPH is NAN: thrown from ChFsiForceExplicitSPH.cu, CalcVel_XSPH_D !\n");
*isErrorD = true;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void CopySortedToOriginal_D(Real4* sortedDerivVelRho,
Real3* sortedDerivTauXxYyZz,
Real3* sortedDerivTauXyXzYz,
Real4* originalDerivVelRho,
Real3* originalDerivTauXxYyZz,
Real3* originalDerivTauXyXzYz,
uint* gridMarkerIndex,
uint* activityIdentifierD,
uint* mapOriginalToSorted,
uint* originalFreeSurfaceId,
uint* sortedFreeSurfaceId) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers)
return;
// Check the activity of this particle
uint activity = activityIdentifierD[id];
if (activity == 0)
return;
uint index = mapOriginalToSorted[id];
originalDerivVelRho[id] = sortedDerivVelRho[index];
if (paramsD.elastic_SPH) {
originalDerivTauXxYyZz[id] = sortedDerivTauXxYyZz[index];
originalDerivTauXyXzYz[id] = sortedDerivTauXyXzYz[index];
originalFreeSurfaceId[id] = sortedFreeSurfaceId[index];
}
return;
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void CopySortedToOriginal_XSPH_D(Real3* sortedXSPH,
Real3* originalXSPH,
uint* gridMarkerIndex,
uint* activityIdentifierD,
uint* mapOriginalToSorted) {
uint id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= numObjectsD.numAllMarkers)
return;
// Check the activity of this particle
uint activity = activityIdentifierD[id];
if (activity == 0)
return;
uint index = mapOriginalToSorted[id];
originalXSPH[id] = sortedXSPH[index];
}
//--------------------------------------------------------------------------------------------------------------------------------
ChFsiForceExplicitSPH::ChFsiForceExplicitSPH(std::shared_ptr<ChBce> otherBceWorker,
std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD,
std::shared_ptr<ProximityDataD> otherMarkersProximityD,
std::shared_ptr<FsiGeneralData> otherFsiGeneralData,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<ChCounters> otherNumObjects,
bool verb)
: ChFsiForce(otherBceWorker,
otherSortedSphMarkersD,
otherMarkersProximityD,
otherFsiGeneralData,
otherParamsH,
otherNumObjects,
verb) {
CopyParams_NumberOfObjects(paramsH, numObjectsH);
density_initialization = 0;
}
ChFsiForceExplicitSPH::~ChFsiForceExplicitSPH() {}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::Initialize() {
ChFsiForce::Initialize();
cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(ChCounters));
cudaMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams));
cudaDeviceSynchronize();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::ForceSPH(std::shared_ptr<SphMarkerDataD> otherSphMarkersD,
std::shared_ptr<FsiBodiesDataD> otherFsiBodiesD,
std::shared_ptr<FsiMeshDataD> otherFsiMeshD) {
sphMarkersD = otherSphMarkersD;
fsiCollisionSystem->ArrangeData(sphMarkersD);
bceWorker->ModifyBceVelocityPressureStress(
sphMarkersD, otherFsiBodiesD, otherFsiMeshD);
CollideWrapper();
CalculateXSPH_velocity();
// AddGravityToFluid();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::CollideWrapper() {
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
//------------------------------------------------------------------------
// thread per particle
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
uint numBlocks1, numThreads1;
computeGridSize((int)numObjectsH->numAllMarkers -
(int)numObjectsH->numBoundaryMarkers, 256, numBlocks1, numThreads1);
// Execute the kernel
thrust::device_vector<Real4> sortedDerivVelRho(numObjectsH->numAllMarkers);
thrust::device_vector<Real3> sortedDerivTauXxYyZz(numObjectsH->numAllMarkers);
thrust::device_vector<Real3> sortedDerivTauXyXzYz(numObjectsH->numAllMarkers);
thrust::device_vector<Real3> sortedKernelSupport(numObjectsH->numAllMarkers);
thrust::device_vector<uint> sortedFreeSurfaceId(numObjectsH->numAllMarkers);
sortedXSPHandShift.resize(numObjectsH->numAllMarkers);
// Calculate the kernel support of each particle
if (paramsH->bceTypeWall == BceVersion::ADAMI || paramsH->bceType == BceVersion::ADAMI){
calcKernelSupport<<<numBlocks, numThreads>>>(
mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD),
mR3CAST(sortedKernelSupport), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "calcKernelSupport");
}
// Re-Initialize the density after several time steps
if (density_initialization >= paramsH->densityReinit) {
thrust::device_vector<Real4> rhoPresMuD_old = sortedSphMarkersD->rhoPresMuD;
printf("Re-initializing density after %d steps.\n", paramsH->densityReinit);
calcRho_kernel<<<numBlocks, numThreads>>>(
mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD),
mR4CAST(rhoPresMuD_old), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), density_initialization, isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "calcRho_kernel");
density_initialization = 0;
}
density_initialization++;
// Execute the kernel
if (paramsH->elastic_SPH) { // For granular material
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
// execute the kernel Navier_Stokes and Shear_Stress_Rate in one kernel
NS_SSR<<<numBlocks, numThreads>>>(
U1CAST(fsiGeneralData->activityIdentifierD), mR4CAST(sortedDerivVelRho),
mR3CAST(sortedDerivTauXxYyZz), mR3CAST(sortedDerivTauXyXzYz), mR3CAST(sortedXSPHandShift),
mR3CAST(sortedKernelSupport), mR4CAST(sortedSphMarkersD->posRadD),
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD),
mR3CAST(bceWorker->velMas_ModifiedBCE), mR4CAST(bceWorker->rhoPreMu_ModifiedBCE),
mR3CAST(bceWorker->tauXxYyZz_ModifiedBCE), mR3CAST(bceWorker->tauXyXzYz_ModifiedBCE),
mR3CAST(sortedSphMarkersD->tauXxYyZzD), mR3CAST(sortedSphMarkersD->tauXyXzYzD),
U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), U1CAST(markersProximityD->mapOriginalToSorted),
U1CAST(sortedFreeSurfaceId), isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "Navier_Stokes and Shear_Stress_Rate");
} else { // For fluid
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
        // Find the indices associated with wall boundary particles
thrust::device_vector<uint> indexOfIndex(numObjectsH->numAllMarkers);
thrust::device_vector<uint> identityOfIndex(numObjectsH->numAllMarkers);
calIndexOfIndex<<<numBlocks, numThreads>>>(
U1CAST(indexOfIndex), U1CAST(identityOfIndex), U1CAST(markersProximityD->gridMarkerIndexD));
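        // compact indexOfIndex by removing entries flagged as wall BCE markers (identityOfIndex == 1)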
thrust::remove_if(indexOfIndex.begin(), indexOfIndex.end(),
identityOfIndex.begin(), thrust::identity<int>());
// execute the kernel
Navier_Stokes<<<numBlocks1, numThreads1>>>(
U1CAST(indexOfIndex), mR4CAST(sortedDerivVelRho), mR3CAST(sortedXSPHandShift),
mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(bceWorker->velMas_ModifiedBCE),
mR4CAST(bceWorker->rhoPreMu_ModifiedBCE), U1CAST(markersProximityD->gridMarkerIndexD),
U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD), isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "Navier_Stokes");
}
// Launch a kernel to copy data from sorted arrays to original arrays.
// This is faster than using thrust::sort_by_key()
CopySortedToOriginal_D<<<numBlocks, numThreads>>>(
mR4CAST(sortedDerivVelRho), mR3CAST(sortedDerivTauXxYyZz), mR3CAST(sortedDerivTauXyXzYz),
mR4CAST(fsiGeneralData->derivVelRhoD), mR3CAST(fsiGeneralData->derivTauXxYyZzD),
mR3CAST(fsiGeneralData->derivTauXyXzYzD), U1CAST(markersProximityD->gridMarkerIndexD),
U1CAST(fsiGeneralData->activityIdentifierD), U1CAST(markersProximityD->mapOriginalToSorted),
U1CAST(fsiGeneralData->freeSurfaceIdD), U1CAST(sortedFreeSurfaceId));
sortedDerivVelRho.clear();
sortedDerivTauXxYyZz.clear();
sortedDerivTauXyXzYz.clear();
sortedKernelSupport.clear();
sortedFreeSurfaceId.clear();
cudaFree(isErrorD);
free(isErrorH);
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::CalculateXSPH_velocity() {
// Calculate vel_XSPH
if (vel_XSPH_Sorted_D.size() != numObjectsH->numAllMarkers) {
printf("vel_XSPH_Sorted_D.size() %zd numObjectsH->numAllMarkers %zd \n",
vel_XSPH_Sorted_D.size(), numObjectsH->numAllMarkers);
throw std::runtime_error(
"Error! size error vel_XSPH_Sorted_D Thrown from "
"CalculateXSPH_velocity!\n");
}
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
// thread per particle
uint numBlocks, numThreads;
computeGridSize((int)numObjectsH->numAllMarkers, 256, numBlocks, numThreads);
//------------------------------------------------------------------------
if (paramsH->elastic_SPH) {
// The XSPH vector already included in the shifting vector
CopySortedToOriginal_XSPH_D<<<numBlocks, numThreads>>>(
mR3CAST(sortedXSPHandShift), mR3CAST(fsiGeneralData->vel_XSPH_D),
U1CAST(markersProximityD->gridMarkerIndexD),
U1CAST(fsiGeneralData->activityIdentifierD),
U1CAST(markersProximityD->mapOriginalToSorted));
} else {
uint numBlocks1, numThreads1;
computeGridSize((int)numObjectsH->numAllMarkers -
(int)numObjectsH->numBoundaryMarkers, 256, numBlocks1, numThreads1);
thrust::fill(vel_XSPH_Sorted_D.begin(), vel_XSPH_Sorted_D.end(), mR3(0.0));
        // Find the indices associated with wall boundary particles
thrust::device_vector<uint> indexOfIndex(numObjectsH->numAllMarkers);
thrust::device_vector<uint> identityOfIndex(numObjectsH->numAllMarkers);
calIndexOfIndex<<<numBlocks, numThreads>>>(
U1CAST(indexOfIndex), U1CAST(identityOfIndex),
U1CAST(markersProximityD->gridMarkerIndexD));
thrust::remove_if(indexOfIndex.begin(), indexOfIndex.end(),
identityOfIndex.begin(), thrust::identity<int>());
// Execute the kernel
CalcVel_XSPH_D<<<numBlocks1, numThreads1>>>(
U1CAST(indexOfIndex), mR3CAST(vel_XSPH_Sorted_D),
mR4CAST(sortedSphMarkersD->posRadD), mR3CAST(sortedSphMarkersD->velMasD),
mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(sortedXSPHandShift),
U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "CalcVel_XSPH_D");
CopySortedToOriginal_XSPH_D<<<numBlocks, numThreads>>>(
mR3CAST(vel_XSPH_Sorted_D), mR3CAST(fsiGeneralData->vel_XSPH_D),
U1CAST(markersProximityD->gridMarkerIndexD),
U1CAST(fsiGeneralData->activityIdentifierD),
U1CAST(markersProximityD->mapOriginalToSorted));
}
if (density_initialization % paramsH->densityReinit == 0)
CopySortedToOriginal_NonInvasive_R4(sphMarkersD->rhoPresMuD,
sortedSphMarkersD->rhoPresMuD, markersProximityD->gridMarkerIndexD);
cudaFree(isErrorD);
free(isErrorH);
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::AddGravityToFluid() {
// add gravity to fluid markers
/* Add outside forces. Don't add gravity to rigids, BCE, and boundaries, it is
* added in ChSystem */
Real3 totalFluidBodyForce3 = paramsH->bodyForce3 + paramsH->gravity;
thrust::device_vector<Real4> bodyForceD(numObjectsH->numAllMarkers);
thrust::fill(bodyForceD.begin(), bodyForceD.end(), mR4(totalFluidBodyForce3));
thrust::transform(
fsiGeneralData->derivVelRhoD.begin() + fsiGeneralData->referenceArray[0].x,
fsiGeneralData->derivVelRhoD.begin() + fsiGeneralData->referenceArray[0].y, bodyForceD.begin(),
fsiGeneralData->derivVelRhoD.begin() + fsiGeneralData->referenceArray[0].x, thrust::plus<Real4>());
bodyForceD.clear();
}
} // namespace fsi
} // namespace chrono
//================================================================================================================================
|
ca9654addd9d6395e64184d2b9802dc626cd4329.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
//#include "cuPrintf.hip"
#include <iostream>
#include <cmath>
#include <cstring>
#include <cstdio>
#include <stdint.h>
#include "cuchanmgr.h"
#include "errorhandler.h"
#include "auxil.h"
__device__ __constant__ int CHM_LCA_d;
__device__ __constant__ double CHM_T_d;
/*
*
* DEVICE FUNCTIONS
*
*/
__device__ double
CHM_Correct_Week_Crossover(double time) {
if (time > 302400.0) { return time-604800.0; }
else if (time < -302400.00) { return time+604800.0; }
else { return time; }
}
// Light-weight, parallelized lat-lon calculator that returns values in radians
__device__ void
CHM_Dev_ECEF2LL_Rad(const double *posECEF, double *posLL) {
// Compute lat
double p = norm(2, posECEF);
double theta = atan2(posECEF[2]*CONST_WGS84_A, p*CONST_WGS84_B);
posLL[0] = atan2((posECEF[2] + pow(CONST_WGS84_EP,2) * CONST_WGS84_B * pow(sin(theta),3)),
(p - pow(CONST_WGS84_E,2) * CONST_WGS84_A * pow(cos(theta), 3)));
// Compute lon
posLL[1] = atan2(posECEF[1], posECEF[0]);
return;
}
// Given latitude and longitude, return the elements of the ENU->ECEF rotation matrix
__device__ void
CHM_Dev_R_ENU2ECEF(const double *posLL, double *posENU) {
double sinLat = sin(posLL[0]);
double sinLon = sin(posLL[1]);
double cosLat = cos(posLL[0]);
double cosLon = cos(posLL[1]);
posENU[0] = -sinLon;
posENU[1] = -sinLat*cosLon;
posENU[2] = cosLat*cosLon;
posENU[3] = cosLon;
posENU[4] = -sinLat*sinLon;
posENU[5] = cosLat*sinLon;
posENU[6] = 0.0;
posENU[7] = cosLat;
posENU[8] = sinLat;
return;
}
// Function to be called within Update_Sat_Pos to prevent function saturation
// Note: this is hardcoded to compute GPS ephemerides only!
// See ephemeris.c in RTKLIB for potentially useful code for multi-constellation
//
// NOTE: if there are errors, check the fmod calls below -- fmod is defined differently than np.mod
// (the result takes the sign of the dividend, not the divisor)!
//
__device__ int
CHM_Get_Sat_Pos(dsp::utils::state_t<double> *satState, dsp::utils::eph_t *satEph, double txTime) {
// Corrected mean motion
double n = sqrt(dsp::utils::MU_GPS/(CHM_CUBE(satEph->A))) + satEph->deln;
// Compute satellite clock corrections
double tc = CHM_Correct_Week_Crossover(txTime - satEph->tocs); // Without corrections
double clkb = satEph->f2*tc*tc + satEph->f1*tc + satEph->f0 - satEph->tgd[0]; // Without relativistic correction
double tk = CHM_Correct_Week_Crossover(txTime - clkb - satEph->toes); // Without relativistic correction
// Mean anomaly
double E, M, f, dfdE, dE = 1;
E = M = fmod((satEph->M0 + n*tk), CONST_2PI);
// Eccentric anomaly
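    // Newton-Raphson iteration on Kepler's equation: f(E) = M - E + e*sin(E) = 0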
for (int eccIdx = 0; eccIdx < CHM_MAX_ITER_KEPLER && abs(dE) > CHM_RTOL_KEPLER; eccIdx++) {
f = M - E + satEph->e * sin(E);
dfdE = -1.0 + satEph->e * cos(E);
dE = -f / dfdE;
E = fmod(E + dE, CONST_2PI);
//if (abs(dE) < SATPOS_RTOL_KEPLER) { break; } // Break here if convergence is achieved!
}
if (abs(dE) > CHM_RTOL_KEPLER) { return -1; }
// Add in relativistic corrections and compute clock drift
double dtr = CONST_F*(satEph->e)*(satEph->sqrt_A)*sin(E);
tc = txTime - (clkb + dtr) - satEph->tocs;
clkb = satEph->f2*tc*tc + satEph->f1*tc + satEph->f0 + dtr - satEph->tgd[0];
double clkd = satEph->f1 + 2.0*satEph->f2*tc;
// Recompute tk with relativistic correction
tk = CHM_Correct_Week_Crossover(txTime - clkb - satEph->toes);
// Recompute mean anomaly with relativistic correction
dE = 1;
E = M = fmod((satEph->M0 + n*tk), (CONST_2PI));
// Eccentric anomaly
for (int eccIdx = 0; eccIdx < CHM_MAX_ITER_KEPLER && abs(dE) > CHM_RTOL_KEPLER; eccIdx++) {
f = M - E + satEph->e * sin(E);
dfdE = -1.0 + satEph->e * cos(E);
dE = -f / dfdE;
E = fmod(E + dE, CONST_2PI);
//if (abs(dE) < SATPOS_RTOL_KEPLER) { break; } // Break here if convergence is achieved!
}
if (abs(dE) > CHM_RTOL_KEPLER) { return -1; }
// Compute helpers
double sinE = sin(E);
double cosE = cos(E);
// True anomaly
double v = atan2(sqrt(1.0-satEph->e_sqr) * sinE / (1.0-satEph->e*cosE), (cosE-satEph->e)/(1.0-satEph->e*cosE));
// Argument of latitude
double u = fmod(v + satEph->omg, CONST_2PI); // Don't need mod here? (Computing for guarantee)
// Second harmonic perturbations
double cos2u = cos(2.0*u);
double sin2u = sin(2.0*u);
// Argument of latitude correction -> corrected argument of latitude
u += satEph->cuc * cos2u + satEph->cus * sin2u;
// Radius correction -> corrected radius
double r = satEph->A * (1.0 - satEph->e * cosE) + satEph->crc * cos2u + satEph->crs * sin2u;
// Orbital inclination correction -> corrected inclination
double i = satEph->i0 + satEph->idot * tk + satEph->cic * cos2u + satEph->cis * sin2u;
// Corrected longitude of node
double omegak = fmod(satEph->OMG0 + (satEph->OMGd-CONST_OEDot)*tk - CONST_OEDot*satEph->toes, CONST_2PI);
// Positions in orbital plane
double x_op = r * cos(u);
double y_op = r * sin(u);
double cos_omegak = cos(omegak);
double sin_omegak = sin(omegak);
double cosi = cos(i);
double sini = sin(i);
// Assign position states
double state_x = x_op * cos_omegak - y_op * sin_omegak * cosi;
satState->x = state_x;
double state_y = x_op * sin_omegak + y_op * cos_omegak * cosi;
satState->y = state_y;
double state_z = y_op * sini;
satState->z = state_z;
satState->delta_t = clkb;
// Velocity calculation
// Second harmonic perturbations
cos2u = cos(2.0*u);
sin2u = sin(2.0*u);
double edot = n / (1.0 - satEph->e*cosE);
double vdot = sinE*edot*(1.0 + satEph->e*cos(v)) / (sin(v)*(1.0-satEph->e*cosE));
double udot = vdot + 2.0*(satEph->cus*cos2u - satEph->cuc*sin2u)*vdot;
double rdot = satEph->A*satEph->e*sinE*edot + 2.0*(satEph->crs*cos2u - satEph->crc*sin2u)*vdot;
double idotdot = satEph->idot + (satEph->cis*cos2u - satEph->cic*sin2u)*2*vdot;
double vx_op = rdot*cos(u) - y_op*udot;
double vy_op = rdot*sin(u) + x_op*udot;
double omegadot = satEph->OMGd - CONST_OEDot;
double tmpa = vx_op - y_op*cosi*omegadot;
double tmpb = x_op*omegadot + vy_op*cosi - y_op*sini*idotdot;
// Assign velocity states
double state_xdot = tmpa * cos_omegak - tmpb * sin_omegak;
satState->x_dot = state_xdot;
double state_ydot = tmpa * sin_omegak + tmpb * cos_omegak;
satState->y_dot = state_ydot;
double state_zdot = vy_op*sini + y_op*cosi*idotdot;
satState->z_dot = state_zdot;
satState->delta_t_dot = clkd;
// TODO: implement ionospheric corrections?
return 0;
}
/*
*
* DEVICE KERNELS
*
*/
/** \brief Compute the satellite state given the current estimate of the received signal
*
* The initial update -- propagates forward from scalar handoff
*
* \param navData Ptr to all ephemerides we have
* \param navDataSize The number of ephemerides in navData
* \param ephToUse Output: the ephemerides chosen (closest in time to the current state estimate)
* \param satStates Output: The states of the tracked satellites according to the channel parameters
* \param codePhase Ptr to the current estimate of the code phase of each channel
* \param cpElapsed The number of code periods for this sample set since the start of tracking
* \param cpRef The number of code periods elapsed since the start of tracking for the reference TOW
* \param cpRefTOW The TOW at the reference code period
* \param PRNs The satellite channels being tracked
* \param numChan The number of channels being tracked
* \param txTime Output: When each satellite sent the transmission that is now the sample set (computed by the channel parameters)
*
*/
__global__ void
CHM_ComputeSatStates(dsp::utils::ephSet_t *navData, int navDataSize, int *ephToUse,
dsp::utils::state_t<double> *satStates,
double *codePhase,
int *cpElapsed, int *cpRef, int *cpRefTOW,
uint8_t *PRNs, int numChan,
double *txTime) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
int test = 0;
// only compute if within the range to estimate
while (i < numChan) {
// COMPUTE TXTIME
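// As a sketch (assuming the usual GPS L1 C/A constants, CONST_T_CA = 1 ms and CONST_F_CA = 1.023 MHz):
//   txTime = cpRefTOW + (cpElapsed - cpRef) * CONST_T_CA + codePhase / CONST_F_CA
// i.e. the TOW at the reference code period, plus one code period per period elapsed since that
// reference, plus the fractional period implied by the current code phase in chips.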
txTime[i] = cpRefTOW[i] +
((cpElapsed[i] - cpRef[i]) * CONST_T_CA) +
(codePhase[i] / CONST_F_CA);
// SATELLITE COMPUTATION
// Save the PRN number for this index
int currPrn = PRNs[i]-1;
// Reset eph to the unchosen index
ephToUse[i] = -1;
// Check all eph's and find the one closest to the passed-in time if we have valid ephems for it
// TODO: cleaner eph search that doesn't require the brute force search?
// or just stick with this to be thorough?
for(int currIdx = 0; currIdx < navDataSize; currIdx++) {
// Check if the ephems are valid for this PRN
if (navData[currIdx].ephValid[currPrn]) {
// If no eph chosen and this eph is valid, select this eph
if (ephToUse[i] == -1) {
ephToUse[i] = currIdx;
}
// Update the best eph if it's closer in time to the transmit time
else if (fabs(navData[currIdx].ephToes - txTime[i]) <
fabs(navData[ephToUse[i]].ephToes - txTime[i])) {
ephToUse[i] = currIdx;
}
}
}
// Make sure that an ephemeris actually got selected
if (ephToUse[i] == -1) {
// No valid ephemeris exists for this PRN; skip the channel so navData is never indexed with -1
// FIXME: Remove channel
test = 1;
i += stride;
continue;
}
CHM_Get_Sat_Pos(&(satStates[i]), &(navData[ephToUse[i]].eph[currPrn]), txTime[i]);
i += stride;
}
}
/** \brief The standard update: Performs MeasUpdate -> TimeUpdate -> txTime -> satState
*
* Propagates from EKF measurement; to be run in update
*
* \param navData Ptr to all ephemerides we have
* \param navDataSize The number of ephemerides in navData
* \param ephToUse The ephemerides chosen (closest in time to the current state estimate)
* \param centerPt The current best estimate on the receiver's state
* \param satStates Output: the states of the tracked satellites according to the channel parameters
* \param codePhaseStart Ptr to the estimate of the code phase of each channel at the beginning of the sample set
* \param codePhaseEnd Ptr to the estimate of the code phase of each channel at the end of the sample set
* \param codeFreq Ptr to the estimate of code frequency (under Doppler effects)
* \param carrPhaseStart Ptr to the estimate of the carrier phase of each channel at the beginning of the sample set
* \param carrPhaseEnd Ptr to the estimate of the carrier phase of each channel at the end of the sample set
* \param carrFreq Ptr to the estimate of the carrier frequency (under Doppler effects)
* \param dopplerSign The sign convention for Doppler frequency
* \param cpElapsedStart The number of code periods for the start of this sample set since the start of tracking
* \param cpElapsedEnd The number of code periods for the end of this sample set since the start of tracking
* \param cpRef The number of code periods elapsed since the start of tracking for the reference TOW
* \param cpRefTOW The TOW at the reference code period
* \param PRNs The satellite channels being tracked
* \param numChan The number of channels being tracked
* \param rxTime The current time of the receiver's internal clock (no delta-t applied)
* \param txTime Output: When each satellite sent the transmission that is now the sample set (computed by the channel parameters)
* \param T The length in seconds of one sample set
*
*/
__global__ void
CHM_PropagateChannels(dsp::utils::ephSet_t *navData, int navDataSize, int *ephToUse,
const double *centerPt, dsp::utils::state_t<double> *satStates,
double *codePhaseStart, double *codePhaseEnd, double *codeFreq,
double *carrPhaseStart, double *carrPhaseEnd, double *carrFreq, int *dopplerSign,
int *cpElapsedStart, int *cpElapsedEnd, int *cpRef, int *cpRefTOW,
uint8_t *PRNs, int numChan,
double rxTime, double *txTime, double T)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
int test = 0;
// Back-calculation values for finding scores
int currPrn;
double currPtVelECI[4];
double satPosTransmitTime;
dsp::utils::state_t<double> currPtSatState;
double bc_los[3];
double bc_range;
double bc_losrangerate;
double bc_fi;
double bc_rc0;
double bc_fc;
double bc_pseudorange;
double bc_txTime;
double bc_codeFracDiff;
double bc_rc;
double cos_tau_OEDot;
double sin_tau_OEDot;
// Since kernels are launched in multiples of 32 threads for efficiency,
// only compute if within the range to estimate
while (i < numChan) {
// MEASUREMENT UPDATE
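// Sketch of what this update does (dopplerSign sets the sign convention; names are informal):
//   carrier Doppler: bc_fi = F_L1 * ((losRangeRate - rxClockRate)/c + satClockDrift) / dopplerSign
//   code frequency:  bc_fc = F_CA + dopplerSign * (F_CA/F_L1) * bc_fi + (bc_rc - codePhaseEnd) / T
// so the code rate follows the carrier Doppler scaled by the chip-to-carrier ratio, plus the residual
// code-phase drift accumulated over the last sample set of length T.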
// For this satellite position, compute the transmit time (TOF) to the candidate point
satPosTransmitTime = rxTime - (txTime[i] + (centerPt[3]/(double)CONST_C)) + satStates[i].delta_t;
// Convert the satellite and coordinate positions to ECI to add
cos_tau_OEDot = cos(-CONST_OEDot*satPosTransmitTime);
sin_tau_OEDot = sin(-CONST_OEDot*satPosTransmitTime);
// Rotate the satellite position over the earth for this point's pvt state
currPtSatState.x = cos_tau_OEDot * satStates[i].x
- sin_tau_OEDot * satStates[i].y;
currPtSatState.y = sin_tau_OEDot * satStates[i].x
+ cos_tau_OEDot * satStates[i].y;
currPtSatState.z = satStates[i].z;
currPtSatState.delta_t = satStates[i].delta_t;
currPtSatState.x_dot = cos_tau_OEDot * satStates[i].x_dot
- sin_tau_OEDot * satStates[i].y_dot
- CONST_OEDot * sin_tau_OEDot * satStates[i].x
- CONST_OEDot * cos_tau_OEDot * satStates[i].y;
currPtSatState.y_dot = sin_tau_OEDot * satStates[i].x_dot
+ cos_tau_OEDot * satStates[i].y_dot
+ CONST_OEDot * cos_tau_OEDot * satStates[i].x
- CONST_OEDot * sin_tau_OEDot * satStates[i].y;
currPtSatState.z_dot = satStates[i].z_dot;
currPtSatState.delta_t_dot = satStates[i].delta_t_dot;
// Also need to rotate the point's velocity into the inertial frame
currPtVelECI[0] = centerPt[4] - CONST_OEDot*centerPt[1];
currPtVelECI[1] = centerPt[5] + CONST_OEDot*centerPt[0];
currPtVelECI[2] = centerPt[6];
currPtVelECI[3] = centerPt[7];
// Back-calculate the channel parameters
// Find carrier frequency
bc_los[0] = currPtSatState.x - centerPt[0];
bc_los[1] = currPtSatState.y - centerPt[1];
bc_los[2] = currPtSatState.z - centerPt[2];
bc_range = norm(3, bc_los); // Might as well find range this way since the unit vector is needed.
bc_losrangerate = ((bc_los[0]/bc_range)*(currPtVelECI[0]-currPtSatState.x_dot)) +
((bc_los[1]/bc_range)*(currPtVelECI[1]-currPtSatState.y_dot)) +
((bc_los[2]/bc_range)*(currPtVelECI[2]-currPtSatState.z_dot));
bc_fi = CONST_F_L1 * ((bc_losrangerate - currPtVelECI[3])/CONST_C + currPtSatState.delta_t_dot) / (*dopplerSign);
// Find the code frequency elapsed since last timestep
bc_pseudorange = bc_range - CONST_C * currPtSatState.delta_t + centerPt[3];
bc_txTime = rxTime - bc_pseudorange/CONST_C;
bc_codeFracDiff = bc_txTime - cpRefTOW[i] - ((cpElapsedEnd[i] - cpRef[i]) * CONST_T_CA);
bc_rc = bc_codeFracDiff * CONST_F_CA;
// The back-calculated code frequency is the updated frequency for the channel
bc_fc = CONST_F_CA + (*dopplerSign * CONST_F_CA/CONST_F_L1) * bc_fi + (bc_rc - codePhaseEnd[i]) / CHM_T_d;
// Something's very wrong if the codeFreq is shifted by more than 10kHz from F_CA
if (abs(bc_fc-CONST_F_CA) > 10000) {
// TODO: Throw an error if we get here
test = 3;
}
// Update channels using the calculated results
carrFreq[i] = bc_fi;
codeFreq[i] = bc_fc;
// TIME UPDATE
// ***ENHANCED*** time update -- uses the back-calculated code phase instead
// Progress the channel params by one sample set (not ephem dependent; can still do regardless)
double temp1 = floor((codeFreq[i]*CHM_T_d + codePhaseEnd[i])/CHM_LCA_d);
if (temp1 > 30 || temp1 < 10) {
test = -1;
}
double cpElapsedPred = cpElapsedEnd[i] + temp1;
double temp2 = fmod((double)(codeFreq[i]*CHM_T_d + codePhaseEnd[i]), (double)CHM_LCA_d);
if (temp2 < 0.0) {
temp2 += (double)CHM_LCA_d;
}
double codePhasePred = temp2;
// COMPUTE TXTIME
// Reset the PRN flag
double txTimePred = cpRefTOW[i] +
((cpElapsedPred - cpRef[i]) * CONST_T_CA) +
(codePhasePred / CONST_F_CA);
// SATELLITE COMPUTATION
// Save the PRN number for this index
currPrn = PRNs[i]-1;
// Reset eph to the unchosen index
ephToUse[i] = -1;
// Check all eph's and find the one closest to the passed-in time if we have valid ephems for it
// TODO: cleaner eph search that doesn't require the brute force search?
// or just stick with this to be thorough?
for(int currIdx = 0; currIdx < navDataSize; currIdx++) {
// Check if the ephems are valid for this PRN
if (navData[currIdx].ephValid[currPrn]) {
// If no eph chosen and this eph is valid, select this eph
if (ephToUse[i] == -1) {
ephToUse[i] = currIdx;
}
// Update the best eph if it's closer in time to the transmit time
else if (fabs(navData[currIdx].ephToes - txTimePred) <
fabs(navData[ephToUse[i]].ephToes - txTimePred)) {
ephToUse[i] = currIdx;
}
}
}
// Make sure that an ephemeris actually got selected
if (ephToUse[i] == -1) {
// No valid ephemeris exists for this PRN; skip the channel so navData is never indexed with -1
// FIXME: Remove channel
test = 1;
i += stride;
continue;
}
dsp::utils::state_t<double> satStatePred;
CHM_Get_Sat_Pos(&satStatePred, &(navData[ephToUse[i]].eph[currPrn]), txTimePred);
// (Internally) advance the rxTime by T and recompute the code phase
// For this satellite position, compute the transmit time (TOF) to the candidate point
satPosTransmitTime = rxTime + T - (txTimePred + (centerPt[3]/(double)CONST_C)) + satStatePred.delta_t;
// Convert the satellite and coordinate positions to ECI to add
cos_tau_OEDot = cos(-CONST_OEDot*satPosTransmitTime);
sin_tau_OEDot = sin(-CONST_OEDot*satPosTransmitTime);
// Rotate the satellite position over the earth for this point's pvt state
currPtSatState.x = cos_tau_OEDot * satStatePred.x
- sin_tau_OEDot * satStatePred.y;
currPtSatState.y = sin_tau_OEDot * satStatePred.x
+ cos_tau_OEDot * satStatePred.y;
currPtSatState.z = satStatePred.z;
currPtSatState.delta_t = satStatePred.delta_t;
// Back-calculate the channel parameters
// Find carrier frequency
bc_los[0] = currPtSatState.x - centerPt[0];
bc_los[1] = currPtSatState.y - centerPt[1];
bc_los[2] = currPtSatState.z - centerPt[2];
bc_range = norm(3, bc_los); // Might as well find range this way since the unit vector is needed.
bc_pseudorange = bc_range - CONST_C * currPtSatState.delta_t + centerPt[3];
bc_txTime = rxTime + T - bc_pseudorange/CONST_C;
bc_codeFracDiff = bc_txTime - cpRefTOW[i] - ((cpElapsedEnd[i] - cpRef[i]) * CONST_T_CA);
bc_rc = bc_codeFracDiff * CONST_F_CA;
double bc_rc0_check = bc_rc - codePhaseEnd[i];
// Find the code frequency elapsed since last timestep
bc_rc0 = CONST_F_CA * (satPosTransmitTime - (bc_range / CONST_C));
// Debug comparison
double temp1old = floor((codeFreq[i]*CHM_T_d + codePhaseEnd[i])/CHM_LCA_d);
if (temp1old > 30 || temp1old < 10) {
test = -1;
}
double temp2old = fmod((double)(codeFreq[i]*CHM_T_d + codePhaseEnd[i]), (double)CHM_LCA_d);
if (temp2old < 0.0) {
temp2old += (double)CHM_LCA_d;
}
// Progress the channel params by one sample set (not ephem dependent; can still do regardless)
cpElapsedStart[i] = cpElapsedEnd[i];
temp1 = floor(bc_rc/CHM_LCA_d);
if (temp1 > 30 || temp1 < 10) {
test = -1;
}
codePhaseStart[i] = codePhaseEnd[i];
temp2 = fmod(bc_rc, (double)CHM_LCA_d);
if (temp2 < 0.0) {
temp2 += (double)CHM_LCA_d;
}
cpElapsedEnd[i] += temp1;
codePhaseEnd[i] = temp2;
carrPhaseStart[i] = carrPhaseEnd[i];
double temp3 = fmod((double)(carrFreq[i]*CHM_T_d + carrPhaseEnd[i]), (double)1.0);
if (temp3 < 0.0) {
temp3 += 1.0;
}
carrPhaseEnd[i] = temp3;
// End ENHANCED
// COMPUTE TXTIME
// Reset the PRN flag
txTime[i] = cpRefTOW[i] +
((cpElapsedEnd[i] - cpRef[i]) * CONST_T_CA) +
(codePhaseEnd[i] / CONST_F_CA);
// SATELLITE COMPUTATION
// Save the PRN number for this index
currPrn = PRNs[i]-1;
CHM_Get_Sat_Pos(&(satStates[i]), &(navData[ephToUse[i]].eph[currPrn]), txTime[i]);
i += stride;
}
return;
}
/** \brief The initial update: Performs TimeUpdate -> txTime -> satState
*
* Propagates from EKF measurement; to be run in update
*
* \param navData Ptr to all ephemerides we have
* \param navDataSize The number of ephemerides in navData
* \param ephToUse The ephemerides chosen (closest in time to the current state estimate)
* \param centerPt The current best estimate on the receiver's state
* \param satStates Output: the states of the tracked satellites according to the channel parameters
* \param codePhaseStart Ptr to the estimate of the code phase of each channel at the beginning of the sample set
* \param codePhaseEnd Ptr to the estimate of the code phase of each channel at the end of the sample set
* \param codeFreq Ptr to the estimate of code frequency (under Doppler effects)
* \param carrPhaseStart Ptr to the estimate of the carrier phase of each channel at the beginning of the sample set
* \param carrPhaseEnd Ptr to the estimate of the carrier phase of each channel at the end of the sample set
* \param carrFreq Ptr to the estimate of the carrier frequency (under Doppler effects)
* \param dopplerSign The sign convention for Doppler frequency
* \param cpElapsedStart The number of code periods for the start of this sample set since the start of tracking
* \param cpElapsedEnd The number of code periods for the end of this sample set since the start of tracking
* \param cpRef The number of code periods elapsed since the start of tracking for the reference TOW
* \param cpRefTOW The TOW at the reference code period
* \param PRNs The satellite channels being tracked
* \param numChan The number of channels being tracked
* \param rxTime The current time of the receiver's internal clock (no delta-t applied)
* \param txTime Output: When each satellite sent the transmission that is now the sample set (computed by the channel parameters)
* \param T The length in seconds of one sample set
*
*/
__global__ void
CHM_TimeUpdateChannels(dsp::utils::ephSet_t *navData, int navDataSize, int *ephToUse,
const double *centerPt, dsp::utils::state_t<double> *satStates,
double *codePhaseStart, double *codePhaseEnd, double *codeFreq,
double *carrPhaseStart, double *carrPhaseEnd, double *carrFreq, int *dopplerSign,
int *cpElapsedStart, int *cpElapsedEnd, int *cpRef, int *cpRefTOW,
uint8_t *PRNs, int numChan,
double rxTime, double *txTime, double T)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
int test = 0;
// Back-calculation values for finding scores
int currPrn;
double currPtVelECI[4];
double satPosTransmitTime;
dsp::utils::state_t<double> currPtSatState;
double bc_los[3];
double bc_range;
double bc_losrangerate;
double bc_fi;
double bc_rc0;
double bc_fc;
double cos_tau_OEDot;
double sin_tau_OEDot;
// Since kernels are launched in multiples of 32 threads for efficiency,
// only compute if within the range to estimate
while (i < numChan) {
// TIME UPDATE
// ***ENHANCED*** time update -- uses the back-calculated code phase instead
// Progress the channel params by one sample set (not ephem dependent; can still do regardless)
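// In effect: the code phase advances by codeFreq * T chips; every whole code period (CHM_LCA_d chips)
// crossed increments cpElapsed, and the remainder (wrapped into [0, CHM_LCA_d) by fmod plus the
// negative-value fix-up below) becomes the predicted code phase used to recompute txTime and the
// satellite state.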
//cpElapsed[i] += floor((BCS_S_d * (codeFreq[i]/BCS_FS_d) + codePhase[i])/BCS_LCA_d);
double temp1 = floor((codeFreq[i]*CHM_T_d + codePhaseEnd[i])/CHM_LCA_d);
if (temp1 > 30 || temp1 < 10) {
test = -1;
}
double cpElapsedPred = cpElapsedEnd[i] + temp1;
//codePhase[i] = fmod((double)(codePhase[i] + codeFreq[i]*BCS_T_d), (double)BCS_LCA_d);
double temp2 = fmod((double)(codeFreq[i]*CHM_T_d + codePhaseEnd[i]), (double)CHM_LCA_d);
if (temp2 < 0.0) {
temp2 += (double)CHM_LCA_d;
}
double codePhasePred = temp2;
// COMPUTE TXTIME
// Reset the PRN flag
double txTimePred = cpRefTOW[i] +
((cpElapsedPred - cpRef[i]) * CONST_T_CA) +
(codePhasePred / CONST_F_CA);
// SATELLITE COMPUTATION
// Save the PRN number for this index
currPrn = PRNs[i]-1;
// Reset eph to the unchosen index
ephToUse[i] = -1;
// Check all eph's and find the one closest to the passed-in time if we have valid ephems for it
// TODO: cleaner eph search that doesn't require the brute force search?
// or just stick with this to be thorough?
for(int currIdx = 0; currIdx < navDataSize; currIdx++) {
// Check if the ephems are valid for this PRN
if (navData[currIdx].ephValid[currPrn]) {
// If no eph chosen and this eph is valid, select this eph
if (ephToUse[i] == -1) {
ephToUse[i] = currIdx;
}
// Update the best eph if it's closer in time to the transmit time
else if (fabs(navData[currIdx].ephToes - txTimePred) <
fabs(navData[ephToUse[i]].ephToes - txTimePred)) {
ephToUse[i] = currIdx;
}
}
}
// Make sure that an ephemeris actually got selected
if (ephToUse[i] == -1) {
// No valid ephemeris exists for this PRN; skip the channel so navData is never indexed with -1
// TODO: Return error
test = 1;
i += stride;
continue;
}
dsp::utils::state_t<double> satStatePred;
CHM_Get_Sat_Pos(&satStatePred, &(navData[ephToUse[i]].eph[currPrn]), txTimePred);
// (Internally) advance the rxTime by T and recompute the code phase
// For this satellite position, compute the transmit time (TOF) to the candidate point
satPosTransmitTime = rxTime + T - (txTimePred + (centerPt[3]/(double)CONST_C)) + satStatePred.delta_t;
// Convert the satellite and coordinate positions to ECI to add
cos_tau_OEDot = cos(-CONST_OEDot*satPosTransmitTime);
sin_tau_OEDot = sin(-CONST_OEDot*satPosTransmitTime);
// Rotate the satellite position over the earth for this point's pvt state
currPtSatState.x = cos_tau_OEDot * satStatePred.x
- sin_tau_OEDot * satStatePred.y;
currPtSatState.y = sin_tau_OEDot * satStatePred.x
+ cos_tau_OEDot * satStatePred.y;
currPtSatState.z = satStatePred.z;
currPtSatState.delta_t = satStatePred.delta_t;
// Back-calculate the channel parameters
// Find carrier frequency
bc_los[0] = currPtSatState.x - centerPt[0];
bc_los[1] = currPtSatState.y - centerPt[1];
bc_los[2] = currPtSatState.z - centerPt[2];
bc_range = norm(3, bc_los); // Might as well find range this way since the unit vector is needed.
double bc_pseudorange = bc_range - CONST_C * currPtSatState.delta_t + centerPt[3];
double bc_txTime = rxTime + T - bc_pseudorange/CONST_C;
// Not using the predicted values here because we want to see how much the code phase moved since last timestep
// based on the predicted satellite location and newly-predicted receiver location.
// This would be most accurate if done iteratively, as the new code phase estimate would update the satellite position.
// But after re-applying the code phase estimate to the satellite position and recomputing, the back-calculated pseudorange
// shouldn't change much: the satellites move at roughly 4 m/ms, and bc_rc is only expected to differ by a few code chips (each chip spanning 1 ms / 1023)
double bc_codeFracDiff = bc_txTime - cpRefTOW[i] - ((cpElapsedEnd[i] - cpRef[i]) * CONST_T_CA);
double bc_rc = bc_codeFracDiff * CONST_F_CA;
double bc_rc0_check = bc_rc - codePhaseEnd[i];
double modCheck = fmod(bc_rc0_check, (double)CHM_LCA_d);
// Find the code frequency elapsed since last timestep
bc_rc0 = CONST_F_CA * (satPosTransmitTime - (bc_range / CONST_C));
// Advance the code phase
double temp2old = fmod((double)(codeFreq[i]*CHM_T_d + codePhaseEnd[i]), (double)CHM_LCA_d);
if (temp2old < 0.0) {
temp2old += (double)CHM_LCA_d;
}
// Progress the channel params by one sample set (not ephem dependent; can still do regardless)
cpElapsedStart[i] = cpElapsedEnd[i];
temp1 = floor(bc_rc/CHM_LCA_d);
if (temp1 > 30 || temp1 < 10) {
test = -1;
}
codePhaseStart[i] = codePhaseEnd[i];
temp2 = fmod(bc_rc, (double)CHM_LCA_d);
if (temp2 < 0.0) {
temp2 += (double)CHM_LCA_d;
}
cpElapsedEnd[i] += temp1;
codePhaseEnd[i] = temp2;
carrPhaseStart[i] = carrPhaseEnd[i];
double temp3 = fmod((double)(carrFreq[i]*CHM_T_d + carrPhaseEnd[i]), (double)1.0);
if (temp3 < 0.0) {
temp3 += 1.0;
}
carrPhaseEnd[i] = temp3;
// COMPUTE TXTIME
// Reset the PRN flag
txTime[i] = cpRefTOW[i] +
((cpElapsedEnd[i] - cpRef[i]) * CONST_T_CA) +
(codePhaseEnd[i] / CONST_F_CA);
// SATELLITE COMPUTATION
// Save the PRN number for this index
currPrn = PRNs[i]-1;
CHM_Get_Sat_Pos(&(satStates[i]), &(navData[ephToUse[i]].eph[currPrn]), txTime[i]);
i += stride;
}
return;
}
/** \brief Computes values needed by BCM grid evaluation -- ENU->ECEF rotation matrix and the satellite states
*
* \param rxTime The current time of the receiver's internal clock (no delta-t applied)
* \param txTime When each satellite sent the transmission that is now the sample set (computed by the channel parameters)
* \param centerPt The current best estimate on the receiver's state
* \param satStates The states of the tracked satellites according to the channel parameters
* \param numChan The number of channels being tracked
* \param timeGrid The specific offsets from centerPt being evaluated on the grid
* \param timeGridDim The number of time offsets being evaluated
* \param batchSatStates Output: The specific states of the satellites at all the offsets on the grid
* \param enu2ecefMat Output: The rotation matrix to convert the centerPt-ENU frame to ECEF
*
*/
__global__ void
CHM_GridPrep(double rxTime, double *txTime, double *centerPt, dsp::utils::state_t<double> *satStates,
int numChan, double *timeGrid, int timeGridDim,
dsp::utils::state_t<double> *batchSatStates, double *enu2ecefMat) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
int currPt; // The point on the grid being examined
int currChan; // The channel this thread is processing
double satPosTransmitTime;
double cos_tau_OEDot;
double sin_tau_OEDot;
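// Thread layout: thread i handles channel i / timeGridDim at grid offset i % timeGridDim, and the one
// extra thread (i == numChan * timeGridDim) fills in the ENU->ECEF rotation matrix.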
while(i < numChan * timeGridDim + 1) {
// Update index tracker
currPt = POSMOD(i, timeGridDim);
currChan = i / timeGridDim;
// If the last thread, compute the ENU->ECEF rotation matrix
// This looks like it goes ECEF->ENU, but ENU->ECEF is the transpose of ECEF->ENU,
// and ENU->ECEF gives the transformation from grid-space to satellite frame space,
// so that's the rotation we actually want.
if (i == numChan * timeGridDim) {
double posLL[2];
CHM_Dev_ECEF2LL_Rad(centerPt, posLL);
CHM_Dev_R_ENU2ECEF(posLL, enu2ecefMat);
}
// Otherwise, find the satellite state you're responsible for rotating
// We know the satellite's location from the channel parameters -- it's a function of the received signal.
// However, the state we compute from the channel parameters needs to consider the rotation of the Earth
// for EACH time being evaluated on the grid. So, the multiple satellite states being computed here are the same
// location in different ECEF frame-times.
else {
// For this satellite position, compute the transmit time (TOF) to the candidate point
satPosTransmitTime = rxTime - (txTime[currChan] + ((timeGrid[currPt] + centerPt[3])/(double)CONST_C)) + satStates[currChan].delta_t; // More conceptually accurate
// Convert the satellite and coordinate positions to ECI to add
cos_tau_OEDot = cos(-CONST_OEDot*satPosTransmitTime);
sin_tau_OEDot = sin(-CONST_OEDot*satPosTransmitTime);
// batchSatStates contains the states for all grid points for the first channel, then for the second channel, ...
// Rotate the satellite position over the earth for this point's pvt state
batchSatStates[i].x = cos_tau_OEDot * satStates[currChan].x
- sin_tau_OEDot * satStates[currChan].y;
batchSatStates[i].y = sin_tau_OEDot * satStates[currChan].x
+ cos_tau_OEDot * satStates[currChan].y;
batchSatStates[i].z = satStates[currChan].z;
batchSatStates[i].delta_t = satStates[currChan].delta_t;
batchSatStates[i].x_dot = cos_tau_OEDot * satStates[currChan].x_dot
- sin_tau_OEDot * satStates[currChan].y_dot
- CONST_OEDot * sin_tau_OEDot * satStates[currChan].x
- CONST_OEDot * cos_tau_OEDot * satStates[currChan].y;
batchSatStates[i].y_dot = sin_tau_OEDot * satStates[currChan].x_dot
+ cos_tau_OEDot * satStates[currChan].y_dot
+ CONST_OEDot * cos_tau_OEDot * satStates[currChan].x
- CONST_OEDot * sin_tau_OEDot * satStates[currChan].y;
batchSatStates[i].z_dot = satStates[currChan].z_dot;
batchSatStates[i].delta_t_dot = satStates[currChan].delta_t_dot;
}
i += stride;
}
}
dsp::cuChanMgr::cuChanMgr(){
ModuleName = "cuChanMgr";
AllocateInputs(15);
AllocateOutputs(18);
Started = 0;
/**
* INPUT CONFIGURATION
*/
// Configure inputs
ConfigExpectedInput(0, "InitEph", UNDEFINED_t, EPHEMS, VECTORLENGTH_ANY);
ConfigExpectedInput(1, "InitPRN", CHAR_t, VALUE, VECTORLENGTH_ANY);
ConfigExpectedInput(2, "InitCodePhase", DOUBLE_t, VALUE, VECTORLENGTH_ANY);
ConfigExpectedInput(3, "InitCarrierPhase", DOUBLE_t, VALUE, VECTORLENGTH_ANY);
ConfigExpectedInput(4, "InitCodeFrequency", DOUBLE_t, FREQUENCY_HZ, VECTORLENGTH_ANY);
ConfigExpectedInput(5, "InitCarrierFrequency", DOUBLE_t, FREQUENCY_HZ, VECTORLENGTH_ANY);
ConfigExpectedInput(6, "InitElapsedCodePeriods", INT_t, VALUE, VECTORLENGTH_ANY);
ConfigExpectedInput(7, "InitReferenceCodePeriods", INT_t, VALUE, VECTORLENGTH_ANY);
ConfigExpectedInput(8, "InitCPRefTOW", INT_t, VALUE, VECTORLENGTH_ANY);
ConfigExpectedInput(9, "InitRXTime", DOUBLE_t, VALUE, VECTORLENGTH_ANY);
ConfigExpectedInput(10, "xCurrk1k1", DOUBLE_t, STATE, VECTORLENGTH_ANY);
ConfigExpectedInput(11, "SampleLength", DOUBLE_t, VALUE, 1);
ConfigExpectedInput(12, "xCurrkk1", DOUBLE_t, STATE, VECTORLENGTH_ANY);
ConfigExpectedInput(13, "TimeGrid", DOUBLE_t, VALUE, VECTORLENGTH_ANY);
InsertParam("DopplerSign", (void*)&dopplerSign, INT_t, sizeof(int), sizeof(int));
std::clog << "[" << ModuleName << "] Configured inputs" << std::endl;
// Note: Phase and cpEla params are saved wrt both the start AND end of the sample set!
// BatchCorrScores wants references wrt the start so indices can be counted forwards
// State estimation is done wrt the end, since we have that full set of samples
// Frequency measurements are wrt the end, though this really just means they include the most recent measurement
// This is consistent with PyGNSS:
// rxTime+=T -> TimeUpdateState -> BatchCorr -> TimeUpdateChannels -> ManifoldEstimation -> MeasUpdateState -> MeasUpdateChannels
// PyGNSS does it that way so BatchCorr uses start-referenced params and ManifoldEstimation uses end-referenced params.
// Though, doing the param updating all at once and denoting which is start-referenced and end-referenced is clearer.
ConfigOutput(0, "rxTime", DOUBLE_t, VALUE, HOST, 1, NULL, 0);
ConfigOutput(1, "txTime", DOUBLE_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(2, "CodePhaseStart", DOUBLE_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(3, "CarrierPhaseStart", DOUBLE_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(4, "CodePhaseEnd", DOUBLE_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(5, "CarrierPhaseEnd", DOUBLE_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(6, "CodeFrequency", DOUBLE_t, FREQUENCY_HZ, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(7, "CarrierFrequency", DOUBLE_t, FREQUENCY_HZ, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(8, "SatStates", DOUBLE_t, STATE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(9, "DopplerSign", INT_t, VALUE, CUDA_DEVICE, 1, NULL, 0);
ConfigOutput(10, "ValidPRNs", CHAR_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(11, "cpReference", INT_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(12, "cpElapsedStart", INT_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(13, "cpElapsedEnd", INT_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(14, "ENU2ECEFMat", DOUBLE_t, VALUE, CUDA_DEVICE, 9, NULL, 0);
ConfigOutput(15, "SatStatesOld", DOUBLE_t, STATE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(16, "cpRef", INT_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(17, "cpRefTOW", INT_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
}
dsp::cuChanMgr::~cuChanMgr(){
if (Started) Stop();
delete [] ephSetArr;
delete [] inputs;
delete [] outputs;
delete [] expectedInputs;
}
int
dsp::cuChanMgr::Start(void* cuFlowStream) {
// Check module status and report accordingly
if (Started) {
std::clog << "[" << ModuleName << "] Start: Already Started." << std::endl;
return 0;
}
std::clog << "[" << ModuleName << "] Starting ... " << std::flush;
// Set the CUDA Stream for the GPU operations
cuStream = (hipStream_t*)cuFlowStream;
// Get inputs
// Determine how many channels are being tracked
numChan = inputs[1]->VectorLength;
// Get the pointer to the state (on device)
xk1k1_d = (double*)inputs[10]->Data;
xkk1_d = (double*)inputs[12]->Data;
// Get manifold info
timeGrid_d = (double*)inputs[13]->Data;
timeGridDim = inputs[13]->VectorLength;
// Get the starting time
rxTime = *((double*)(inputs[9]->Data));
// Length of one sample set (should be 1ms)
T = *((double*)(inputs[11]->Data));
// Round to the nearest 1us to ensure there is no trailing garbage precision
T = round(T*1.0e6)/1.0e6;
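// e.g. an upstream T of 0.0009999999997 s (hypothetical value, for illustration) becomes exactly 0.001 s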
// Copy processing parameters to device constant memory
cuCheckMSt(hipMemcpyToSymbol(CHM_LCA_d, &numCA, sizeof(int), 0, hipMemcpyHostToDevice));
cuCheckMSt(hipMemcpyToSymbol(CHM_T_d, &T, sizeof(double), 0, hipMemcpyHostToDevice));
// Get the input parameters copied into local device memory
int size = sizeof(uint8_t)*CONST_PRN_MAX;
cuCheckMSt(hipMalloc((void**)&PRNs_d, size));
cuCheckMSt(hipMemcpyAsync(PRNs_d, (uint8_t*)inputs[1]->Data, size, hipMemcpyHostToDevice, *cuStream));
// Channel parameters and friends
size = sizeof(double)*CONST_PRN_MAX;
cuCheckMSt(hipMalloc((void**)&codePhaseStart_d, size));
cuCheckMSt(hipMalloc((void**)&codePhaseEnd_d, size));
cuCheckMSt(hipMemcpyAsync(codePhaseEnd_d, (double*)inputs[2]->Data, size, hipMemcpyHostToDevice, *cuStream));
cuCheckMSt(hipMalloc((void**)&carrierPhaseStart_d, size));
cuCheckMSt(hipMalloc((void**)&carrierPhaseEnd_d, size));
cuCheckMSt(hipMemcpyAsync(carrierPhaseEnd_d, (double*)inputs[3]->Data, size, hipMemcpyHostToDevice, *cuStream));
cuCheckMSt(hipMalloc((void**)&codeFrequency_d, size));
cuCheckMSt(hipMemcpyAsync(codeFrequency_d, (double*)inputs[4]->Data, size, hipMemcpyHostToDevice, *cuStream));
cuCheckMSt(hipMalloc((void**)&carrierFrequency_d, size));
cuCheckMSt(hipMemcpyAsync(carrierFrequency_d, (double*)inputs[5]->Data, size, hipMemcpyHostToDevice, *cuStream));
cuCheckMSt(hipMalloc((void**)&txTime_d, size));
size = sizeof(int)*CONST_PRN_MAX;
cuCheckMSt(hipMalloc((void**)&cpElapsedStart_d, size));
cuCheckMSt(hipMalloc((void**)&cpElapsedEnd_d, size));
cuCheckMSt(hipMemcpyAsync(cpElapsedEnd_d, (int*)inputs[6]->Data, size, hipMemcpyHostToDevice, *cuStream));
cuCheckMSt(hipMalloc((void**)&cpReference_d, size));
cuCheckMSt(hipMemcpyAsync(cpReference_d, (int*)inputs[7]->Data, size, hipMemcpyHostToDevice, *cuStream));
cuCheckMSt(hipMalloc((void**)&TOWcpReference_d, size));
cuCheckMSt(hipMemcpyAsync(TOWcpReference_d, (int*)inputs[8]->Data, size, hipMemcpyHostToDevice, *cuStream));
cuCheckMSt(hipMalloc((void**)&dopplerSign_d, size));
cuCheckMSt(hipMemcpyAsync(dopplerSign_d, &dopplerSign, sizeof(int), hipMemcpyHostToDevice, *cuStream));
cuCheckMSt(hipMalloc((void**)&ephToUse_d, size));
// PVT state of each satellite as calculated by the channel params
size = sizeof(dsp::utils::state_t<double>)*CONST_PRN_MAX;
cuCheckMSt(hipMalloc((void**)&satStates_d, size));
// Satellite state things
cuCheckMSt(hipMalloc((void**)&batchSatStates_d, sizeof(dsp::utils::state_t<double>)*CONST_PRN_MAX*timeGridDim));
cuCheckMSt(hipMalloc((void**)&enu2ecefMat_d, sizeof(double)*9));
// Load ephemeris
ephSetVec = *((std::vector<dsp::utils::ephSet_t>*)(inputs[0]->Data));
ephSetSize = ephSetVec.size();
// Convert eph's from vector to array so hipMemcpy can easily copy to device
ephSetArr = new dsp::utils::ephSet_t[ephSetSize];
for (int i = 0; i < ephSetSize; i++) { ephSetArr[i].copyInto(ephSetVec[i]); }
// Allocate space for every ephSet in navData
size = sizeof(dsp::utils::ephSet_t)*ephSetSize;
cuCheckMSt(hipMalloc((void**)&ephSetPtr_d, size));
cuCheckMSt(hipMemcpyAsync(ephSetPtr_d, ephSetArr, size, hipMemcpyHostToDevice, *cuStream));
// Compute satellite states for the first computation
// (launching kernels with fewer than 32 threads can be inefficient, even if fewer than 10 channels are being tracked)
hipLaunchKernelGGL(( CHM_ComputeSatStates), dim3(1), dim3(auxil::roundUpToNextPowerOfTwo(CONST_PRN_MAX)), 0, *cuStream,
ephSetPtr_d, ephSetSize, ephToUse_d,
satStates_d,
codePhaseEnd_d,
cpElapsedEnd_d, cpReference_d, TOWcpReference_d,
PRNs_d, numChan,
txTime_d);
// Propagate the channels for the next iteration
// (launching kernels with fewer than 32 threads can be inefficient, even if fewer than 10 channels are being tracked)
hipLaunchKernelGGL(( CHM_TimeUpdateChannels), dim3(1), dim3(auxil::roundUpToNextPowerOfTwo(CONST_PRN_MAX)), 0, *cuStream,
ephSetPtr_d, ephSetSize, ephToUse_d,
xk1k1_d, satStates_d,
codePhaseStart_d, codePhaseEnd_d, codeFrequency_d,
carrierPhaseStart_d, carrierPhaseEnd_d, carrierFrequency_d, dopplerSign_d,
cpElapsedStart_d, cpElapsedEnd_d, cpReference_d, TOWcpReference_d,
PRNs_d, numChan,
rxTime, txTime_d, T);
// Advance the rxTime to the end of the next sample set
// (needs to happen after the propagate because MeasUpdate uses the rxTime of this iteration)
rxTime += T;
// Compute the rotated satellite positions
prepBlockCount = floor((numChan*timeGridDim+1)/64)+1;
// Shouldn't need to update sat states here since CHM_TimeUpdateChannels does it
// Compute the satellite state from the ephemerides using the channel parameters
// Rotate the satellites over the earth for every candidate grid point
hipLaunchKernelGGL(( CHM_GridPrep), dim3(prepBlockCount), dim3(64), 0, *cuStream, rxTime, txTime_d, xkk1_d, satStates_d,
numChan, timeGrid_d, timeGridDim,
batchSatStates_d, enu2ecefMat_d);
// Now that space is allocated, assign the position buffer and size
outputs[0].Data = (void*)&rxTime;
outputs[0].VectorLength = 1;
outputs[1].Data = txTime_d;
outputs[1].VectorLength = numChan;
outputs[2].Data = codePhaseStart_d;
outputs[2].VectorLength = numChan;
outputs[3].Data = carrierPhaseStart_d;
outputs[3].VectorLength = numChan;
outputs[4].Data = codePhaseEnd_d;
outputs[4].VectorLength = numChan;
outputs[5].Data = carrierPhaseEnd_d;
outputs[5].VectorLength = numChan;
outputs[6].Data = codeFrequency_d;
outputs[6].VectorLength = numChan;
outputs[7].Data = carrierFrequency_d;
outputs[7].VectorLength = numChan;
outputs[8].Data = batchSatStates_d;
outputs[8].VectorLength = numChan*timeGridDim;
outputs[9].Data = dopplerSign_d;
outputs[9].VectorLength = numChan;
outputs[10].Data = PRNs_d;
outputs[10].VectorLength = numChan;
outputs[11].Data = cpReference_d;
outputs[11].VectorLength = numChan;
outputs[12].Data = cpElapsedStart_d;
outputs[12].VectorLength = numChan;
outputs[13].Data = cpElapsedEnd_d;
outputs[13].VectorLength = numChan;
outputs[14].Data = enu2ecefMat_d;
outputs[14].VectorLength = 9;
outputs[15].Data = satStates_d;
outputs[15].VectorLength = numChan;
outputs[16].Data = cpReference_d;
outputs[16].VectorLength = numChan;
outputs[17].Data = TOWcpReference_d;
outputs[17].VectorLength = numChan;
// Make sure all GPU tasks have completed before continuing
cuCheckMSt(hipStreamSynchronize(*cuStream));
cuCheckMSt(hipDeviceSynchronize());
// Signifies that the next call to update() will be the first after start()
Started = 1;
std::clog << "Started." << std::endl;
return 0;
}
int
dsp::cuChanMgr::Stop() {
int ret = 0;
if (Started == 0) {
std::clog << "[" << ModuleName << "] Stop: Wasn't Started." << std::endl;
return 0;
}
std::clog << "[" << ModuleName << "] Stopping ... " << std::flush;
// Free device memory
cuCheckMSp(hipFree((void*)codePhaseStart_d));
cuCheckMSp(hipFree((void*)codePhaseEnd_d));
cuCheckMSp(hipFree((void*)carrierPhaseStart_d));
cuCheckMSp(hipFree((void*)carrierPhaseEnd_d));
cuCheckMSp(hipFree((void*)codeFrequency_d));
cuCheckMSp(hipFree((void*)carrierFrequency_d));
cuCheckMSp(hipFree((void*)cpElapsedStart_d));
cuCheckMSp(hipFree((void*)cpElapsedEnd_d));
cuCheckMSp(hipFree((void*)cpReference_d));
cuCheckMSp(hipFree((void*)TOWcpReference_d));
cuCheckMSp(hipFree((void*)dopplerSign_d));
cuCheckMSp(hipFree((void*)PRNs_d));
cuCheckMSp(hipFree((void*)txTime_d));
cuCheckMSp(hipFree((void*)ephToUse_d));
cuCheckMSp(hipFree((void*)ephSetPtr_d));
cuCheckMSp(hipFree((void*)satStates_d));
cuCheckMSp(hipFree((void*)batchSatStates_d));
cuCheckMSp(hipFree((void*)enu2ecefMat_d));
Started = 0;
std::clog << "Stopped." << std::endl;
return ret;
}
int
dsp::cuChanMgr::Update(void* cuFlowStream) {
if (Started == 0){
std::cerr << "[" << ModuleName
<< "] Error: Update() failed because the module has not been started"
<< std::endl;
return -1;
}
// Propagate the channels for the next iteration
// (launching kernels with fewer than 32 threads can be inefficient, even if fewer than 10 channels are being tracked)
hipLaunchKernelGGL(( CHM_PropagateChannels), dim3(1), dim3(auxil::roundUpToNextPowerOfTwo(CONST_PRN_MAX)), 0, *cuStream,
ephSetPtr_d, ephSetSize, ephToUse_d,
xk1k1_d, satStates_d,
codePhaseStart_d, codePhaseEnd_d, codeFrequency_d,
carrierPhaseStart_d, carrierPhaseEnd_d, carrierFrequency_d, dopplerSign_d,
cpElapsedStart_d, cpElapsedEnd_d, cpReference_d, TOWcpReference_d,
PRNs_d, numChan,
rxTime, txTime_d, T);
// Advance the rxTime to the end of the next sample set
// (needs to happen after the propagate because MeasUpdate uses the rxTime of this iteration)
rxTime += T;
// Compute the rotated satellite positions
prepBlockCount = floor((numChan*timeGridDim+1)/64)+1;
// Shouldn't need to run CHM_ComputeSatStates since CHM_PropagateChannels does this at the end
// Determine the satellite positions from the ephemerides using the channel parameters
// Rotate the satellite over the earth for each candidate grid point
hipLaunchKernelGGL(( CHM_GridPrep), dim3(prepBlockCount), dim3(64), 0, *cuStream, rxTime, txTime_d, xkk1_d, satStates_d,
numChan, timeGrid_d, timeGridDim,
batchSatStates_d, enu2ecefMat_d);
// Block on host until channel parameters have updated
cuCheckMSt(hipStreamSynchronize(*cuStream));
return 0;
}
| ca9654addd9d6395e64184d2b9802dc626cd4329.cu |
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
//#include "cuPrintf.cu"
#include <iostream>
#include <cmath>
#include <cstring>
#include <cstdio>
#include <stdint.h>
#include "cuchanmgr.h"
#include "errorhandler.h"
#include "auxil.h"
__device__ __constant__ int CHM_LCA_d;
__device__ __constant__ double CHM_T_d;
/*
*
* DEVICE FUNCTIONS
*
*/
__device__ double
CHM_Correct_Week_Crossover(double time) {
if (time > 302400.0) { return time-604800.0; }
else if (time < -302400.00) { return time+604800.0; }
else { return time; }
}
// Light-weight, parallelized lat-lon calculator that returns values in radians
__device__ void
CHM_Dev_ECEF2LL_Rad(const double *posECEF, double *posLL) {
// Compute lat
double p = norm(2, posECEF);
double theta = atan2(posECEF[2]*CONST_WGS84_A, p*CONST_WGS84_B);
posLL[0] = atan2((posECEF[2] + pow(CONST_WGS84_EP,2) * CONST_WGS84_B * pow(sin(theta),3)),
(p - pow(CONST_WGS84_E,2) * CONST_WGS84_A * pow(cos(theta), 3)));
// Compute lon
posLL[1] = atan2(posECEF[1], posECEF[0]);
return;
}
// Given latitude and longitude, return the elements of the ENU->ECEF rotation matrix
__device__ void
CHM_Dev_R_ENU2ECEF(const double *posLL, double *posENU) {
double sinLat = sin(posLL[0]);
double sinLon = sin(posLL[1]);
double cosLat = cos(posLL[0]);
double cosLon = cos(posLL[1]);
posENU[0] = -sinLon;
posENU[1] = -sinLat*cosLon;
posENU[2] = cosLat*cosLon;
posENU[3] = cosLon;
posENU[4] = -sinLat*sinLon;
posENU[5] = cosLat*sinLon;
posENU[6] = 0.0;
posENU[7] = cosLat;
posENU[8] = sinLat;
return;
}
// Function to be called within Update_Sat_Pos to prevent function saturation
// Note: this is hardcoded to compute GPS ephemerides only!
// See ephemeris.c in RTKLIB for potentially useful code for multi-constellation
//
// NOTE: IF THERE ARE ERRORS, "FMOD" is defined differently than np.mod! Check those lines!!!
//
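// For reference: C/CUDA fmod() takes the sign of the dividend, whereas numpy's np.mod() takes the sign
// of the divisor, e.g. fmod(-1.5, 1023.0) == -1.5 but np.mod(-1.5, 1023.0) == 1021.5. That is why the
// kernels below re-add the modulus whenever an fmod() result comes back negative.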
__device__ int
CHM_Get_Sat_Pos(dsp::utils::state_t<double> *satState, dsp::utils::eph_t *satEph, double txTime) {
// Corrected mean motion
double n = sqrt(dsp::utils::MU_GPS/(CHM_CUBE(satEph->A))) + satEph->deln;
// Compute satellite clock corrections
double tc = CHM_Correct_Week_Crossover(txTime - satEph->tocs); // Without corrections
double clkb = satEph->f2*tc*tc + satEph->f1*tc + satEph->f0 - satEph->tgd[0]; // Without relativistic correction
double tk = CHM_Correct_Week_Crossover(txTime - clkb - satEph->toes); // Without relativistic correction
// Mean anomaly
double E, M, f, dfdE, dE = 1;
E = M = fmod((satEph->M0 + n*tk), CONST_2PI);
// Eccentric anomaly
for (int eccIdx = 0; eccIdx < CHM_MAX_ITER_KEPLER && abs(dE) > CHM_RTOL_KEPLER; eccIdx++) {
f = M - E + satEph->e * sin(E);
dfdE = -1.0 + satEph->e * cos(E);
dE = -f / dfdE;
E = fmod(E + dE, CONST_2PI);
//if (abs(dE) < SATPOS_RTOL_KEPLER) { break; } // Break here if convergence is achieved!
}
if (abs(dE) > CHM_RTOL_KEPLER) { return -1; }
// Add in relativistic corrections and compute clock drift
double dtr = CONST_F*(satEph->e)*(satEph->sqrt_A)*sin(E);
tc = txTime - (clkb + dtr) - satEph->tocs;
clkb = satEph->f2*tc*tc + satEph->f1*tc + satEph->f0 + dtr - satEph->tgd[0];
double clkd = satEph->f1 + 2.0*satEph->f2*tc;
// Recompute tk with relativistic correction
tk = CHM_Correct_Week_Crossover(txTime - clkb - satEph->toes);
// Recompute mean anomaly with relativistic correction
dE = 1;
E = M = fmod((satEph->M0 + n*tk), (CONST_2PI));
// Eccentric anomaly
for (int eccIdx = 0; eccIdx < CHM_MAX_ITER_KEPLER && abs(dE) > CHM_RTOL_KEPLER; eccIdx++) {
f = M - E + satEph->e * sin(E);
dfdE = -1.0 + satEph->e * cos(E);
dE = -f / dfdE;
E = fmod(E + dE, CONST_2PI);
//if (abs(dE) < SATPOS_RTOL_KEPLER) { break; } // Break here if convergence is achieved!
}
if (abs(dE) > CHM_RTOL_KEPLER) { return -1; }
// Compute helpers
double sinE = sin(E);
double cosE = cos(E);
// True anomaly
double v = atan2(sqrt(1.0-satEph->e_sqr) * sinE / (1.0-satEph->e*cosE), (cosE-satEph->e)/(1.0-satEph->e*cosE));
// Argument of latitude
double u = fmod(v + satEph->omg, CONST_2PI); // Don't need mod here? (Computing for guarantee)
// Second harmonic perturbations
double cos2u = cos(2.0*u);
double sin2u = sin(2.0*u);
// Argument of latitude correction -> corrected argument of latitude
u += satEph->cuc * cos2u + satEph->cus * sin2u;
// Radius correction -> corrected radius
double r = satEph->A * (1.0 - satEph->e * cosE) + satEph->crc * cos2u + satEph->crs * sin2u;
// Orbital inclination correction -> corrected inclination
double i = satEph->i0 + satEph->idot * tk + satEph->cic * cos2u + satEph->cis * sin2u;
// Corrected longitude of node
double omegak = fmod(satEph->OMG0 + (satEph->OMGd-CONST_OEDot)*tk - CONST_OEDot*satEph->toes, CONST_2PI);
// Positions in orbital plane
double x_op = r * cos(u);
double y_op = r * sin(u);
double cos_omegak = cos(omegak);
double sin_omegak = sin(omegak);
double cosi = cos(i);
double sini = sin(i);
// Assign position states
double state_x = x_op * cos_omegak - y_op * sin_omegak * cosi;
satState->x = state_x;
double state_y = x_op * sin_omegak + y_op * cos_omegak * cosi;
satState->y = state_y;
double state_z = y_op * sini;
satState->z = state_z;
satState->delta_t = clkb;
// Velocity calculation
// Second harmonic perturbations
cos2u = cos(2.0*u);
sin2u = sin(2.0*u);
double edot = n / (1.0 - satEph->e*cosE);
double vdot = sinE*edot*(1.0 + satEph->e*cos(v)) / (sin(v)*(1.0-satEph->e*cosE));
double udot = vdot + 2.0*(satEph->cus*cos2u - satEph->cuc*sin2u)*vdot;
double rdot = satEph->A*satEph->e*sinE*edot + 2.0*(satEph->crs*cos2u - satEph->crc*sin2u)*vdot;
double idotdot = satEph->idot + (satEph->cis*cos2u - satEph->cic*sin2u)*2*vdot;
double vx_op = rdot*cos(u) - y_op*udot;
double vy_op = rdot*sin(u) + x_op*udot;
double omegadot = satEph->OMGd - CONST_OEDot;
double tmpa = vx_op - y_op*cosi*omegadot;
double tmpb = x_op*omegadot + vy_op*cosi - y_op*sini*idotdot;
// Assign velocity states
double state_xdot = tmpa * cos_omegak - tmpb * sin_omegak;
satState->x_dot = state_xdot;
double state_ydot = tmpa * sin_omegak + tmpb * cos_omegak;
satState->y_dot = state_ydot;
double state_zdot = vy_op*sini + y_op*cosi*idotdot;
satState->z_dot = state_zdot;
satState->delta_t_dot = clkd;
// TODO: implement ionospheric corrections?
return 0;
}
/*
*
* DEVICE KERNELS
*
*/
/** \brief Compute the satellite state given the current estimate of the received signal
*
* The initial update -- propagates forward from scalar handoff
*
* \param navData Ptr to all ephemerides we have
* \param navDataSize The number of ephemerides in navData
* \param ephToUse Output: the ephemerides chosen (closest in time to the current state estimate)
* \param satStates Output: The states of the tracked satellites according to the channel parameters
* \param codePhase Ptr to the current estimate of the code phase of each channel
* \param cpElapsed The number of code periods for this sample set since the start of tracking
* \param cpRef The number of code periods elapsed since the start of tracking for the reference TOW
* \param cpRefTOW The TOW at the reference code period
* \param PRNs The satellite channels being tracked
* \param numChan The number of channels being tracked
* \param txTime Output: When each satellite sent the transmission that is now the sample set (computed by the channel parameters)
*
*/
__global__ void
CHM_ComputeSatStates(dsp::utils::ephSet_t *navData, int navDataSize, int *ephToUse,
dsp::utils::state_t<double> *satStates,
double *codePhase,
int *cpElapsed, int *cpRef, int *cpRefTOW,
uint8_t *PRNs, int numChan,
double *txTime) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
int test = 0;
// only compute if within the range to estimate
while (i < numChan) {
// COMPUTE TXTIME
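// As a sketch (assuming the usual GPS L1 C/A constants, CONST_T_CA = 1 ms and CONST_F_CA = 1.023 MHz):
//   txTime = cpRefTOW + (cpElapsed - cpRef) * CONST_T_CA + codePhase / CONST_F_CA
// i.e. the TOW at the reference code period, plus one code period per period elapsed since that
// reference, plus the fractional period implied by the current code phase in chips.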
txTime[i] = cpRefTOW[i] +
((cpElapsed[i] - cpRef[i]) * CONST_T_CA) +
(codePhase[i] / CONST_F_CA);
// SATELLITE COMPUTATION
// Save the PRN number for this index
int currPrn = PRNs[i]-1;
// Reset eph to the unchosen index
ephToUse[i] = -1;
// Check all eph's and find the one closest to the passed-in time if we have valid ephems for it
// TODO: cleaner eph search that doesn't require the brute force search?
// or just stick with this to be thorough?
for(int currIdx = 0; currIdx < navDataSize; currIdx++) {
// Check if the ephems are valid for this PRN
if (navData[currIdx].ephValid[currPrn]) {
// If no eph chosen and this eph is valid, select this eph
if (ephToUse[i] == -1) {
ephToUse[i] = currIdx;
}
// Update the best eph if it's closer in time to the transmit time
else if (fabs(navData[currIdx].ephToes - txTime[i]) <
fabs(navData[ephToUse[i]].ephToes - txTime[i])) {
ephToUse[i] = currIdx;
}
}
}
// Make sure that an ephemeris actually got selected
if (ephToUse[i] == -1) {
// No valid ephemeris exists for this PRN; skip the channel so navData is never indexed with -1
// FIXME: Remove channel
test = 1;
i += stride;
continue;
}
CHM_Get_Sat_Pos(&(satStates[i]), &(navData[ephToUse[i]].eph[currPrn]), txTime[i]);
i += stride;
}
}
/** \brief The standard update: Performs MeasUpdate -> TimeUpdate -> txTime -> satState
*
* Propagates from EKF measurement; to be run in update
*
* \param navData Ptr to all ephemerides we have
* \param navDataSize The number of ephemerides in navData
* \param ephToUse The ephemerides chosen (closest in time to the current state estimate)
* \param centerPt The current best estimate on the receiver's state
* \param satStates Output: the states of the tracked satellites according to the channel parameters
* \param codePhaseStart Ptr to the estimate of the code phase of each channel at the beginning of the sample set
* \param codePhaseEnd Ptr to the estimate of the code phase of each channel at the end of the sample set
* \param codeFreq Ptr to the estimate of code frequency (under Doppler effects)
* \param carrPhaseStart Ptr to the estimate of the carrier phase of each channel at the beginning of the sample set
* \param carrPhaseEnd Ptr to the estimate of the carrier phase of each channel at the end of the sample set
* \param carrFreq Ptr to the estimate of the carrier frequency (under Doppler effects)
* \param dopplerSign The sign convention for Doppler frequency
* \param cpElapsedStart The number of code periods for the start of this sample set since the start of tracking
* \param cpElapsedEnd The number of code periods for the end of this sample set since the start of tracking
* \param cpRef The number of code periods elapsed since the start of tracking for the reference TOW
* \param cpRefTOW The TOW at the reference code period
* \param PRNs The satellite channels being tracked
* \param numChan The number of channels being tracked
* \param rxTime The current time of the receiver's internal clock (no delta-t applied)
* \param txTime Output: When each satellite sent the transmission that is now the sample set (computed by the channel parameters)
* \param T The length in seconds of one sample set
*
*/
__global__ void
CHM_PropagateChannels(dsp::utils::ephSet_t *navData, int navDataSize, int *ephToUse,
const double *centerPt, dsp::utils::state_t<double> *satStates,
double *codePhaseStart, double *codePhaseEnd, double *codeFreq,
double *carrPhaseStart, double *carrPhaseEnd, double *carrFreq, int *dopplerSign,
int *cpElapsedStart, int *cpElapsedEnd, int *cpRef, int *cpRefTOW,
uint8_t *PRNs, int numChan,
double rxTime, double *txTime, double T)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
int test = 0;
// Back-calculation values for finding scores
int currPrn;
double currPtVelECI[4];
double satPosTransmitTime;
dsp::utils::state_t<double> currPtSatState;
double bc_los[3];
double bc_range;
double bc_losrangerate;
double bc_fi;
double bc_rc0;
double bc_fc;
double bc_pseudorange;
double bc_txTime;
double bc_codeFracDiff;
double bc_rc;
double cos_tau_OEDot;
double sin_tau_OEDot;
// Since kernels are launched in multiples of 32 threads for efficiency,
// only compute if within the range to estimate
while (i < numChan) {
// MEASUREMENT UPDATE
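// Sketch of what this update does (dopplerSign sets the sign convention; names are informal):
//   carrier Doppler: bc_fi = F_L1 * ((losRangeRate - rxClockRate)/c + satClockDrift) / dopplerSign
//   code frequency:  bc_fc = F_CA + dopplerSign * (F_CA/F_L1) * bc_fi + (bc_rc - codePhaseEnd) / T
// so the code rate follows the carrier Doppler scaled by the chip-to-carrier ratio, plus the residual
// code-phase drift accumulated over the last sample set of length T.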
// For this satellite position, compute the transmit time (TOF) to the candidate point
satPosTransmitTime = rxTime - (txTime[i] + (centerPt[3]/(double)CONST_C)) + satStates[i].delta_t;
// Convert the satellite and coordinate positions to ECI to add
cos_tau_OEDot = cos(-CONST_OEDot*satPosTransmitTime);
sin_tau_OEDot = sin(-CONST_OEDot*satPosTransmitTime);
// Rotate the satellite position over the earth for this point's pvt state
currPtSatState.x = cos_tau_OEDot * satStates[i].x
- sin_tau_OEDot * satStates[i].y;
currPtSatState.y = sin_tau_OEDot * satStates[i].x
+ cos_tau_OEDot * satStates[i].y;
currPtSatState.z = satStates[i].z;
currPtSatState.delta_t = satStates[i].delta_t;
currPtSatState.x_dot = cos_tau_OEDot * satStates[i].x_dot
- sin_tau_OEDot * satStates[i].y_dot
- CONST_OEDot * sin_tau_OEDot * satStates[i].x
- CONST_OEDot * cos_tau_OEDot * satStates[i].y;
currPtSatState.y_dot = sin_tau_OEDot * satStates[i].x_dot
+ cos_tau_OEDot * satStates[i].y_dot
+ CONST_OEDot * cos_tau_OEDot * satStates[i].x
- CONST_OEDot * sin_tau_OEDot * satStates[i].y;
currPtSatState.z_dot = satStates[i].z_dot;
currPtSatState.delta_t_dot = satStates[i].delta_t_dot;
// Also need to rotate the point's velocity into the inertial frame
currPtVelECI[0] = centerPt[4] - CONST_OEDot*centerPt[1];
currPtVelECI[1] = centerPt[5] + CONST_OEDot*centerPt[0];
currPtVelECI[2] = centerPt[6];
currPtVelECI[3] = centerPt[7];
// Back-calculate the channel parameters
// Find carrier frequency
bc_los[0] = currPtSatState.x - centerPt[0];
bc_los[1] = currPtSatState.y - centerPt[1];
bc_los[2] = currPtSatState.z - centerPt[2];
bc_range = norm(3, bc_los); // Might as well find range this way since the unit vector is needed.
bc_losrangerate = ((bc_los[0]/bc_range)*(currPtVelECI[0]-currPtSatState.x_dot)) +
((bc_los[1]/bc_range)*(currPtVelECI[1]-currPtSatState.y_dot)) +
((bc_los[2]/bc_range)*(currPtVelECI[2]-currPtSatState.z_dot));
bc_fi = CONST_F_L1 * ((bc_losrangerate - currPtVelECI[3])/CONST_C + currPtSatState.delta_t_dot) / (*dopplerSign);
// Find the code frequency elapsed since last timestep
bc_pseudorange = bc_range - CONST_C * currPtSatState.delta_t + centerPt[3];
bc_txTime = rxTime - bc_pseudorange/CONST_C;
bc_codeFracDiff = bc_txTime - cpRefTOW[i] - ((cpElapsedEnd[i] - cpRef[i]) * CONST_T_CA);
bc_rc = bc_codeFracDiff * CONST_F_CA;
// The back-calculated code frequency is the updated frequency for the channel
bc_fc = CONST_F_CA + (*dopplerSign * CONST_F_CA/CONST_F_L1) * bc_fi + (bc_rc - codePhaseEnd[i]) / CHM_T_d;
// Something's very wrong if the codeFreq is shifted by more than 10kHz from F_CA
if (abs(bc_fc-CONST_F_CA) > 10000) {
// TODO: Throw an error if we get here
test = 3;
}
// Update channels using the calculated results
carrFreq[i] = bc_fi;
codeFreq[i] = bc_fc;
// TIME UPDATE
// ***ENHANCED*** time update -- uses the back-calculated code phase instead
// Progress the channel params by one sample set (not ephem dependent; can still do regardless)
double temp1 = floor((codeFreq[i]*CHM_T_d + codePhaseEnd[i])/CHM_LCA_d);
if (temp1 > 30 || temp1 < 10) {
test = -1;
}
double cpElapsedPred = cpElapsedEnd[i] + temp1;
double temp2 = fmod((double)(codeFreq[i]*CHM_T_d + codePhaseEnd[i]), (double)CHM_LCA_d);
if (temp2 < 0.0) {
temp2 += (double)CHM_LCA_d;
}
double codePhasePred = temp2;
// COMPUTE TXTIME
// Transmit time implied by the reference TOW, elapsed code periods, and code phase
double txTimePred = cpRefTOW[i] +
((cpElapsedPred - cpRef[i]) * CONST_T_CA) +
(codePhasePred / CONST_F_CA);
// SATELLITE COMPUTATION
// Save the PRN number for this index
int currPrn = PRNs[i]-1;
// Reset eph to the unchosen index
ephToUse[i] = -1;
// Check all eph's and find the one closest to the passed-in time if we have valid ephems for it
// TODO: cleaner eph search that doesn't require the brute force search?
// or just stick with this to be thorough?
for(int currIdx = 0; currIdx < navDataSize; currIdx++) {
// Check if the ephems are valid for this PRN
if (navData[currIdx].ephValid[currPrn]) {
// If no eph chosen and this eph is valid, select this eph
if (ephToUse[i] == -1) {
ephToUse[i] = currIdx;
}
// Update the best eph if it's closer in time to the transmit time
else if (fabs(navData[currIdx].ephToes - txTimePred) <
fabs(navData[ephToUse[i]].ephToes - txTimePred)) {
ephToUse[i] = currIdx;
}
}
}
// Make sure that an ephemeris actually got selected
if (ephToUse[i] == -1) {
// return error
// FIXME: Remove channel
test = 1;
}
dsp::utils::state_t<double> satStatePred;
CHM_Get_Sat_Pos(&satStatePred, &(navData[ephToUse[i]].eph[currPrn]), txTimePred);
// (Internally) advance the rxTime by T and recompute the code phase
// For this satellite position, compute the transmit time (TOF) to the candidate point
satPosTransmitTime = rxTime + T - (txTimePred + (centerPt[3]/(double)CONST_C)) + satStatePred.delta_t;
// Convert the satellite and coordinate positions to ECI to add
cos_tau_OEDot = cos(-CONST_OEDot*satPosTransmitTime);
sin_tau_OEDot = sin(-CONST_OEDot*satPosTransmitTime);
// Rotate the satellite position over the earth for this point's pvt state
currPtSatState.x = cos_tau_OEDot * satStatePred.x
- sin_tau_OEDot * satStatePred.y;
currPtSatState.y = sin_tau_OEDot * satStatePred.x
+ cos_tau_OEDot * satStatePred.y;
currPtSatState.z = satStatePred.z;
currPtSatState.delta_t = satStatePred.delta_t;
// Back-calculate the channel parameters
// Find carrier frequency
bc_los[0] = currPtSatState.x - centerPt[0];
bc_los[1] = currPtSatState.y - centerPt[1];
bc_los[2] = currPtSatState.z - centerPt[2];
bc_range = norm(3, bc_los); // Might as well find range this way since the unit vector is needed.
bc_pseudorange = bc_range - CONST_C * currPtSatState.delta_t + centerPt[3];
bc_txTime = rxTime + T - bc_pseudorange/CONST_C;
bc_codeFracDiff = bc_txTime - cpRefTOW[i] - ((cpElapsedEnd[i] - cpRef[i]) * CONST_T_CA);
bc_rc = bc_codeFracDiff * CONST_F_CA;
double bc_rc0_check = bc_rc - codePhaseEnd[i];
// Find the code frequency elapsed since last timestep
bc_rc0 = CONST_F_CA * (satPosTransmitTime - (bc_range / CONST_C));
// Debug comparison
double temp1old = floor((codeFreq[i]*CHM_T_d + codePhaseEnd[i])/CHM_LCA_d);
if (temp1old > 30 || temp1old < 10) {
test = -1;
}
double temp2old = fmod((double)(codeFreq[i]*CHM_T_d + codePhaseEnd[i]), (double)CHM_LCA_d);
if (temp2old < 0.0) {
temp2old += (double)CHM_LCA_d;
}
// Progress the channel params by one sample set (not ephem dependent; can still do regardless)
cpElapsedStart[i] = cpElapsedEnd[i];
temp1 = floor(bc_rc/CHM_LCA_d);
if (temp1 > 30 || temp1 < 10) {
test = -1;
}
codePhaseStart[i] = codePhaseEnd[i];
temp2 = fmod(bc_rc, (double)CHM_LCA_d);
if (temp2 < 0.0) {
temp2 += (double)CHM_LCA_d;
}
cpElapsedEnd[i] += temp1;
codePhaseEnd[i] = temp2;
carrPhaseStart[i] = carrPhaseEnd[i];
double temp3 = fmod((double)(carrFreq[i]*CHM_T_d + carrPhaseEnd[i]), (double)1.0);
if (temp3 < 0.0) {
temp3 += 1.0;
}
carrPhaseEnd[i] = temp3;
// End ENHANCED
// COMPUTE TXTIME
// Transmit time implied by the reference TOW, elapsed code periods, and code phase
txTime[i] = cpRefTOW[i] +
((cpElapsedEnd[i] - cpRef[i]) * CONST_T_CA) +
(codePhaseEnd[i] / CONST_F_CA);
// SATELLITE COMPUTATION
// Save the PRN number for this index
currPrn = PRNs[i]-1;
CHM_Get_Sat_Pos(&(satStates[i]), &(navData[ephToUse[i]].eph[currPrn]), txTime[i]);
i += stride;
}
return;
}
/** \brief The initial update: Performs TimeUpdate -> txTime -> satState
*
* Propagates from EKF measurement; to be run in update
*
* \param navData Ptr to all ephemerides we have
* \param navDataSize The number of ephemerides in navData
* \param ephToUse The ephemerides chosen (closest in time to the current state estimate)
* \param centerPt The current best estimate on the receiver's state
* \param satStates Output: the states of the tracked satellites according to the channel parameters
* \param codePhaseStart Ptr to the estimate of the code phase of each channel at the beginning of the sample set
* \param codePhaseEnd Ptr to the estimate of the code phase of each channel at the end of the sample set
* \param codeFreq Ptr to the estimate of code frequency (under Doppler effects)
* \param carrPhaseStart Ptr to the estimate of the carrier phase of each channel at the beginning of the sample set
* \param carrPhaseEnd Ptr to the estimate of the carrier phase of each channel at the end of the sample set
* \param carrFreq Ptr to the estimate of the carrier frequency (under Doppler effects)
* \param dopplerSign The sign convention for Doppler frequency
* \param cpElapsedStart The number of code periods for the start of this sample set since the start of tracking
* \param cpElapsedEnd The number of code periods for the end of this sample set since the start of tracking
* \param cpRef The number of code periods elapsed since the start of tracking for the reference TOW
* \param cpRefTOW The TOW at the reference code period
* \param PRNs The satellite channels being tracked
* \param numChan The number of channels being tracked
* \param rxTime The current time of the receiver's internal clock (no delta-t applied)
* \param txTime Output: When each satellite sent the transmission that is now the sample set (computed by the channel parameters)
* \param T The length in seconds of one sample set
*
*/
__global__ void
CHM_TimeUpdateChannels(dsp::utils::ephSet_t *navData, int navDataSize, int *ephToUse,
const double *centerPt, dsp::utils::state_t<double> *satStates,
double *codePhaseStart, double *codePhaseEnd, double *codeFreq,
double *carrPhaseStart, double *carrPhaseEnd, double *carrFreq, int *dopplerSign,
int *cpElapsedStart, int *cpElapsedEnd, int *cpRef, int *cpRefTOW,
uint8_t *PRNs, int numChan,
double rxTime, double *txTime, double T)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
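// Grid-stride loop: each thread processes channels i, i+stride, ..., so the kernel is correct for any launch size.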
int test = 0;
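// 'test' is only a debug marker set at the failure points below; this device code has no error-reporting path yet.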
// Back-calculation values for finding scores
int currPrn;
double currPtVelECI[4];
double satPosTransmitTime;
dsp::utils::state_t<double> currPtSatState;
double bc_los[3];
double bc_range;
double bc_losrangerate;
double bc_fi;
double bc_rc0;
double bc_fc;
double cos_tau_OEDot;
double sin_tau_OEDot;
// Since kernels are launched in multiples of 32 threads for efficiency,
// only compute if within the range to estimate
while (i < numChan) {
// TIME UPDATE
// ***ENHANCED*** time update -- uses the back-calculated code phase instead
// Progress the channel params by one sample set (not ephem dependent; can still do regardless)
//cpElapsed[i] += floor((BCS_S_d * (codeFreq[i]/BCS_FS_d) + codePhase[i])/BCS_LCA_d);
double temp1 = floor((codeFreq[i]*CHM_T_d + codePhaseEnd[i])/CHM_LCA_d);
if (temp1 > 30 || temp1 < 10) {
test = -1;
}
double cpElapsedPred = cpElapsedEnd[i] + temp1;
//codePhase[i] = fmod((double)(codePhase[i] + codeFreq[i]*BCS_T_d), (double)BCS_LCA_d);
double temp2 = fmod((double)(codeFreq[i]*CHM_T_d + codePhaseEnd[i]), (double)CHM_LCA_d);
if (temp2 < 0.0) {
temp2 += (double)CHM_LCA_d;
}
double codePhasePred = temp2;
// COMPUTE TXTIME
// Transmit time implied by the reference TOW, elapsed code periods, and code phase
double txTimePred = cpRefTOW[i] +
((cpElapsedPred - cpRef[i]) * CONST_T_CA) +
(codePhasePred / CONST_F_CA);
// SATELLITE COMPUTATION
// Save the PRN number for this index
currPrn = PRNs[i]-1;
// Reset eph to the unchosen index
ephToUse[i] = -1;
// Check all eph's and find the one closest to the passed-in time if we have valid ephems for it
// TODO: cleaner eph search that doesn't require the brute force search?
// or just stick with this to be thorough?
for(int currIdx = 0; currIdx < navDataSize; currIdx++) {
// Check if the ephems are valid for this PRN
if (navData[currIdx].ephValid[currPrn]) {
// If no eph chosen and this eph is valid, select this eph
if (ephToUse[i] == -1) {
ephToUse[i] = currIdx;
}
// Update the best eph if it's closer in time to the transmit time
else if (fabs(navData[currIdx].ephToes - txTimePred) <
fabs(navData[ephToUse[i]].ephToes - txTimePred)) {
ephToUse[i] = currIdx;
}
}
}
// Make sure that an ephemeris actually got selected
if (ephToUse[i] == -1) {
// TODO: Return error
test = 1;
}
dsp::utils::state_t<double> satStatePred;
CHM_Get_Sat_Pos(&satStatePred, &(navData[ephToUse[i]].eph[currPrn]), txTimePred);
// (Internally) advance the rxTime by T and recompute the code phase
// For this satellite position, compute the transmit time (TOF) to the candidate point
satPosTransmitTime = rxTime + T - (txTimePred + (centerPt[3]/(double)CONST_C)) + satStatePred.delta_t;
// Convert the satellite and coordinate positions to ECI to add
cos_tau_OEDot = cos(-CONST_OEDot*satPosTransmitTime);
sin_tau_OEDot = sin(-CONST_OEDot*satPosTransmitTime);
// Rotate the satellite position over the earth for this point's pvt state
currPtSatState.x = cos_tau_OEDot * satStatePred.x
- sin_tau_OEDot * satStatePred.y;
currPtSatState.y = sin_tau_OEDot * satStatePred.x
+ cos_tau_OEDot * satStatePred.y;
currPtSatState.z = satStatePred.z;
currPtSatState.delta_t = satStatePred.delta_t;
// Back-calculate the channel parameters
// Find carrier frequency
bc_los[0] = currPtSatState.x - centerPt[0];
bc_los[1] = currPtSatState.y - centerPt[1];
bc_los[2] = currPtSatState.z - centerPt[2];
bc_range = norm(3, bc_los); // Might as well find range this way since the unit vector is needed.
double bc_pseudorange = bc_range - CONST_C * currPtSatState.delta_t + centerPt[3];
double bc_txTime = rxTime + T - bc_pseudorange/CONST_C;
// Not using the predicted values here because we want to see how much the code phase has moved since the last
// timestep, based on the predicted satellite location and the newly-predicted receiver location.
// This would be most accurate if done iteratively, since the new code-phase estimate would in turn update the
// satellite position. But after re-applying the code-phase estimate to the satellite position and recomputing,
// the back-calculated pseudorange shouldn't change much: the satellites move at only ~4 m/ms, which is well
// under one C/A code chip (1/1023 of the 1 ms code period) of range.
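// (For scale, assuming standard GPS L1 C/A parameters: one chip corresponds to roughly c / 1.023e6 Hz ~ 293 m
//  of range, so ~4 m of satellite motion over one sample set is negligible at the chip level.)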
double bc_codeFracDiff = bc_txTime - cpRefTOW[i] - ((cpElapsedEnd[i] - cpRef[i]) * CONST_T_CA);
double bc_rc = bc_codeFracDiff * CONST_F_CA;
double bc_rc0_check = bc_rc - codePhaseEnd[i];
double modCheck = fmod(bc_rc0_check, (double)CHM_LCA_d);
// Find the code frequency elapsed since last timestep
bc_rc0 = CONST_F_CA * (satPosTransmitTime - (bc_range / CONST_C));
// Advance the code phase
double temp2old = fmod((double)(codeFreq[i]*CHM_T_d + codePhaseEnd[i]), (double)CHM_LCA_d);
if (temp2old < 0.0) {
temp2old += (double)CHM_LCA_d;
}
// Progress the channel params by one sample set (not ephem dependent; can still do regardless)
cpElapsedStart[i] = cpElapsedEnd[i];
temp1 = floor(bc_rc/CHM_LCA_d);
if (temp1 > 30 || temp1 < 10) {
test = -1;
}
codePhaseStart[i] = codePhaseEnd[i];
temp2 = fmod(bc_rc, (double)CHM_LCA_d);
if (temp2 < 0.0) {
temp2 += (double)CHM_LCA_d;
}
cpElapsedEnd[i] += temp1;
codePhaseEnd[i] = temp2;
carrPhaseStart[i] = carrPhaseEnd[i];
double temp3 = fmod((double)(carrFreq[i]*CHM_T_d + carrPhaseEnd[i]), (double)1.0);
if (temp3 < 0.0) {
temp3 += 1.0;
}
carrPhaseEnd[i] = temp3;
// COMPUTE TXTIME
// Transmit time implied by the reference TOW, elapsed code periods, and code phase
txTime[i] = cpRefTOW[i] +
((cpElapsedEnd[i] - cpRef[i]) * CONST_T_CA) +
(codePhaseEnd[i] / CONST_F_CA);
// SATELLITE COMPUTATION
// Save the PRN number for this index
currPrn = PRNs[i]-1;
CHM_Get_Sat_Pos(&(satStates[i]), &(navData[ephToUse[i]].eph[currPrn]), txTime[i]);
i += stride;
}
return;
}
/** \brief Computes values needed by BCM grid evaluation -- ENU->ECEF rotation matrix and the satellite states
*
* \param rxTime The current time of the receiver's internal clock (no delta-t applied)
* \param txTime When each satellite sent the transmission that is now the sample set (computed by the channel parameters)
* \param centerPt The current best estimate on the receiver's state
* \param satStates The states of the tracked satellites according to the channel parameters
* \param numChan The number of channels being tracked
* \param timeGrid The specific offsets from centerPt being evaluated on the grid
* \param timeGridDim The number of time offsets being evaluated
* \param batchSatStates Output: The specific states of the satellites at all the offsets on the grid
* \param enu2ecefMat Output: The rotation matrix to convert the centerPt-ENU frame to ECEF
*
*/
__global__ void
CHM_GridPrep(double rxTime, double *txTime, double *centerPt, dsp::utils::state_t<double> *satStates,
int numChan, double *timeGrid, int timeGridDim,
dsp::utils::state_t<double> *batchSatStates, double *enu2ecefMat) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
int currPt; // The point on the grid being examined
int currChan; // The channel this thread is processing
double satPosTransmitTime;
double cos_tau_OEDot;
double sin_tau_OEDot;
while(i < numChan * timeGridDim + 1) {
// Update index tracker
currPt = POSMOD(i, timeGridDim);
currChan = i / timeGridDim;
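// Flattened indexing: thread i covers (channel = i / timeGridDim, grid point = i % timeGridDim);
// the one extra thread (i == numChan*timeGridDim) builds the ENU->ECEF rotation matrix instead.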
// If the last thread, compute the ENU->ECEF rotation matrix
// This looks like it goes ECEF->ENU, but ENU->ECEF is the transpose of ECEF->ENU,
// and ENU->ECEF gives the transformation from grid-space to satellite frame space,
// so that's the rotation we actually want.
if (i == numChan * timeGridDim) {
double posLL[2];
CHM_Dev_ECEF2LL_Rad(centerPt, posLL);
CHM_Dev_R_ENU2ECEF(posLL, enu2ecefMat);
}
// Otherwise, find the satellite state you're responsible for rotating
// We know the satellite's location from the channel parameters -- it's a function of the received signal.
// However, the state we compute from the channel parameters needs to consider the rotation of the Earth
// for EACH time being evaluated on the grid. So, the multiple satellite states being computed here are the same
// location in different ECEF frame-times.
else {
// For this satellite position, compute the transmit time (TOF) to the candidate point
satPosTransmitTime = rxTime - (txTime[currChan] + ((timeGrid[currPt] + centerPt[3])/(double)CONST_C)) + satStates[currChan].delta_t; // More conceptually accurate
// Convert the satellite and coordinate positions to ECI to add
cos_tau_OEDot = cos(-CONST_OEDot*satPosTransmitTime);
sin_tau_OEDot = sin(-CONST_OEDot*satPosTransmitTime);
// batchSatStates contains the states for all grid points for the first channel, then for the second channel, ...
// Rotate the satellite position over the earth for this point's pvt state
batchSatStates[i].x = cos_tau_OEDot * satStates[currChan].x
- sin_tau_OEDot * satStates[currChan].y;
batchSatStates[i].y = sin_tau_OEDot * satStates[currChan].x
+ cos_tau_OEDot * satStates[currChan].y;
batchSatStates[i].z = satStates[currChan].z;
batchSatStates[i].delta_t = satStates[currChan].delta_t;
batchSatStates[i].x_dot = cos_tau_OEDot * satStates[currChan].x_dot
- sin_tau_OEDot * satStates[currChan].y_dot
- CONST_OEDot * sin_tau_OEDot * satStates[currChan].x
- CONST_OEDot * cos_tau_OEDot * satStates[currChan].y;
batchSatStates[i].y_dot = sin_tau_OEDot * satStates[currChan].x_dot
+ cos_tau_OEDot * satStates[currChan].y_dot
+ CONST_OEDot * cos_tau_OEDot * satStates[currChan].x
- CONST_OEDot * sin_tau_OEDot * satStates[currChan].y;
batchSatStates[i].z_dot = satStates[currChan].z_dot;
batchSatStates[i].delta_t_dot = satStates[currChan].delta_t_dot;
}
i += stride;
}
}
dsp::cuChanMgr::cuChanMgr(){
ModuleName = "cuChanMgr";
AllocateInputs(15);
AllocateOutputs(18);
Started = 0;
/**
* INPUT CONFIGURATION
*/
// Configure inputs
ConfigExpectedInput(0, "InitEph", UNDEFINED_t, EPHEMS, VECTORLENGTH_ANY);
ConfigExpectedInput(1, "InitPRN", CHAR_t, VALUE, VECTORLENGTH_ANY);
ConfigExpectedInput(2, "InitCodePhase", DOUBLE_t, VALUE, VECTORLENGTH_ANY);
ConfigExpectedInput(3, "InitCarrierPhase", DOUBLE_t, VALUE, VECTORLENGTH_ANY);
ConfigExpectedInput(4, "InitCodeFrequency", DOUBLE_t, FREQUENCY_HZ, VECTORLENGTH_ANY);
ConfigExpectedInput(5, "InitCarrierFrequency", DOUBLE_t, FREQUENCY_HZ, VECTORLENGTH_ANY);
ConfigExpectedInput(6, "InitElapsedCodePeriods", INT_t, VALUE, VECTORLENGTH_ANY);
ConfigExpectedInput(7, "InitReferenceCodePeriods", INT_t, VALUE, VECTORLENGTH_ANY);
ConfigExpectedInput(8, "InitCPRefTOW", INT_t, VALUE, VECTORLENGTH_ANY);
ConfigExpectedInput(9, "InitRXTime", DOUBLE_t, VALUE, VECTORLENGTH_ANY);
ConfigExpectedInput(10, "xCurrk1k1", DOUBLE_t, STATE, VECTORLENGTH_ANY);
ConfigExpectedInput(11, "SampleLength", DOUBLE_t, VALUE, 1);
ConfigExpectedInput(12, "xCurrkk1", DOUBLE_t, STATE, VECTORLENGTH_ANY);
ConfigExpectedInput(13, "TimeGrid", DOUBLE_t, VALUE, VECTORLENGTH_ANY);
InsertParam("DopplerSign", (void*)&dopplerSign, INT_t, sizeof(int), sizeof(int));
std::clog << "[" << ModuleName << "] Configured inputs" << std::endl;
// Note: Phase and cpEla params are saved wrt both the start AND end of the sample set!
// BatchCorrScores wants references wrt the start so indices can be counted forwards
// State estimation is done wrt the end, since we have that full set of samples
// Frequency measurements are wrt the end, though this really just means they include the most recent measurement
// This is consistent with PyGNSS:
// rxTime+=T -> TimeUpdateState -> BatchCorr -> TimeUpdateChannels -> ManifoldEstimation -> MeasUpdateState -> MeasUpdateChannels
// PyGNSS does it that way so BatchCorr uses start-referenced params and ManifoldEstimation uses end-referenced params.
// Though, doing the param updating all at once and denoting which is start-referenced and end-referenced is clearer.
ConfigOutput(0, "rxTime", DOUBLE_t, VALUE, HOST, 1, NULL, 0);
ConfigOutput(1, "txTime", DOUBLE_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(2, "CodePhaseStart", DOUBLE_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(3, "CarrierPhaseStart", DOUBLE_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(4, "CodePhaseEnd", DOUBLE_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(5, "CarrierPhaseEnd", DOUBLE_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(6, "CodeFrequency", DOUBLE_t, FREQUENCY_HZ, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(7, "CarrierFrequency", DOUBLE_t, FREQUENCY_HZ, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(8, "SatStates", DOUBLE_t, STATE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(9, "DopplerSign", INT_t, VALUE, CUDA_DEVICE, 1, NULL, 0);
ConfigOutput(10, "ValidPRNs", CHAR_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(11, "cpReference", INT_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(12, "cpElapsedStart", INT_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(13, "cpElapsedEnd", INT_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(14, "ENU2ECEFMat", DOUBLE_t, VALUE, CUDA_DEVICE, 9, NULL, 0);
ConfigOutput(15, "SatStatesOld", DOUBLE_t, STATE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(16, "cpRef", INT_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
ConfigOutput(17, "cpRefTOW", INT_t, VALUE, CUDA_DEVICE, VECTORLENGTH_ANY, NULL, 0);
}
dsp::cuChanMgr::~cuChanMgr(){
if (Started) Stop();
delete [] ephSetArr;
delete [] inputs;
delete [] outputs;
delete [] expectedInputs;
}
int
dsp::cuChanMgr::Start(void* cuFlowStream) {
// Check module status and report accordingly
if (Started) {
std::clog << "[" << ModuleName << "] Start: Already Started." << std::endl;
return 0;
}
std::clog << "[" << ModuleName << "] Starting ... " << std::flush;
// Set the CUDA Stream for the GPU operations
cuStream = (cudaStream_t*)cuFlowStream;
// Get inputs
// Determine how many channels are being tracked
numChan = inputs[1]->VectorLength;
// Get the pointer to the state (on device)
xk1k1_d = (double*)inputs[10]->Data;
xkk1_d = (double*)inputs[12]->Data;
// Get manifold info
timeGrid_d = (double*)inputs[13]->Data;
timeGridDim = inputs[13]->VectorLength;
// Get the starting time
rxTime = *((double*)(inputs[9]->Data));
// Length of one sample set (should be 1ms)
T = *((double*)(inputs[11]->Data));
// Round to the nearest 1us to ensure there is no trailing garbage precision
T = round(T*1.0e6)/1.0e6;
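// e.g. a nominal 1 ms sample set that arrives as 0.0009999999997 is snapped back to exactly 0.001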
// Copy processing parameters to device constant memory
cuCheckMSt(cudaMemcpyToSymbol(CHM_LCA_d, &numCA, sizeof(int), 0, cudaMemcpyHostToDevice));
cuCheckMSt(cudaMemcpyToSymbol(CHM_T_d, &T, sizeof(double), 0, cudaMemcpyHostToDevice));
// Get the input parameters copied into local device memory
int size = sizeof(uint8_t)*CONST_PRN_MAX;
cuCheckMSt(cudaMalloc((void**)&PRNs_d, size));
cuCheckMSt(cudaMemcpyAsync(PRNs_d, (uint8_t*)inputs[1]->Data, size, cudaMemcpyHostToDevice, *cuStream));
// Channel parameters and friends
size = sizeof(double)*CONST_PRN_MAX;
cuCheckMSt(cudaMalloc((void**)&codePhaseStart_d, size));
cuCheckMSt(cudaMalloc((void**)&codePhaseEnd_d, size));
cuCheckMSt(cudaMemcpyAsync(codePhaseEnd_d, (double*)inputs[2]->Data, size, cudaMemcpyHostToDevice, *cuStream));
cuCheckMSt(cudaMalloc((void**)&carrierPhaseStart_d, size));
cuCheckMSt(cudaMalloc((void**)&carrierPhaseEnd_d, size));
cuCheckMSt(cudaMemcpyAsync(carrierPhaseEnd_d, (double*)inputs[3]->Data, size, cudaMemcpyHostToDevice, *cuStream));
cuCheckMSt(cudaMalloc((void**)&codeFrequency_d, size));
cuCheckMSt(cudaMemcpyAsync(codeFrequency_d, (double*)inputs[4]->Data, size, cudaMemcpyHostToDevice, *cuStream));
cuCheckMSt(cudaMalloc((void**)&carrierFrequency_d, size));
cuCheckMSt(cudaMemcpyAsync(carrierFrequency_d, (double*)inputs[5]->Data, size, cudaMemcpyHostToDevice, *cuStream));
cuCheckMSt(cudaMalloc((void**)&txTime_d, size));
size = sizeof(int)*CONST_PRN_MAX;
cuCheckMSt(cudaMalloc((void**)&cpElapsedStart_d, size));
cuCheckMSt(cudaMalloc((void**)&cpElapsedEnd_d, size));
cuCheckMSt(cudaMemcpyAsync(cpElapsedEnd_d, (int*)inputs[6]->Data, size, cudaMemcpyHostToDevice, *cuStream));
cuCheckMSt(cudaMalloc((void**)&cpReference_d, size));
cuCheckMSt(cudaMemcpyAsync(cpReference_d, (int*)inputs[7]->Data, size, cudaMemcpyHostToDevice, *cuStream));
cuCheckMSt(cudaMalloc((void**)&TOWcpReference_d, size));
cuCheckMSt(cudaMemcpyAsync(TOWcpReference_d, (int*)inputs[8]->Data, size, cudaMemcpyHostToDevice, *cuStream));
cuCheckMSt(cudaMalloc((void**)&dopplerSign_d, size));
cuCheckMSt(cudaMemcpyAsync(dopplerSign_d, &dopplerSign, sizeof(int), cudaMemcpyHostToDevice, *cuStream));
cuCheckMSt(cudaMalloc((void**)&ephToUse_d, size));
// PVT state of each satellite as calculated by the channel params
size = sizeof(dsp::utils::state_t<double>)*CONST_PRN_MAX;
cuCheckMSt(cudaMalloc((void**)&satStates_d, size));
// Satellite state things
cuCheckMSt(cudaMalloc((void**)&batchSatStates_d, sizeof(dsp::utils::state_t<double>)*CONST_PRN_MAX*timeGridDim));
cuCheckMSt(cudaMalloc((void**)&enu2ecefMat_d, sizeof(double)*9));
// Load ephemeris
ephSetVec = *((std::vector<dsp::utils::ephSet_t>*)(inputs[0]->Data));
ephSetSize = ephSetVec.size();
// Convert eph's from vector to array so cudaMemcpy can easily copy to device
ephSetArr = new dsp::utils::ephSet_t[ephSetSize];
for (int i = 0; i < ephSetSize; i++) { ephSetArr[i].copyInto(ephSetVec[i]); }
// Allocate space for every ephSet in navData
size = sizeof(dsp::utils::ephSet_t)*ephSetSize;
cuCheckMSt(cudaMalloc((void**)&ephSetPtr_d, size));
cuCheckMSt(cudaMemcpyAsync(ephSetPtr_d, ephSetArr, size, cudaMemcpyHostToDevice, *cuStream));
// Compute satellite states for the first computation
// (launching kernels with fewer than 32 threads can be less efficient, even if only <10 channels are being tracked)
CHM_ComputeSatStates<<<1, auxil::roundUpToNextPowerOfTwo(CONST_PRN_MAX), 0, *cuStream>>>
(ephSetPtr_d, ephSetSize, ephToUse_d,
satStates_d,
codePhaseEnd_d,
cpElapsedEnd_d, cpReference_d, TOWcpReference_d,
PRNs_d, numChan,
txTime_d);
// Propagate the channels for the next iteration
// (launching kernels with fewer than 32 threads can be less efficient, even if only <10 channels are being tracked)
CHM_TimeUpdateChannels<<<1, auxil::roundUpToNextPowerOfTwo(CONST_PRN_MAX), 0, *cuStream>>>
(ephSetPtr_d, ephSetSize, ephToUse_d,
xk1k1_d, satStates_d,
codePhaseStart_d, codePhaseEnd_d, codeFrequency_d,
carrierPhaseStart_d, carrierPhaseEnd_d, carrierFrequency_d, dopplerSign_d,
cpElapsedStart_d, cpElapsedEnd_d, cpReference_d, TOWcpReference_d,
PRNs_d, numChan,
rxTime, txTime_d, T);
// Advance the rxTime to the end of the next sample set
// (needs to happen after the propagate because MeasUpdate uses the rxTime of this iteration)
rxTime += T;
// Compute the rotated satellite positions
prepBlockCount = floor((numChan*timeGridDim+1)/64)+1;
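// One thread per (channel, grid point) pair plus one thread for the rotation matrix, at 64 threads per block
// (integer division plus one, so there are always enough blocks).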
// Shouldn't need to update sat states here since CHM_TimeUpdateChannels does it
// Compute the satellite state from the ephemerides using the channel parameters
// Rotate the satellites over the earth for every candidate grid point
CHM_GridPrep<<<prepBlockCount, 64, 0, *cuStream>>> (rxTime, txTime_d, xkk1_d, satStates_d,
numChan, timeGrid_d, timeGridDim,
batchSatStates_d, enu2ecefMat_d);
// Now that space is allocated, assign the position buffer and size
outputs[0].Data = (void*)&rxTime;
outputs[0].VectorLength = 1;
outputs[1].Data = txTime_d;
outputs[1].VectorLength = numChan;
outputs[2].Data = codePhaseStart_d;
outputs[2].VectorLength = numChan;
outputs[3].Data = carrierPhaseStart_d;
outputs[3].VectorLength = numChan;
outputs[4].Data = codePhaseEnd_d;
outputs[4].VectorLength = numChan;
outputs[5].Data = carrierPhaseEnd_d;
outputs[5].VectorLength = numChan;
outputs[6].Data = codeFrequency_d;
outputs[6].VectorLength = numChan;
outputs[7].Data = carrierFrequency_d;
outputs[7].VectorLength = numChan;
outputs[8].Data = batchSatStates_d;
outputs[8].VectorLength = numChan*timeGridDim;
outputs[9].Data = dopplerSign_d;
outputs[9].VectorLength = numChan;
outputs[10].Data = PRNs_d;
outputs[10].VectorLength = numChan;
outputs[11].Data = cpReference_d;
outputs[11].VectorLength = numChan;
outputs[12].Data = cpElapsedStart_d;
outputs[12].VectorLength = numChan;
outputs[13].Data = cpElapsedEnd_d;
outputs[13].VectorLength = numChan;
outputs[14].Data = enu2ecefMat_d;
outputs[14].VectorLength = 9;
outputs[15].Data = satStates_d;
outputs[15].VectorLength = numChan;
outputs[16].Data = cpReference_d;
outputs[16].VectorLength = numChan;
outputs[17].Data = TOWcpReference_d;
outputs[17].VectorLength = numChan;
// Make sure all GPU tasks have completed before continuing
cuCheckMSt(cudaStreamSynchronize(*cuStream));
cuCheckMSt(cudaDeviceSynchronize());
// Signifies that the next call to update() will be the first after start()
Started = 1;
std::clog << "Started." << std::endl;
return 0;
}
int
dsp::cuChanMgr::Stop() {
int ret = 0;
if (Started == 0) {
std::clog << "[" << ModuleName << "] Stop: Wasn't Started." << std::endl;
return 0;
}
std::clog << "[" << ModuleName << "] Stopping ... " << std::flush;
// Free device memory
cuCheckMSp(cudaFree((void*)codePhaseStart_d));
cuCheckMSp(cudaFree((void*)codePhaseEnd_d));
cuCheckMSp(cudaFree((void*)carrierPhaseStart_d));
cuCheckMSp(cudaFree((void*)carrierPhaseEnd_d));
cuCheckMSp(cudaFree((void*)codeFrequency_d));
cuCheckMSp(cudaFree((void*)carrierFrequency_d));
cuCheckMSp(cudaFree((void*)cpElapsedStart_d));
cuCheckMSp(cudaFree((void*)cpElapsedEnd_d));
cuCheckMSp(cudaFree((void*)cpReference_d));
cuCheckMSp(cudaFree((void*)TOWcpReference_d));
cuCheckMSp(cudaFree((void*)dopplerSign_d));
cuCheckMSp(cudaFree((void*)ephSetPtr_d));
cuCheckMSp(cudaFree((void*)satStates_d));
cuCheckMSp(cudaFree((void*)batchSatStates_d));
cuCheckMSp(cudaFree((void*)enu2ecefMat_d));
Started = 0;
std::clog << "Stopped." << std::endl;
return ret;
}
int
dsp::cuChanMgr::Update(void* cuFlowStream) {
if (Started == 0){
std::cerr << "[" << ModuleName
<< "] Error: Update() Failed due to SatPos not initialized"
<< std::endl;
return -1;
}
// Propagate the channels for the next iteration
// (launching kernels with fewer than 32 threads can be less efficient, even if only <10 channels are being tracked)
CHM_PropagateChannels<<<1, auxil::roundUpToNextPowerOfTwo(CONST_PRN_MAX), 0, *cuStream>>>
(ephSetPtr_d, ephSetSize, ephToUse_d,
xk1k1_d, satStates_d,
codePhaseStart_d, codePhaseEnd_d, codeFrequency_d,
carrierPhaseStart_d, carrierPhaseEnd_d, carrierFrequency_d, dopplerSign_d,
cpElapsedStart_d, cpElapsedEnd_d, cpReference_d, TOWcpReference_d,
PRNs_d, numChan,
rxTime, txTime_d, T);
// Advance the rxTime to the end of the next sample set
// (needs to happen after the propagate because MeasUpdate uses the rxTime of this iteration)
rxTime += T;
// Compute the rotated satellite positions
prepBlockCount = floor((numChan*timeGridDim+1)/64)+1;
// Shouldn't need to run CHM_ComputeSatStates since CHM_PropagateChannels does this at the end
// Determine the satellite positions from the ephemerides using the channel parameters
// Rotate the satellite over the earth for each candidate grid point
CHM_GridPrep<<<prepBlockCount, 64, 0, *cuStream>>> (rxTime, txTime_d, xkk1_d, satStates_d,
numChan, timeGrid_d, timeGridDim,
batchSatStates_d, enu2ecefMat_d);
// Block on host until channel parameters have updated
cuCheckMSt(cudaStreamSynchronize(*cuStream));
return 0;
}
|
9a050c9acd7f1d61f850f197a9b2d45744dd351c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! \file Sort.cu
\author Gregory Diamos <gregory.diamos>
\date Wednesday December 1, 2010
\brief The source file for the C interface to CUDA sorting routines.
*/
// Redfox Includes
#include <redfox/nvcc/interface/RelationalAlgebraKernel.h>
#include <redfox/ra/interface/ModernGPUJoin.h>
#include <redfox/ra/interface/Tuple.h>
#include <redfox/ra/interface/moderngpu/include/kernels/join.cuh>
#include <stdio.h>
#include <iostream>
#include <string.h>
class gpu128
{
public:
typedef long long unsigned int type;
public:
type a[2];
};
struct compare_sort_gpu128
{
typedef gpu128 type;
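// Lexicographic less-than on 128-bit keys stored as two 64-bit words, comparing the high word a[1] first.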
__host__ __device__
bool operator()(type i, type j)
{
if (i.a[1] != j.a[1])
return (i.a[1] < j.a[1]);
return (i.a[0] < j.a[0]);
}
};
namespace redfox
{
struct compare_string
{
__host__ __device__
bool operator()(unsigned char * i, unsigned char *j)
{
const char *string1 = (char *) i;
const char *string2 = (char *) j;
// return(strcmp(string1, string2) < 0);
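// Manual lexicographic compare; strcmp is not callable from device code, so it is spelled out below.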
int ii = 0;
while(string1[ii] != '\0' && string2[ii] != '\0')
{
if(string1[ii] < string2[ii])
return true;
else if(string1[ii] > string2[ii])
return false;
ii++;
}
if(string1[ii] == '\0' && string2[ii] != '\0')
return true;
else
return false;
}
};
struct compare_string2
{
__host__ __device__
bool operator()(unsigned long long int i, unsigned long long int j)
{
const char *string1 = (char *) i;
const char *string2 = (char *) j;
// return(strcmp(string1, string2) < 0);
int ii = 0;
while(string1[ii] != '\0' && string2[ii] != '\0')
{
if(string1[ii] < string2[ii])
return true;
else if(string1[ii] > string2[ii])
return false;
ii++;
}
if(string1[ii] == '\0' && string2[ii] != '\0')
return true;
else
return false;
}
};
void check(hipError_t status)
{
if(status != hipSuccess)
{
std::cerr << hipGetErrorString(status) << "\n";
std::abort();
}
}
void find_bounds_128(void* lower_bound, void* left_count, unsigned long long int *result_size,
void* left_key, unsigned long long int left_size,
void* right_key, unsigned long long int right_size)
{
// unsigned short left[100];
// unsigned short right[100];
// unsigned long long int *left_host = (unsigned long long int*)malloc(16);
// unsigned long long int *right_host = (unsigned long long int*)malloc(6001215 * 8);
// check(hipMemcpy(left_host, (unsigned long long int *)left_key, 16,
// hipMemcpyDeviceToHost));
//
// check(hipMemcpy(right_host, (unsigned long long int *)right_key, 6001215 * 8,
// hipMemcpyDeviceToHost));
//
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("%u %x %x\n", i, left[i], right[i]);
// int *left_host = (int *)malloc(30142*sizeof(int));
// check(hipMemcpy(left_host, (int *)left_key, 30142*sizeof(int),
// hipMemcpyDeviceToHost));
// int *right_host = (int *)malloc(1500000*sizeof(int));
// check(hipMemcpy(right_host, (int *)right_key, 1500000*sizeof(int),
// hipMemcpyDeviceToHost));
// for(unsigned int i = 0; i < 2; ++i)
// printf("left %u %llx\n", i, left_host[i]);
// for(unsigned int i = 0; i < 100; ++i)
// printf("right %u %llx\n", i, right_host[i]);
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("right %u %llx\n", i, right_host[i]);
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
float exe_time = 0.0f;
hipEventRecord(start,0);
// size_t freeMem, totalMem;
// hipMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
//
// std::cout << left_size << " " << right_size << "\n";
// mgpu::ContextPtr context = mgpu::CreateCudaDevice(0);
// context->Start();
typedef gpu128 type;
const mgpu::MgpuSearchType LeftType = mgpu::MgpuSearchTypeIndex;
const mgpu::MgpuBounds Bounds = mgpu::MgpuBoundsLower;
const mgpu::MgpuSearchType RightType = mgpu::MgpuSearchTypeNone;
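// Three-step bound computation for a sorted-key equijoin:
// 1) SortedSearch (lower bounds): for each left key, the index where its match range starts in the right array.
// 2) SortedEqualityCount: the number of right keys equal to each left key.
// 3) Scan: exclusive-scan of the counts into output offsets; the scan total is the join's output size.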
mgpu::SortedSearch<Bounds, LeftType, RightType, type*, type*, int *, int *>((type*)left_key,
(int)left_size, (type*)right_key, (int)right_size, (int *)lower_bound, (int*)0, compare_sort_gpu128(),
/**context,*/ (int *)0, (int *)0);
//int *lower_bound_host = (int *)malloc(2*sizeof(int));
//check(hipMemcpy(lower_bound_host, (int *)lower_bound, 2*sizeof(int),
// hipMemcpyDeviceToHost));
//std::cout << "lower bound 0: " << lower_bound_host[0] << " lower bound 1: " << lower_bound_host[1] << "\n";
mgpu::SortedEqualityCount<type*, type *, int *, int *, struct compare_sort_gpu128, struct mgpu::SortedEqualityOp>
((type *)left_key, (int)left_size,
(type *)right_key, (int)right_size, (int *)lower_bound, (int *)left_count,
compare_sort_gpu128(), mgpu::SortedEqualityOp()/*,
*context*/);
//int *left_count_host = (int *)malloc(2*sizeof(int));
//check(hipMemcpy(left_count_host, (int *)left_count, 2*sizeof(int),
// hipMemcpyDeviceToHost));
//std::cout << "left count 0: " << left_count_host[0] << " left count 1: " << left_count_host[1] << "\n";
// Scan the product counts. This is part of the load-balancing search.
unsigned long long int total = mgpu::Scan((int *)left_count, (int)left_size/*, *context*/);
//printf("total %llu\n", total);
check(hipMemcpy(result_size, &total, sizeof(unsigned long long int),
hipMemcpyHostToDevice));
// exe_time += context->Split();
// hipMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&exe_time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("join_find_bounds %lf\n", exe_time);
printf("**********************join size %llu\n", total);
// check(hipMemcpy(data_key, (unsigned char *)key_begin, 10,
// hipMemcpyDeviceToHost));
//
// check(hipMemcpy(data_value, (double *)value_begin, 80,
// hipMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
}
void find_bounds_string(void* lower_bound, void* left_count, unsigned long long int *result_size,
void* left_key, unsigned long long int left_size,
void* right_key, unsigned long long int right_size)
{
// unsigned short left[100];
// unsigned short right[100];
// unsigned long long int *left_host = (unsigned long long int*)malloc(16);
// unsigned long long int *right_host = (unsigned long long int*)malloc(6001215 * 8);
// check(hipMemcpy(left_host, (unsigned long long int *)left_key, 16,
// hipMemcpyDeviceToHost));
//
// check(hipMemcpy(right_host, (unsigned long long int *)right_key, 6001215 * 8,
// hipMemcpyDeviceToHost));
//
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("%u %x %x\n", i, left[i], right[i]);
// int *left_host = (int *)malloc(30142*sizeof(int));
// check(hipMemcpy(left_host, (int *)left_key, 30142*sizeof(int),
// hipMemcpyDeviceToHost));
// int *right_host = (int *)malloc(1500000*sizeof(int));
// check(hipMemcpy(right_host, (int *)right_key, 1500000*sizeof(int),
// hipMemcpyDeviceToHost));
// for(unsigned int i = 0; i < 2; ++i)
// printf("left %u %llx\n", i, left_host[i]);
// for(unsigned int i = 0; i < 100; ++i)
// printf("right %u %llx\n", i, right_host[i]);
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("right %u %llx\n", i, right_host[i]);
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
float exe_time = 0.0f;
hipEventRecord(start,0);
// size_t freeMem, totalMem;
// hipMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
//
// std::cout << left_size << " " << right_size << "\n";
// mgpu::ContextPtr context = mgpu::CreateCudaDevice(0);
// context->Start();
const mgpu::MgpuSearchType LeftType = mgpu::MgpuSearchTypeIndex;
const mgpu::MgpuBounds Bounds = mgpu::MgpuBoundsLower;
const mgpu::MgpuSearchType RightType = mgpu::MgpuSearchTypeNone;
mgpu::SortedSearch<Bounds, LeftType, RightType, unsigned long long int*, unsigned long long int*, int *, int *>((unsigned long long int*)left_key,
(int)left_size, (unsigned long long int*)right_key, (int)right_size, (int *)lower_bound, (int*)0, compare_string2(),/*mgpu::less<T>(),
*context,*/ (int *)0, (int *)0);
//int *lower_bound_host = (int *)malloc(2*sizeof(int));
//check(hipMemcpy(lower_bound_host, (int *)lower_bound, 2*sizeof(int),
// hipMemcpyDeviceToHost));
//std::cout << "lower bound 0: " << lower_bound_host[0] << " lower bound 1: " << lower_bound_host[1] << "\n";
mgpu::SortedEqualityCount<unsigned long long int*, unsigned long long int *, int *, int *, struct compare_string2, struct mgpu::SortedEqualityOp>
((unsigned long long int *)left_key, (int)left_size,
(unsigned long long int *)right_key, (int)right_size, (int *)lower_bound, (int *)left_count,
compare_string2(), mgpu::SortedEqualityOp()/*,
*context*/);
//int *left_count_host = (int *)malloc(2*sizeof(int));
//check(hipMemcpy(left_count_host, (int *)left_count, 2*sizeof(int),
// hipMemcpyDeviceToHost));
//std::cout << "left count 0: " << left_count_host[0] << " left count 1: " << left_count_host[1] << "\n";
// Scan the product counts. This is part of the load-balancing search.
unsigned long long int total = mgpu::Scan((int *)left_count, (int)left_size/*, *context*/);
//printf("total %llu\n", total);
check(hipMemcpy(result_size, &total, sizeof(unsigned long long int),
hipMemcpyHostToDevice));
// exe_time += context->Split();
// hipMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&exe_time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("join_find_bounds %lf\n", exe_time);
printf("**********************join size %llu\n", total);
// check(hipMemcpy(data_key, (unsigned char *)key_begin, 10,
// hipMemcpyDeviceToHost));
//
// check(hipMemcpy(data_value, (double *)value_begin, 80,
// hipMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
}
void find_bounds_64(void* lower_bound, void* left_count, unsigned long long int *result_size,
void* left_key, unsigned long long int left_size,
void* right_key, unsigned long long int right_size)
{
// unsigned short left[100];
// unsigned short right[100];
// unsigned long long int *left_host = (unsigned long long int*)malloc(16);
// unsigned long long int *right_host = (unsigned long long int*)malloc(6001215 * 8);
// check(hipMemcpy(left_host, (unsigned long long int *)left_key, 16,
// hipMemcpyDeviceToHost));
//
// check(hipMemcpy(right_host, (unsigned long long int *)right_key, 6001215 * 8,
// hipMemcpyDeviceToHost));
//
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("%u %x %x\n", i, left[i], right[i]);
// int *left_host = (int *)malloc(30142*sizeof(int));
// check(hipMemcpy(left_host, (int *)left_key, 30142*sizeof(int),
// hipMemcpyDeviceToHost));
// int *right_host = (int *)malloc(1500000*sizeof(int));
// check(hipMemcpy(right_host, (int *)right_key, 1500000*sizeof(int),
// hipMemcpyDeviceToHost));
// for(unsigned int i = 0; i < 2; ++i)
// printf("left %u %llx\n", i, left_host[i]);
// for(unsigned int i = 0; i < 100; ++i)
// printf("right %u %llx\n", i, right_host[i]);
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("right %u %llx\n", i, right_host[i]);
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
float exe_time = 0.0f;
hipEventRecord(start,0);
// size_t freeMem, totalMem;
// hipMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
//
// std::cout << left_size << " " << right_size << "\n";
// mgpu::ContextPtr context = mgpu::CreateCudaDevice(0);
// context->Start();
const mgpu::MgpuSearchType LeftType = mgpu::MgpuSearchTypeIndex;
const mgpu::MgpuBounds Bounds = mgpu::MgpuBoundsLower;
const mgpu::MgpuSearchType RightType = mgpu::MgpuSearchTypeNone;
mgpu::SortedSearch<Bounds, LeftType, RightType, unsigned long long int*, unsigned long long int*, int *, int *>((unsigned long long int*)left_key,
(int)left_size, (unsigned long long int*)right_key, (int)right_size, (int *)lower_bound, (int*)0, /*mgpu::less<T>(),
*context,*/ (int *)0, (int *)0);
//int *lower_bound_host = (int *)malloc(2*sizeof(int));
//check(hipMemcpy(lower_bound_host, (int *)lower_bound, 2*sizeof(int),
// hipMemcpyDeviceToHost));
//std::cout << "lower bound 0: " << lower_bound_host[0] << " lower bound 1: " << lower_bound_host[1] << "\n";
mgpu::SortedEqualityCount<unsigned long long int*, unsigned long long int *, int *, int *, struct mgpu::SortedEqualityOp>
((unsigned long long int *)left_key, (int)left_size,
(unsigned long long int *)right_key, (int)right_size, (int *)lower_bound, (int *)left_count,
mgpu::SortedEqualityOp()/*,
*context*/);
//int *left_count_host = (int *)malloc(2*sizeof(int));
//check(hipMemcpy(left_count_host, (int *)left_count, 2*sizeof(int),
// hipMemcpyDeviceToHost));
//std::cout << "left count 0: " << left_count_host[0] << " left count 1: " << left_count_host[1] << "\n";
// Scan the product counts. This is part of the load-balancing search.
unsigned long long int total = mgpu::Scan((int *)left_count, (int)left_size/*, *context*/);
//printf("total %llu\n", total);
check(hipMemcpy(result_size, &total, sizeof(unsigned long long int),
hipMemcpyHostToDevice));
// exe_time += context->Split();
// hipMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&exe_time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("join_find_bounds %lf\n", exe_time);
printf("**********************join size %llu\n", total);
// check(hipMemcpy(data_key, (unsigned char *)key_begin, 10,
// hipMemcpyDeviceToHost));
//
// check(hipMemcpy(data_value, (double *)value_begin, 80,
// hipMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
}
void find_bounds_16(void* lower_bound, void* left_count, unsigned long long int *result_size,
void* left_key, unsigned long long int left_size,
void* right_key, unsigned long long int right_size)
{
// unsigned short left[100];
// unsigned short right[100];
// unsigned short *left_host = (unsigned short*)malloc(6001215*2);
// unsigned short *right = (unsigned short*)malloc(100*2);
// check(hipMemcpy(left_host, (unsigned short *)left_key, 6001215*2,
// hipMemcpyDeviceToHost));
// check(hipMemcpy(right, (unsigned short *)right_key, 200,
// hipMemcpyDeviceToHost));
//
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("%u %x %x\n", i, left[i], right[i]);
// int *left_host = (int *)malloc(30142*sizeof(int));
// check(hipMemcpy(left_host, (int *)left_key, 30142*sizeof(int),
// hipMemcpyDeviceToHost));
// int *right_host = (int *)malloc(1500000*sizeof(int));
// check(hipMemcpy(right_host, (int *)right_key, 1500000*sizeof(int),
// hipMemcpyDeviceToHost));
// for(unsigned int i = 0; i < 6001215; ++i)
// if(left_host[i] < left_host[i - 1]) printf("error %d %d %d\n", i, left_host[i], left_host[i - 1]);
// printf("left %u %u\n", i, left_host[i]);
// for(unsigned int i = 0; i < 100; ++i)
// if(right_host[i] < right_host[i - 1]) printf("error %d %d %d\n", i, right_host[i], right_host[i - 1]);
// printf("right %u %u\n", i, right[i]);
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
float exe_time = 0.0f;
hipEventRecord(start,0);
// size_t freeMem, totalMem;
// hipMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
//
// std::cout << left_size << " " << right_size << "\n";
// mgpu::ContextPtr context = mgpu::CreateCudaDevice(0);
// context->Start();
typedef unsigned short* T;
const mgpu::MgpuSearchType LeftType = mgpu::MgpuSearchTypeIndex;
const mgpu::MgpuBounds Bounds = mgpu::MgpuBoundsLower;
const mgpu::MgpuSearchType RightType = mgpu::MgpuSearchTypeNone;
mgpu::SortedSearch<Bounds, LeftType, RightType, unsigned short *, unsigned short *, int *, int *>((unsigned short*)left_key,
(int)left_size, (unsigned short *)right_key, (int)right_size, (int *)lower_bound, (int*)0, /*mgpu::less<T>(),
*context,*/ (int *)0, (int *)0);
mgpu::SortedEqualityCount<unsigned short*, unsigned short*, int *, int *, struct mgpu::SortedEqualityOp>
((unsigned short*)left_key, (int)left_size,
(unsigned short*)right_key, (int)right_size, (int *)lower_bound, (int *)left_count, mgpu::SortedEqualityOp()/*,
*context*/);
// Scan the product counts. This is part of the load-balancing search.
unsigned long long int total = mgpu::Scan((int *)left_count, (int)left_size/*, *context*/);
//printf("total %llu\n", total);
check(hipMemcpy(result_size, &total, sizeof(unsigned long long int),
hipMemcpyHostToDevice));
// exe_time += context->Split();
// hipMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&exe_time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("join_find_bounds %lf\n", exe_time);
printf("**********************join size %llu\n", total);
// check(hipMemcpy(data_key, (unsigned char *)key_begin, 10,
// hipMemcpyDeviceToHost));
//
// check(hipMemcpy(data_value, (double *)value_begin, 80,
// hipMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
}
void find_bounds_32(void* lower_bound, void* left_count, unsigned long long int *result_size,
void* left_key, unsigned long long int left_size,
void* right_key, unsigned long long int right_size)
{
// unsigned int left[6001215];
// unsigned int right[6001215];
// unsigned int *left = (unsigned int*)malloc(6001215*4);
// unsigned int *right = (unsigned int*)malloc(6001215*4);
// check(hipMemcpy(left, (unsigned int *)left_key, 4*6001215,
// hipMemcpyDeviceToHost));
//
// check(hipMemcpy(right, (unsigned int *)right_key, 4*6001215,
// hipMemcpyDeviceToHost));
//
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("%u %x %x\n", i, left[i], right[i]);
// int *left_host = (int *)malloc(30142*sizeof(int));
// check(hipMemcpy(left_host, (int *)left_key, 30142*sizeof(int),
// hipMemcpyDeviceToHost));
// int *right_host = (int *)malloc(1500000*sizeof(int));
// check(hipMemcpy(right_host, (int *)right_key, 1500000*sizeof(int),
// hipMemcpyDeviceToHost));
// for(unsigned int i = 1; i < 100; ++i)
// if(left_host[i] < left_host[i - 1]) printf("error %d %d %d\n", i, left_host[i], left_host[i - 1]);
// printf("left %d %d\n", i, left_host[i]);
// for(unsigned int i = 1; i < 100; ++i)
// if(right_host[i] < right_host[i - 1]) printf("error %d %d %d\n", i, right_host[i], right_host[i - 1]);
// printf("right %d %d\n", i, right_host[i]);
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
float exe_time = 0.0f;
hipEventRecord(start,0);
// size_t freeMem, totalMem;
// hipMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
//
// std::cout << left_size << " " << right_size << "\n";
// mgpu::ContextPtr context = mgpu::CreateCudaDevice(0);
// context->Start();
typedef unsigned int* T;
const mgpu::MgpuSearchType LeftType = mgpu::MgpuSearchTypeIndex;
const mgpu::MgpuBounds Bounds = mgpu::MgpuBoundsLower;
const mgpu::MgpuSearchType RightType = mgpu::MgpuSearchTypeNone;
mgpu::SortedSearch<Bounds, LeftType, RightType, unsigned int *, unsigned int *, int *, int *>((unsigned int*)left_key,
(int)left_size, (unsigned int *)right_key, (int)right_size, (int *)lower_bound, (int*)0, /*mgpu::less<T>(),
*context,*/ (int *)0, (int *)0);
mgpu::SortedEqualityCount<unsigned int*, unsigned int*, int *, int *, struct mgpu::SortedEqualityOp>((unsigned int*)left_key, (int)left_size,
(unsigned int*)right_key, (int)right_size, (int *)lower_bound, (int *)left_count, mgpu::SortedEqualityOp()/*,
*context*/);
// Scan the product counts. This is part of the load-balancing search.
unsigned long long int total = mgpu::Scan((int *)left_count, (int)left_size/*, *context*/);
//printf("total %llu\n", total);
check(hipMemcpy(result_size, &total, sizeof(unsigned long long int),
hipMemcpyHostToDevice));
// exe_time += context->Split();
// hipMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&exe_time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("join_find_bounds %lf\n", exe_time);
printf("**********************join size %llu\n", total);
// check(hipMemcpy(data_key, (unsigned char *)key_begin, 10,
// hipMemcpyDeviceToHost));
//
// check(hipMemcpy(data_value, (double *)value_begin, 80,
// hipMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
}
void find_bounds_8(void* lower_bound, void* left_count, unsigned long long int *result_size,
void* left_key, unsigned long long int left_size,
void* right_key, unsigned long long int right_size)
{
// unsigned char data_key[10];
// double data_value[10];
//
// check(hipMemcpy(data_key, (unsigned char *)key_begin, 10,
// hipMemcpyDeviceToHost));
//
// check(hipMemcpy(data_value, (double *)value_begin, 80,
// hipMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
float exe_time = 0.0f;
hipEventRecord(start,0);
// size_t freeMem, totalMem;
// hipMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
// mgpu::ContextPtr context = mgpu::CreateCudaDevice(0);
// context->Start();
typedef unsigned char* T;
const mgpu::MgpuSearchType LeftType = mgpu::MgpuSearchTypeIndex;
const mgpu::MgpuBounds Bounds = mgpu::MgpuBoundsLower;
const mgpu::MgpuSearchType RightType = mgpu::MgpuSearchTypeNone;
mgpu::SortedSearch<Bounds, LeftType, RightType, unsigned char *, unsigned char *, int *, int *>((unsigned char*)left_key,
(int)left_size, (unsigned char *)right_key, (int)right_size, (int *)lower_bound, (int*)0,/* mgpu::less<T>(),
*context,*/ (int *)0, (int *)0);
mgpu::SortedEqualityCount<unsigned char*, unsigned char*, int *, int *, struct mgpu::SortedEqualityOp>
((unsigned char*)left_key, (int)left_size,
(unsigned char*)right_key, (int)right_size, (int *)lower_bound, (int *)left_count, mgpu::SortedEqualityOp()/*,
*context*/);
// Scan the product counts. This is part of the load-balancing search.
unsigned long long int total = mgpu::Scan((int *)left_count, (int)left_size/*, *context*/);
//printf("total %llu\n", total);
check(hipMemcpy(result_size, &total, sizeof(unsigned long long int),
hipMemcpyHostToDevice));
// exe_time += context->Split();
// hipMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&exe_time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("join_find_bounds %lf\n", exe_time);
printf("**********************join size %llu\n", total);
// check(hipMemcpy(data_key, (unsigned char *)key_begin, 10,
// hipMemcpyDeviceToHost));
//
// check(hipMemcpy(data_value, (double *)value_begin, 80,
// hipMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
}
void join(int* left_indices, int* right_indices, unsigned long long int result_size,
int* lowerBound, int* leftCount,
unsigned long long int input_size)
{
// unsigned char data_key[10];
// double data_value[10];
//
// check(hipMemcpy(data_key, (unsigned char *)key_begin, 10,
// hipMemcpyDeviceToHost));
//
// check(hipMemcpy(data_value, (double *)value_begin, 80,
// hipMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
hipEvent_t start, stop;
hipEventCreate(&start); hipEventCreate(&stop);
float exe_time = 0.0f;
hipEventRecord(start,0);
// size_t freeMem, totalMem
// hipMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
// mgpu::ContextPtr context = mgpu::CreateCudaDevice(0);
// context->Start();
const int NT = 128;
const int VT = 7;
typedef mgpu::LaunchBoxVT<NT, VT> Tuning;
int2 launch = Tuning::GetLaunchParams(/**context*/);
int NV = launch.x * launch.y;
const mgpu::MgpuBounds Bounds = mgpu::MgpuBoundsUpper;
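// Expansion phase of the join: MergePathPartitions load-balances the variable-length match ranges across
// thread blocks, and KernelLeftJoin then writes the explicit (left_index, right_index) pair for every output
// row from the lowerBound/leftCount arrays computed by find_bounds_*.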
// MGPU_MEM(int) partitionsDevice = mgpu::MergePathPartitions<Bounds>(
int* partitionsDevice = mgpu::MergePathPartitions<Bounds>(
mgpu::counting_iterator<int>(0), result_size, leftCount,
input_size, NV, 0, mgpu::less<int>()/*, *context*/);
int numBlocks = MGPU_DIV_UP(result_size + input_size, NV);
hipLaunchKernelGGL(( mgpu::KernelLeftJoin<Tuning, false>),
dim3(numBlocks), dim3(launch.x), 0, 0 /* shared mem, stream (originally context->Stream()) */, result_size,
// lowerBound, leftCount, input_size, partitionsDevice->get(),
lowerBound, leftCount, input_size, partitionsDevice,
left_indices, right_indices);
check(hipFree(partitionsDevice));
// exe_time += context->Split();
// hipMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&exe_time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("join_main %lf\n", exe_time);
// check(hipMemcpy(data_key, (unsigned char *)key_begin, 10,
// hipMemcpyDeviceToHost));
//
// check(hipMemcpy(data_value, (double *)value_begin, 80,
// hipMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
}
}
| 9a050c9acd7f1d61f850f197a9b2d45744dd351c.cu | /*! \file Sort.cu
\author Gregory Diamos <gregory.diamos>
\date Wednesday December 1, 2010
\brief The source file for the C interface to CUDA sorting routines.
*/
// Redfox Includes
#include <redfox/nvcc/interface/RelationalAlgebraKernel.h>
#include <redfox/ra/interface/ModernGPUJoin.h>
#include <redfox/ra/interface/Tuple.h>
#include <redfox/ra/interface/moderngpu/include/kernels/join.cuh>
#include <stdio.h>
#include <iostream>
#include <string.h>
class gpu128
{
public:
typedef long long unsigned int type;
public:
type a[2];
};
struct compare_sort_gpu128
{
typedef gpu128 type;
__host__ __device__
bool operator()(type i, type j)
{
if (i.a[1] != j.a[1])
return (i.a[1] < j.a[1]);
return (i.a[0] < j.a[0]);
}
};
namespace redfox
{
struct compare_string
{
__host__ __device__
bool operator()(unsigned char * i, unsigned char *j)
{
const char *string1 = (char *) i;
const char *string2 = (char *) j;
// return(strcmp(string1, string2) < 0);
int ii = 0;
while(string1[ii] != '\0' && string2[ii] != '\0')
{
if(string1[ii] < string2[ii])
return true;
else if(string1[ii] > string2[ii])
return false;
ii++;
}
if(string1[ii] == '\0' && string2[ii] != '\0')
return true;
else
return false;
}
};
struct compare_string2
{
__host__ __device__
bool operator()(unsigned long long int i, unsigned long long int j)
{
const char *string1 = (char *) i;
const char *string2 = (char *) j;
// return(strcmp(string1, string2) < 0);
int ii = 0;
while(string1[ii] != '\0' && string2[ii] != '\0')
{
if(string1[ii] < string2[ii])
return true;
else if(string1[ii] > string2[ii])
return false;
ii++;
}
if(string1[ii] == '\0' && string2[ii] != '\0')
return true;
else
return false;
}
};
void check(cudaError_t status)
{
if(status != cudaSuccess)
{
std::cerr << cudaGetErrorString(status) << "\n";
std::abort();
}
}
void find_bounds_128(void* lower_bound, void* left_count, unsigned long long int *result_size,
void* left_key, unsigned long long int left_size,
void* right_key, unsigned long long int right_size)
{
// unsigned short left[100];
// unsigned short right[100];
// unsigned long long int *left_host = (unsigned long long int*)malloc(16);
// unsigned long long int *right_host = (unsigned long long int*)malloc(6001215 * 8);
// check(cudaMemcpy(left_host, (unsigned long long int *)left_key, 16,
// cudaMemcpyDeviceToHost));
//
// check(cudaMemcpy(right_host, (unsigned long long int *)right_key, 6001215 * 8,
// cudaMemcpyDeviceToHost));
//
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("%u %x %x\n", i, left[i], right[i]);
// int *left_host = (int *)malloc(30142*sizeof(int));
// check(cudaMemcpy(left_host, (int *)left_key, 30142*sizeof(int),
// cudaMemcpyDeviceToHost));
// int *right_host = (int *)malloc(1500000*sizeof(int));
// check(cudaMemcpy(right_host, (int *)right_key, 1500000*sizeof(int),
// cudaMemcpyDeviceToHost));
// for(unsigned int i = 0; i < 2; ++i)
// printf("left %u %llx\n", i, left_host[i]);
// for(unsigned int i = 0; i < 100; ++i)
// printf("right %u %llx\n", i, right_host[i]);
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("right %u %llx\n", i, right_host[i]);
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
float exe_time = 0.0f;
cudaEventRecord(start,0);
// size_t freeMem, totalMem;
// cudaMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
//
// std::cout << left_size << " " << right_size << "\n";
// mgpu::ContextPtr context = mgpu::CreateCudaDevice(0);
// context->Start();
typedef gpu128 type;
const mgpu::MgpuSearchType LeftType = mgpu::MgpuSearchTypeIndex;
const mgpu::MgpuBounds Bounds = mgpu::MgpuBoundsLower;
const mgpu::MgpuSearchType RightType = mgpu::MgpuSearchTypeNone;
mgpu::SortedSearch<Bounds, LeftType, RightType, type*, type*, int *, int *>((type*)left_key,
(int)left_size, (type*)right_key, (int)right_size, (int *)lower_bound, (int*)0, compare_sort_gpu128(),
/**context,*/ (int *)0, (int *)0);
//int *lower_bound_host = (int *)malloc(2*sizeof(int));
//check(cudaMemcpy(lower_bound_host, (int *)lower_bound, 2*sizeof(int),
// cudaMemcpyDeviceToHost));
//std::cout << "lower bound 0: " << lower_bound_host[0] << " lower bound 1: " << lower_bound_host[1] << "\n";
mgpu::SortedEqualityCount<type*, type *, int *, int *, struct compare_sort_gpu128, struct mgpu::SortedEqualityOp>
((type *)left_key, (int)left_size,
(type *)right_key, (int)right_size, (int *)lower_bound, (int *)left_count,
compare_sort_gpu128(), mgpu::SortedEqualityOp()/*,
*context*/);
//int *left_count_host = (int *)malloc(2*sizeof(int));
//check(cudaMemcpy(left_count_host, (int *)left_count, 2*sizeof(int),
// cudaMemcpyDeviceToHost));
//std::cout << "left count 0: " << left_count_host[0] << " left count 1: " << left_count_host[1] << "\n";
// Scan the product counts. This is part of the load-balancing search.
unsigned long long int total = mgpu::Scan((int *)left_count, (int)left_size/*, *context*/);
//printf("total %llu\n", total);
check(cudaMemcpy(result_size, &total, sizeof(unsigned long long int),
cudaMemcpyHostToDevice));
// exe_time += context->Split();
// cudaMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&exe_time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("join_find_bounds %lf\n", exe_time);
printf("**********************join size %llu\n", total);
// check(cudaMemcpy(data_key, (unsigned char *)key_begin, 10,
// cudaMemcpyDeviceToHost));
//
// check(cudaMemcpy(data_value, (double *)value_begin, 80,
// cudaMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
}
void find_bounds_string(void* lower_bound, void* left_count, unsigned long long int *result_size,
void* left_key, unsigned long long int left_size,
void* right_key, unsigned long long int right_size)
{
// unsigned short left[100];
// unsigned short right[100];
// unsigned long long int *left_host = (unsigned long long int*)malloc(16);
// unsigned long long int *right_host = (unsigned long long int*)malloc(6001215 * 8);
// check(cudaMemcpy(left_host, (unsigned long long int *)left_key, 16,
// cudaMemcpyDeviceToHost));
//
// check(cudaMemcpy(right_host, (unsigned long long int *)right_key, 6001215 * 8,
// cudaMemcpyDeviceToHost));
//
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("%u %x %x\n", i, left[i], right[i]);
// int *left_host = (int *)malloc(30142*sizeof(int));
// check(cudaMemcpy(left_host, (int *)left_key, 30142*sizeof(int),
// cudaMemcpyDeviceToHost));
// int *right_host = (int *)malloc(1500000*sizeof(int));
// check(cudaMemcpy(right_host, (int *)right_key, 1500000*sizeof(int),
// cudaMemcpyDeviceToHost));
// for(unsigned int i = 0; i < 2; ++i)
// printf("left %u %llx\n", i, left_host[i]);
// for(unsigned int i = 0; i < 100; ++i)
// printf("right %u %llx\n", i, right_host[i]);
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("right %u %llx\n", i, right_host[i]);
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
float exe_time = 0.0f;
cudaEventRecord(start,0);
// size_t freeMem, totalMem;
// cudaMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
//
// std::cout << left_size << " " << right_size << "\n";
// mgpu::ContextPtr context = mgpu::CreateCudaDevice(0);
// context->Start();
const mgpu::MgpuSearchType LeftType = mgpu::MgpuSearchTypeIndex;
const mgpu::MgpuBounds Bounds = mgpu::MgpuBoundsLower;
const mgpu::MgpuSearchType RightType = mgpu::MgpuSearchTypeNone;
mgpu::SortedSearch<Bounds, LeftType, RightType, unsigned long long int*, unsigned long long int*, int *, int *>((unsigned long long int*)left_key,
(int)left_size, (unsigned long long int*)right_key, (int)right_size, (int *)lower_bound, (int*)0, compare_string2(),/*mgpu::less<T>(),
*context,*/ (int *)0, (int *)0);
//int *lower_bound_host = (int *)malloc(2*sizeof(int));
//check(cudaMemcpy(lower_bound_host, (int *)lower_bound, 2*sizeof(int),
// cudaMemcpyDeviceToHost));
//std::cout << "lower bound 0: " << lower_bound_host[0] << " lower bound 1: " << lower_bound_host[1] << "\n";
mgpu::SortedEqualityCount<unsigned long long int*, unsigned long long int *, int *, int *, struct compare_string2, struct mgpu::SortedEqualityOp>
((unsigned long long int *)left_key, (int)left_size,
(unsigned long long int *)right_key, (int)right_size, (int *)lower_bound, (int *)left_count,
compare_string2(), mgpu::SortedEqualityOp()/*,
*context*/);
//int *left_count_host = (int *)malloc(2*sizeof(int));
//check(cudaMemcpy(left_count_host, (int *)left_count, 2*sizeof(int),
// cudaMemcpyDeviceToHost));
//std::cout << "left count 0: " << left_count_host[0] << " left count 1: " << left_count_host[1] << "\n";
// Scan the product counts. This is part of the load-balancing search.
unsigned long long int total = mgpu::Scan((int *)left_count, (int)left_size/*, *context*/);
//printf("total %llu\n", total);
check(cudaMemcpy(result_size, &total, sizeof(unsigned long long int),
cudaMemcpyHostToDevice));
// exe_time += context->Split();
// cudaMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&exe_time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("join_find_bounds %lf\n", exe_time);
printf("**********************join size %llu\n", total);
// check(cudaMemcpy(data_key, (unsigned char *)key_begin, 10,
// cudaMemcpyDeviceToHost));
//
// check(cudaMemcpy(data_value, (double *)value_begin, 80,
// cudaMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
}
void find_bounds_64(void* lower_bound, void* left_count, unsigned long long int *result_size,
void* left_key, unsigned long long int left_size,
void* right_key, unsigned long long int right_size)
{
// unsigned short left[100];
// unsigned short right[100];
// unsigned long long int *left_host = (unsigned long long int*)malloc(16);
// unsigned long long int *right_host = (unsigned long long int*)malloc(6001215 * 8);
// check(cudaMemcpy(left_host, (unsigned long long int *)left_key, 16,
// cudaMemcpyDeviceToHost));
//
// check(cudaMemcpy(right_host, (unsigned long long int *)right_key, 6001215 * 8,
// cudaMemcpyDeviceToHost));
//
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("%u %x %x\n", i, left[i], right[i]);
// int *left_host = (int *)malloc(30142*sizeof(int));
// check(cudaMemcpy(left_host, (int *)left_key, 30142*sizeof(int),
// cudaMemcpyDeviceToHost));
// int *right_host = (int *)malloc(1500000*sizeof(int));
// check(cudaMemcpy(right_host, (int *)right_key, 1500000*sizeof(int),
// cudaMemcpyDeviceToHost));
// for(unsigned int i = 0; i < 2; ++i)
// printf("left %u %llx\n", i, left_host[i]);
// for(unsigned int i = 0; i < 100; ++i)
// printf("right %u %llx\n", i, right_host[i]);
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("right %u %llx\n", i, right_host[i]);
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
float exe_time = 0.0f;
cudaEventRecord(start,0);
// size_t freeMem, totalMem;
// cudaMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
//
// std::cout << left_size << " " << right_size << "\n";
// mgpu::ContextPtr context = mgpu::CreateCudaDevice(0);
// context->Start();
const mgpu::MgpuSearchType LeftType = mgpu::MgpuSearchTypeIndex;
const mgpu::MgpuBounds Bounds = mgpu::MgpuBoundsLower;
const mgpu::MgpuSearchType RightType = mgpu::MgpuSearchTypeNone;
mgpu::SortedSearch<Bounds, LeftType, RightType, unsigned long long int*, unsigned long long int*, int *, int *>((unsigned long long int*)left_key,
(int)left_size, (unsigned long long int*)right_key, (int)right_size, (int *)lower_bound, (int*)0, /*mgpu::less<T>(),
*context,*/ (int *)0, (int *)0);
//int *lower_bound_host = (int *)malloc(2*sizeof(int));
//check(cudaMemcpy(lower_bound_host, (int *)lower_bound, 2*sizeof(int),
// cudaMemcpyDeviceToHost));
//std::cout << "lower bound 0: " << lower_bound_host[0] << " lower bound 1: " << lower_bound_host[1] << "\n";
mgpu::SortedEqualityCount<unsigned long long int*, unsigned long long int *, int *, int *, struct mgpu::SortedEqualityOp>
((unsigned long long int *)left_key, (int)left_size,
(unsigned long long int *)right_key, (int)right_size, (int *)lower_bound, (int *)left_count,
mgpu::SortedEqualityOp()/*,
*context*/);
//int *left_count_host = (int *)malloc(2*sizeof(int));
//check(cudaMemcpy(left_count_host, (int *)left_count, 2*sizeof(int),
// cudaMemcpyDeviceToHost));
//std::cout << "left count 0: " << left_count_host[0] << " left count 1: " << left_count_host[1] << "\n";
// Scan the product counts. This is part of the load-balancing search.
unsigned long long int total = mgpu::Scan((int *)left_count, (int)left_size/*, *context*/);
//printf("total %llu\n", total);
check(cudaMemcpy(result_size, &total, sizeof(unsigned long long int),
cudaMemcpyHostToDevice));
// exe_time += context->Split();
// cudaMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&exe_time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("join_find_bounds %lf\n", exe_time);
printf("**********************join size %llu\n", total);
// check(cudaMemcpy(data_key, (unsigned char *)key_begin, 10,
// cudaMemcpyDeviceToHost));
//
// check(cudaMemcpy(data_value, (double *)value_begin, 80,
// cudaMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
}
void find_bounds_16(void* lower_bound, void* left_count, unsigned long long int *result_size,
void* left_key, unsigned long long int left_size,
void* right_key, unsigned long long int right_size)
{
// unsigned short left[100];
// unsigned short right[100];
// unsigned short *left_host = (unsigned short*)malloc(6001215*2);
// unsigned short *right = (unsigned short*)malloc(100*2);
// check(cudaMemcpy(left_host, (unsigned short *)left_key, 6001215*2,
// cudaMemcpyDeviceToHost));
// check(cudaMemcpy(right, (unsigned short *)right_key, 200,
// cudaMemcpyDeviceToHost));
//
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("%u %x %x\n", i, left[i], right[i]);
// int *left_host = (int *)malloc(30142*sizeof(int));
// check(cudaMemcpy(left_host, (int *)left_key, 30142*sizeof(int),
// cudaMemcpyDeviceToHost));
// int *right_host = (int *)malloc(1500000*sizeof(int));
// check(cudaMemcpy(right_host, (int *)right_key, 1500000*sizeof(int),
// cudaMemcpyDeviceToHost));
// for(unsigned int i = 0; i < 6001215; ++i)
// if(left_host[i] < left_host[i - 1]) printf("error %d %d %d\n", i, left_host[i], left_host[i - 1]);
// printf("left %u %u\n", i, left_host[i]);
// for(unsigned int i = 0; i < 100; ++i)
// if(right_host[i] < right_host[i - 1]) printf("error %d %d %d\n", i, right_host[i], right_host[i - 1]);
// printf("right %u %u\n", i, right[i]);
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
float exe_time = 0.0f;
cudaEventRecord(start,0);
// size_t freeMem, totalMem;
// cudaMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
//
// std::cout << left_size << " " << right_size << "\n";
// mgpu::ContextPtr context = mgpu::CreateCudaDevice(0);
// context->Start();
typedef unsigned short* T;
const mgpu::MgpuSearchType LeftType = mgpu::MgpuSearchTypeIndex;
const mgpu::MgpuBounds Bounds = mgpu::MgpuBoundsLower;
const mgpu::MgpuSearchType RightType = mgpu::MgpuSearchTypeNone;
mgpu::SortedSearch<Bounds, LeftType, RightType, unsigned short *, unsigned short *, int *, int *>((unsigned short*)left_key,
(int)left_size, (unsigned short *)right_key, (int)right_size, (int *)lower_bound, (int*)0, /*mgpu::less<T>(),
*context,*/ (int *)0, (int *)0);
mgpu::SortedEqualityCount<unsigned short*, unsigned short*, int *, int *, struct mgpu::SortedEqualityOp>
((unsigned short*)left_key, (int)left_size,
(unsigned short*)right_key, (int)right_size, (int *)lower_bound, (int *)left_count, mgpu::SortedEqualityOp()/*,
*context*/);
// Scan the product counts. This is part of the load-balancing search.
unsigned long long int total = mgpu::Scan((int *)left_count, (int)left_size/*, *context*/);
//printf("total %llu\n", total);
check(cudaMemcpy(result_size, &total, sizeof(unsigned long long int),
cudaMemcpyHostToDevice));
// exe_time += context->Split();
// cudaMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&exe_time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("join_find_bounds %lf\n", exe_time);
printf("**********************join size %llu\n", total);
// check(cudaMemcpy(data_key, (unsigned char *)key_begin, 10,
// cudaMemcpyDeviceToHost));
//
// check(cudaMemcpy(data_value, (double *)value_begin, 80,
// cudaMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
}
void find_bounds_32(void* lower_bound, void* left_count, unsigned long long int *result_size,
void* left_key, unsigned long long int left_size,
void* right_key, unsigned long long int right_size)
{
// unsigned int left[6001215];
// unsigned int right[6001215];
// unsigned int *left = (unsigned int*)malloc(6001215*4);
// unsigned int *right = (unsigned int*)malloc(6001215*4);
// check(cudaMemcpy(left, (unsigned int *)left_key, 4*6001215,
// cudaMemcpyDeviceToHost));
//
// check(cudaMemcpy(right, (unsigned int *)right_key, 4*6001215,
// cudaMemcpyDeviceToHost));
//
// for(unsigned int i = 6001214; i > 6001204; --i)
// printf("%u %x %x\n", i, left[i], right[i]);
// int *left_host = (int *)malloc(30142*sizeof(int));
// check(cudaMemcpy(left_host, (int *)left_key, 30142*sizeof(int),
// cudaMemcpyDeviceToHost));
// int *right_host = (int *)malloc(1500000*sizeof(int));
// check(cudaMemcpy(right_host, (int *)right_key, 1500000*sizeof(int),
// cudaMemcpyDeviceToHost));
// for(unsigned int i = 1; i < 100; ++i)
// if(left_host[i] < left_host[i - 1]) printf("error %d %d %d\n", i, left_host[i], left_host[i - 1]);
// printf("left %d %d\n", i, left_host[i]);
// for(unsigned int i = 1; i < 100; ++i)
// if(right_host[i] < right_host[i - 1]) printf("error %d %d %d\n", i, right_host[i], right_host[i - 1]);
// printf("right %d %d\n", i, right_host[i]);
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
float exe_time = 0.0f;
cudaEventRecord(start,0);
// size_t freeMem, totalMem;
// cudaMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
//
// std::cout << left_size << " " << right_size << "\n";
// mgpu::ContextPtr context = mgpu::CreateCudaDevice(0);
// context->Start();
typedef unsigned int* T;
const mgpu::MgpuSearchType LeftType = mgpu::MgpuSearchTypeIndex;
const mgpu::MgpuBounds Bounds = mgpu::MgpuBoundsLower;
const mgpu::MgpuSearchType RightType = mgpu::MgpuSearchTypeNone;
mgpu::SortedSearch<Bounds, LeftType, RightType, unsigned int *, unsigned int *, int *, int *>((unsigned int*)left_key,
(int)left_size, (unsigned int *)right_key, (int)right_size, (int *)lower_bound, (int*)0, /*mgpu::less<T>(),
*context,*/ (int *)0, (int *)0);
mgpu::SortedEqualityCount<unsigned int*, unsigned int*, int *, int *, struct mgpu::SortedEqualityOp>((unsigned int*)left_key, (int)left_size,
(unsigned int*)right_key, (int)right_size, (int *)lower_bound, (int *)left_count, mgpu::SortedEqualityOp()/*,
*context*/);
// Scan the product counts. This is part of the load-balancing search.
unsigned long long int total = mgpu::Scan((int *)left_count, (int)left_size/*, *context*/);
//printf("total %llu\n", total);
check(cudaMemcpy(result_size, &total, sizeof(unsigned long long int),
cudaMemcpyHostToDevice));
// exe_time += context->Split();
// cudaMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&exe_time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("join_find_bounds %lf\n", exe_time);
printf("**********************join size %llu\n", total);
// check(cudaMemcpy(data_key, (unsigned char *)key_begin, 10,
// cudaMemcpyDeviceToHost));
//
// check(cudaMemcpy(data_value, (double *)value_begin, 80,
// cudaMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
}
void find_bounds_8(void* lower_bound, void* left_count, unsigned long long int *result_size,
void* left_key, unsigned long long int left_size,
void* right_key, unsigned long long int right_size)
{
// unsigned char data_key[10];
// double data_value[10];
//
// check(cudaMemcpy(data_key, (unsigned char *)key_begin, 10,
// cudaMemcpyDeviceToHost));
//
// check(cudaMemcpy(data_value, (double *)value_begin, 80,
// cudaMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
float exe_time = 0.0f;
cudaEventRecord(start,0);
// size_t freeMem, totalMem;
// cudaMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
// mgpu::ContextPtr context = mgpu::CreateCudaDevice(0);
// context->Start();
typedef unsigned char* T;
const mgpu::MgpuSearchType LeftType = mgpu::MgpuSearchTypeIndex;
const mgpu::MgpuBounds Bounds = mgpu::MgpuBoundsLower;
const mgpu::MgpuSearchType RightType = mgpu::MgpuSearchTypeNone;
mgpu::SortedSearch<Bounds, LeftType, RightType, unsigned char *, unsigned char *, int *, int *>((unsigned char*)left_key,
(int)left_size, (unsigned char *)right_key, (int)right_size, (int *)lower_bound, (int*)0,/* mgpu::less<T>(),
*context,*/ (int *)0, (int *)0);
mgpu::SortedEqualityCount<unsigned char*, unsigned char*, int *, int *, struct mgpu::SortedEqualityOp>
((unsigned char*)left_key, (int)left_size,
(unsigned char*)right_key, (int)right_size, (int *)lower_bound, (int *)left_count, mgpu::SortedEqualityOp()/*,
*context*/);
// Scan the product counts. This is part of the load-balancing search.
unsigned long long int total = mgpu::Scan((int *)left_count, (int)left_size/*, *context*/);
//printf("total %llu\n", total);
check(cudaMemcpy(result_size, &total, sizeof(unsigned long long int),
cudaMemcpyHostToDevice));
// exe_time += context->Split();
// cudaMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&exe_time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("join_find_bounds %lf\n", exe_time);
printf("**********************join size %llu\n", total);
// check(cudaMemcpy(data_key, (unsigned char *)key_begin, 10,
// cudaMemcpyDeviceToHost));
//
// check(cudaMemcpy(data_value, (double *)value_begin, 80,
// cudaMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
}
void join(int* left_indices, int* right_indices, unsigned long long int result_size,
int* lowerBound, int* leftCount,
unsigned long long int input_size)
{
// unsigned char data_key[10];
// double data_value[10];
//
// check(cudaMemcpy(data_key, (unsigned char *)key_begin, 10,
// cudaMemcpyDeviceToHost));
//
// check(cudaMemcpy(data_value, (double *)value_begin, 80,
// cudaMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
float exe_time = 0.0f;
cudaEventRecord(start,0);
// size_t freeMem, totalMem
// cudaMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
// mgpu::ContextPtr context = mgpu::CreateCudaDevice(0);
// context->Start();
const int NT = 128;
const int VT = 7;
typedef mgpu::LaunchBoxVT<NT, VT> Tuning;
int2 launch = Tuning::GetLaunchParams(/**context*/);
int NV = launch.x * launch.y;
const mgpu::MgpuBounds Bounds = mgpu::MgpuBoundsUpper;
// MGPU_MEM(int) partitionsDevice = mgpu::MergePathPartitions<Bounds>(
int* partitionsDevice = mgpu::MergePathPartitions<Bounds>(
mgpu::counting_iterator<int>(0), result_size, leftCount,
input_size, NV, 0, mgpu::less<int>()/*, *context*/);
int numBlocks = MGPU_DIV_UP(result_size + input_size, NV);
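	// KernelLeftJoin expands each left row into its run of matching right rows:
	// lowerBound holds the first matching right index per left row, the scanned
	// leftCount gives each row's output offset, and the merge-path partitions
	// computed above load-balance that expansion across thread blocks.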
mgpu::KernelLeftJoin<Tuning, false>
<<<numBlocks, launch.x/*, 0, context->Stream()*/>>>(result_size,
// lowerBound, leftCount, input_size, partitionsDevice->get(),
lowerBound, leftCount, input_size, partitionsDevice,
left_indices, right_indices);
check(cudaFree(partitionsDevice));
// exe_time += context->Split();
// cudaMemGetInfo(&freeMem, &totalMem);
// std::cout << freeMem << "\n";
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&exe_time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("join_main %lf\n", exe_time);
// check(cudaMemcpy(data_key, (unsigned char *)key_begin, 10,
// cudaMemcpyDeviceToHost));
//
// check(cudaMemcpy(data_value, (double *)value_begin, 80,
// cudaMemcpyDeviceToHost));
//
// for(unsigned int i = 0; i < 10; ++i)
// printf("%u %x %lf\n", i, data_key[i], data_value[i]);
}
}
|
75d14318dddd94ffdd2dfc580cd2fe9d66c6d286.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
//#pragma comment(lib,"cutil64D.lib")
//#pragma comment(lib,"cutil64.lib")
# define N 1
typedef struct
{
float real;
float img;
}complex;
// texture references (twiddle-factor tables and bit-reversal index table)
texture<float,2,hipReadModeElementType>texRef1;
texture<float,2,hipReadModeElementType>texRef2;
texture<unsigned,2,hipReadModeElementType>texRef3;
// compute the twiddle factors
__global__ void initW(complex* W,int size_x)
{
float PI=atan((float)1)*4;
int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i<size_x/2)
{
W[i].real=cos(2*PI/size_x*i);
W[i].img=-1.0*sin(2*PI/size_x*i);
}
}
// build the per-stage table of twiddle factors
__global__ void initW_array(complex* W,float* W_array_real,float* W_array_img,int size_x)
{
long long i = blockIdx.x*blockDim.x+threadIdx.x;
long long j = blockIdx.y; // stage index
int l;
l = exp2f(j);
//l=1<<j;
if(i<size_x/2&&j<log((float)size_x)/log((float)2))
{W_array_real[j*size_x/2+i] = W[size_x*(i%l)/2/l].real;
W_array_img[j*size_x/2+i] = W[size_x*(i%l)/2/l].img;}
// __syncthreads();
}
// complex multiplication
__device__ complex ComplexMul(complex X_in,complex W_in)
{
complex X_out;
X_out.real = X_in.real*W_in.real-X_in.img*W_in.img;
X_out.img = X_in.real*W_in.img+X_in.img*W_in.real;
return X_out;
}
// complex addition
__device__ complex ComplexAdd(complex X1,complex X2)
{
complex X_out;
X_out.real = X1.real+X2.real;
X_out.img = X1.img+X2.img;
return X_out;
}
// complex subtraction
__device__ complex ComplexSub(complex X1,complex X2)
{
complex X_out;
X_out.real = X1.real-X2.real;
X_out.img = X1.img-X2.img;
return X_out;
}
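// FFT_T performs one radix-2 butterfly stage over the whole array: each thread owns a
// single butterfly pair (p, q = p + Ns) and reads its twiddle factor from the 2D
// textures texRef1/texRef2 (real and imaginary parts), row-indexed by the stage.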
__global__ void FFT_T(complex* DataIn,int size_x,int Ns,int stage)
{
//position of this thread within the block
int k = blockIdx.x*blockDim.x+threadIdx.x;
int width = size_x/(2*N);
int p,q,t;
complex Wn,Xp,XqWn;
// perform the butterfly operations of this stage in parallel
if( k<size_x/2)
{
p = k / Ns * Ns * 2 + k % Ns;
q = p + Ns;
t = (k/width)+stage;
Wn.real = tex2D( texRef1,k%width,t );
Wn.img = tex2D( texRef2,k%width,t );
XqWn = ComplexMul( DataIn[q],Wn);
Xp = DataIn[p];
DataIn[p] = ComplexAdd( Xp,XqWn);
DataIn[q] = ComplexSub( Xp,XqWn) ;
} //end if
} //end kernel
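// FFT_T1 runs the first ten stages (Ns = 1 .. 512) of each 1024-point chunk entirely in
// shared memory: a block of 512 threads stages 1024 complex values into sdata, iterates
// the butterflies locally, then writes its two results back to global memory.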
__global__ void FFT_T1(complex* DataIn,int size_x)
{
//position of this thread within the block
int i = threadIdx.x;
__shared__ complex sdata[1024];
int j = blockIdx.x*blockDim.x;
int k ;
k = j + i;
int width = size_x/(2*N);
int p,q,t;
int stage = 0;
complex Wn,Xp,XqWn;
// perform the butterfly operations of this stage in parallel
if( k<size_x/2)
{
sdata[i] = DataIn[i+j*2];
sdata[i+512] = DataIn[i+j*2+512];
__syncthreads();
for(int Ns = 1;Ns < 1024;Ns = Ns * 2)
{
p = i / Ns * Ns * 2 + i % Ns;
q = p + Ns;
t = (k/width)+stage;
Wn.real = tex2D( texRef1,k%width,t );
Wn.img = tex2D( texRef2,k%width,t );
stage = stage + N;
XqWn = ComplexMul( sdata[q],Wn);
Xp = sdata[p];
sdata[p] = ComplexAdd( Xp,XqWn);
sdata[q] = ComplexSub( Xp,XqWn) ;
__syncthreads();
}
DataIn[p+j*2] = sdata[p];
DataIn[q+j*2] = sdata[q];
} //end if
} //end kernel
// bit-reversal permutation
__global__ void change(unsigned *trans,int size_x)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int j=0,k=0;
unsigned int t;
if(i<size_x)
{
k=i;
j=0;
t=(log((float)size_x)/log((float)2))+0.5; // number of stages
while( (t--)>0 )
{
j=j<<1; // shift the result left by one bit
j|=(k & 1); // take the lowest bit of k each iteration
k=k>>1; // shift k right by one bit
}
trans[i] = j;
}//end if
}//end kernel
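// change1 scatters the input into bit-reversed order, reading the permutation table
// produced by change() back through the 2D texture texRef3.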
__global__ void change1(complex* d_idata,complex* d_idata1,int size_x)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
unsigned j;
if(i<size_x)
{
j = tex2D( texRef3,i%65536,i/65536 );
d_idata[j]=d_idata1[i];
}
}
int main()
{
int size_x = 256;
int size_x1;
complex* h_idata;
complex* h_odata;
complex* d_idata;
complex* d_idata1;
unsigned* trans;
complex* W;
float *W_array_real;
float *W_array_img;
int i=0;
int length,t;
t=(log((double)size_x)/log((double)2))+0.5;
float gpu_time = 0;
length = size_x/1025+1;
//CUDA array height (the +0.5 guards against precision loss)
int height = (log((double)size_x)/log((double)2))+0.5;
//CUDA array width
int width = size_x/2;
int size = width * height * sizeof(float);
hipHostMalloc((void**)&h_odata,size_x*sizeof(complex));
hipHostMalloc((void**)&h_idata,size_x*sizeof(complex));
hipMalloc((void**)&W,size_x/2*sizeof(complex));
hipMalloc((void**)&d_idata,size_x*sizeof(complex));
hipMalloc((void**)&d_idata1,size_x*sizeof(complex));
hipMalloc((void**)&trans,size_x*sizeof(unsigned));
hipMalloc((void**)&W_array_real,size);
hipMalloc((void**)&W_array_img,size);
//CUDA array element formats
hipChannelFormatDesc channelDesc1 = hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindFloat);
hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindFloat);
hipChannelFormatDesc channelDesc3 = hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindUnsigned);
//allocate the CUDA arrays
hipArray* d_Wdata_real;
hipArray* d_Wdata_img;
hipArray* trans1;
hipMallocArray(&d_Wdata_real,&channelDesc1,width/N,height*N);
hipMallocArray(&d_Wdata_img,&channelDesc2,width/N,height*N);
hipMallocArray(&trans1,&channelDesc3,65536,size_x/65537+1);
dim3 block(length,1,1);
dim3 thread(512,1,1);
hipLaunchKernelGGL(( initW), dim3(block),dim3(thread), 0, 0, W,size_x);
dim3 block1(length,height,1);
dim3 thread1(512,1,1);
hipLaunchKernelGGL(( initW_array), dim3(block1),dim3(thread1), 0, 0, W,W_array_real,W_array_img,size_x);
dim3 blocks3(length*32,1,1);
dim3 threads3(32,1,1);
hipLaunchKernelGGL(( change), dim3(blocks3),dim3(threads3), 0, 0, trans,size_x);
hipMemcpyToArray(d_Wdata_real,0,0,W_array_real,size,hipMemcpyDeviceToDevice);
hipMemcpyToArray(d_Wdata_img,0,0,W_array_img,size,hipMemcpyDeviceToDevice);
hipMemcpyToArray(trans1,0,0,trans,size_x*sizeof(unsigned),hipMemcpyDeviceToDevice);
//bind the CUDA arrays to the textures
hipBindTextureToArray(texRef1,d_Wdata_real,channelDesc1);
hipBindTextureToArray(texRef2,d_Wdata_img,channelDesc2);
hipBindTextureToArray(texRef3,trans1,channelDesc3);
//input data to be transformed by the FFT
for(i=0;i<size_x;i++)
{
h_odata[i].real=i+1.0f;
h_odata[i].img=0.0f;
}
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipMemcpy(d_idata1,h_odata,size_x*sizeof(complex),hipMemcpyHostToDevice);
dim3 blocks1(length*32,1,1);
dim3 threads1(32,1,1);
hipLaunchKernelGGL(( change1), dim3(blocks1),dim3(threads1), 0, 0, d_idata,d_idata1,size_x);
hipEventRecord(start,0);
//size_x1 = 1024;
dim3 blocks4(length,1,1);
dim3 threads4(512,1,1);
hipLaunchKernelGGL(( FFT_T1), dim3(blocks4),dim3(threads4), 0, 0, d_idata,size_x);
dim3 blocks2(length,1,1);
dim3 threads2(512,1,1);
//Ns is the stage size (butterfly span)
for ( int Ns = 1024,stage = N*10; Ns<size_x; Ns = Ns * 2,stage+=N)
{
hipLaunchKernelGGL(( FFT_T), dim3(blocks2),dim3(threads2), 0, 0, d_idata,size_x,Ns,stage);
//hipDeviceSynchronize();
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipMemcpy(h_idata,d_idata,size_x*sizeof(complex),hipMemcpyDeviceToHost);
hipEventElapsedTime(&gpu_time,start,stop);
/*
FILE *fp = fopen("D:/cuda3.txt","w");
if(fp==0)
exit(0);
for(i = 0;i<size_x;i++)
{
fprintf(fp,"%f+%f*i\n",h_idata[i].real,h_idata[i].img);
}
fclose(fp);
*/
hipEventDestroy(start);
hipEventDestroy(stop);
// free host memory, device memory and the CUDA arrays
hipUnbindTexture(texRef1);
hipUnbindTexture(texRef2);
hipUnbindTexture(texRef3);
hipHostFree(h_odata);
hipHostFree(h_idata);
hipFree(W);
hipFree(d_idata);
hipFree(d_idata1);
hipFree(trans);
hipFree(W_array_real);
hipFree(W_array_img);
hipFreeArray(d_Wdata_real);
hipFreeArray(d_Wdata_img);
hipFreeArray(trans1);
// reset the device
hipDeviceReset();
printf("%f\n",gpu_time);
printf("OK\n");
getchar();
return 0;
}
| 75d14318dddd94ffdd2dfc580cd2fe9d66c6d286.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
//#pragma comment(lib,"cutil64D.lib")
//#pragma comment(lib,"cutil64.lib")
# define N 1
typedef struct
{
float real;
float img;
}complex;
//texture references (twiddle-factor tables and bit-reversal index table)
texture<float,2,cudaReadModeElementType>texRef1;
texture<float,2,cudaReadModeElementType>texRef2;
texture<unsigned,2,cudaReadModeElementType>texRef3;
//compute the twiddle factors
__global__ void initW(complex* W,int size_x)
{
float PI=atan((float)1)*4;
int i = blockIdx.x*blockDim.x+threadIdx.x;
if(i<size_x/2)
{
W[i].real=cos(2*PI/size_x*i);
W[i].img=-1.0*sin(2*PI/size_x*i);
}
}
//build the per-stage table of twiddle factors
__global__ void initW_array(complex* W,float* W_array_real,float* W_array_img,int size_x)
{
long long i = blockIdx.x*blockDim.x+threadIdx.x;
long long j = blockIdx.y; // stage index
int l;
l = exp2f(j);
//l=1<<j;
if(i<size_x/2&&j<log((float)size_x)/log((float)2))
{W_array_real[j*size_x/2+i] = W[size_x*(i%l)/2/l].real;
W_array_img[j*size_x/2+i] = W[size_x*(i%l)/2/l].img;}
// __syncthreads();
}
//complex multiplication
__device__ complex ComplexMul(complex X_in,complex W_in)
{
complex X_out;
X_out.real = X_in.real*W_in.real-X_in.img*W_in.img;
X_out.img = X_in.real*W_in.img+X_in.img*W_in.real;
return X_out;
}
//complex addition
__device__ complex ComplexAdd(complex X1,complex X2)
{
complex X_out;
X_out.real = X1.real+X2.real;
X_out.img = X1.img+X2.img;
return X_out;
}
//complex subtraction
__device__ complex ComplexSub(complex X1,complex X2)
{
complex X_out;
X_out.real = X1.real-X2.real;
X_out.img = X1.img-X2.img;
return X_out;
}
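// FFT_T performs one radix-2 butterfly stage over the whole array: each thread owns a
// single butterfly pair (p, q = p + Ns) and reads its twiddle factor from the 2D
// textures texRef1/texRef2 (real and imaginary parts), row-indexed by the stage.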
__global__ void FFT_T(complex* DataIn,int size_x,int Ns,int stage)
{
//position of this thread within the block
int k = blockIdx.x*blockDim.x+threadIdx.x;
int width = size_x/(2*N);
int p,q,t;
complex Wn,Xp,XqWn;
//perform the butterfly operations of this stage in parallel
if( k<size_x/2)
{
p = k / Ns * Ns * 2 + k % Ns;
q = p + Ns;
t = (k/width)+stage;
Wn.real = tex2D( texRef1,k%width,t );
Wn.img = tex2D( texRef2,k%width,t );
XqWn = ComplexMul( DataIn[q],Wn);
Xp = DataIn[p];
DataIn[p] = ComplexAdd( Xp,XqWn);
DataIn[q] = ComplexSub( Xp,XqWn) ;
} //end if
} //end kernel
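// FFT_T1 runs the first ten stages (Ns = 1 .. 512) of each 1024-point chunk entirely in
// shared memory: a block of 512 threads stages 1024 complex values into sdata, iterates
// the butterflies locally, then writes its two results back to global memory.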
__global__ void FFT_T1(complex* DataIn,int size_x)
{
//position of this thread within the block
int i = threadIdx.x;
__shared__ complex sdata[1024];
int j = blockIdx.x*blockDim.x;
int k ;
k = j + i;
int width = size_x/(2*N);
int p,q,t;
int stage = 0;
complex Wn,Xp,XqWn;
//perform the butterfly operations of this stage in parallel
if( k<size_x/2)
{
sdata[i] = DataIn[i+j*2];
sdata[i+512] = DataIn[i+j*2+512];
__syncthreads();
for(int Ns = 1;Ns < 1024;Ns = Ns * 2)
{
p = i / Ns * Ns * 2 + i % Ns;
q = p + Ns;
t = (k/width)+stage;
Wn.real = tex2D( texRef1,k%width,t );
Wn.img = tex2D( texRef2,k%width,t );
stage = stage + N;
XqWn = ComplexMul( sdata[q],Wn);
Xp = sdata[p];
sdata[p] = ComplexAdd( Xp,XqWn);
sdata[q] = ComplexSub( Xp,XqWn) ;
__syncthreads();
}
DataIn[p+j*2] = sdata[p];
DataIn[q+j*2] = sdata[q];
} //end if
} //end kernel
//bit-reversal permutation
__global__ void change(unsigned *trans,int size_x)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
unsigned int j=0,k=0;
unsigned int t;
if(i<size_x)
{
k=i;
j=0;
t=(log((float)size_x)/log((float)2))+0.5; // number of stages
while( (t--)>0 )
{
j=j<<1; // shift the result left by one bit
j|=(k & 1); // take the lowest bit of k each iteration
k=k>>1; // shift k right by one bit
}
trans[i] = j;
}//end if
}//end kernel
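// change1 scatters the input into bit-reversed order, reading the permutation table
// produced by change() back through the 2D texture texRef3.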
__global__ void change1(complex* d_idata,complex* d_idata1,int size_x)
{
int i = blockIdx.x*blockDim.x+threadIdx.x;
unsigned j;
if(i<size_x)
{
j = tex2D( texRef3,i%65536,i/65536 );
d_idata[j]=d_idata1[i];
}
}
int main()
{
int size_x = 256;
int size_x1;
complex* h_idata;
complex* h_odata;
complex* d_idata;
complex* d_idata1;
unsigned* trans;
complex* W;
float *W_array_real;
float *W_array_img;
int i=0;
int length,t;
t=(log((double)size_x)/log((double)2))+0.5;
float gpu_time = 0;
length = size_x/1025+1;
//CUDA array height (the +0.5 guards against precision loss)
int height = (log((double)size_x)/log((double)2))+0.5;
//CUDA array width
int width = size_x/2;
int size = width * height * sizeof(float);
cudaMallocHost((void**)&h_odata,size_x*sizeof(complex));
cudaMallocHost((void**)&h_idata,size_x*sizeof(complex));
cudaMalloc((void**)&W,size_x/2*sizeof(complex));
cudaMalloc((void**)&d_idata,size_x*sizeof(complex));
cudaMalloc((void**)&d_idata1,size_x*sizeof(complex));
cudaMalloc((void**)&trans,size_x*sizeof(unsigned));
cudaMalloc((void**)&W_array_real,size);
cudaMalloc((void**)&W_array_img,size);
//CUDA array element formats
cudaChannelFormatDesc channelDesc1 = cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindFloat);
cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindFloat);
cudaChannelFormatDesc channelDesc3 = cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindUnsigned);
//allocate the CUDA arrays
cudaArray* d_Wdata_real;
cudaArray* d_Wdata_img;
cudaArray* trans1;
cudaMallocArray(&d_Wdata_real,&channelDesc1,width/N,height*N);
cudaMallocArray(&d_Wdata_img,&channelDesc2,width/N,height*N);
cudaMallocArray(&trans1,&channelDesc3,65536,size_x/65537+1);
dim3 block(length,1,1);
dim3 thread(512,1,1);
initW<<<block,thread>>>(W,size_x);
dim3 block1(length,height,1);
dim3 thread1(512,1,1);
initW_array<<<block1,thread1>>>(W,W_array_real,W_array_img,size_x);
dim3 blocks3(length*32,1,1);
dim3 threads3(32,1,1);
change<<<blocks3,threads3>>>(trans,size_x);
cudaMemcpyToArray(d_Wdata_real,0,0,W_array_real,size,cudaMemcpyDeviceToDevice);
cudaMemcpyToArray(d_Wdata_img,0,0,W_array_img,size,cudaMemcpyDeviceToDevice);
cudaMemcpyToArray(trans1,0,0,trans,size_x*sizeof(unsigned),cudaMemcpyDeviceToDevice);
//bind the CUDA arrays to the textures
cudaBindTextureToArray(texRef1,d_Wdata_real,channelDesc1);
cudaBindTextureToArray(texRef2,d_Wdata_img,channelDesc2);
cudaBindTextureToArray(texRef3,trans1,channelDesc3);
//input data to be transformed by the FFT
for(i=0;i<size_x;i++)
{
h_odata[i].real=i+1.0f;
h_odata[i].img=0.0f;
}
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(d_idata1,h_odata,size_x*sizeof(complex),cudaMemcpyHostToDevice);
dim3 blocks1(length*32,1,1);
dim3 threads1(32,1,1);
change1<<<blocks1,threads1>>>(d_idata,d_idata1,size_x);
cudaEventRecord(start,0);
//size_x1 = 1024;
dim3 blocks4(length,1,1);
dim3 threads4(512,1,1);
FFT_T1<<<blocks4,threads4>>>(d_idata,size_x);
dim3 blocks2(length,1,1);
dim3 threads2(512,1,1);
//Ns is the stage size (butterfly span)
for ( int Ns = 1024,stage = N*10; Ns<size_x; Ns = Ns * 2,stage+=N)
{
FFT_T<<<blocks2,threads2>>>(d_idata,size_x,Ns,stage);
//cudaThreadSynchronize();
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaMemcpy(h_idata,d_idata,size_x*sizeof(complex),cudaMemcpyDeviceToHost);
cudaEventElapsedTime(&gpu_time,start,stop);
/*
FILE *fp = fopen("D:/cuda3.txt","w");
if(fp==0)
exit(0);
for(i = 0;i<size_x;i++)
{
fprintf(fp,"%f+%f*i\n",h_idata[i].real,h_idata[i].img);
}
fclose(fp);
*/
cudaEventDestroy(start);
cudaEventDestroy(stop);
//free host memory, device memory and the CUDA arrays
cudaUnbindTexture(texRef1);
cudaUnbindTexture(texRef2);
cudaUnbindTexture(texRef3);
cudaFreeHost(h_odata);
cudaFreeHost(h_idata);
cudaFree(W);
cudaFree(d_idata);
cudaFree(d_idata1);
cudaFree(trans);
cudaFree(W_array_real);
cudaFree(W_array_img);
cudaFreeArray(d_Wdata_real);
cudaFreeArray(d_Wdata_img);
cudaFreeArray(trans1);
//release the device thread
cudaThreadExit();
printf("%f\n",gpu_time);
printf("OK\n");
getchar();
return 0;
}
|
ac688c2cd8e69db8855ff09cdfc2270452857221.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <vector>
using namespace std;
const int m_ = 64;
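// distanceKernel is intended to fill d (an m x m matrix) with pairwise squared
// Euclidean distances between the m rows of a (each row of length n), accumulating the
// per-dimension squared differences in the shared tile tmp before writing them out.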
__global__ void distanceKernel(int* a, int m, int n, int* d)
{
__shared__ int tmp[m_][m_];
int row1 = blockIdx.x * blockDim.x;
int row2 = blockIdx.y * blockDim.y;
__syncthreads();
tmp[row1][row2] += (a[row1 * n + threadIdx.x] - a[row2 * n + threadIdx.y]) * (a[row1 * n + threadIdx.x] - a[row2 * n + threadIdx.y]);
__syncthreads();
d[row1 * m + row2] = tmp[row1][row2];
}
void distance(int* a, int m, int n, int* d)
{
dim3 threadsPerBlock(m, m);
dim3 blocksPerGrid(1, 1);
if (m > 16) {
threadsPerBlock.x = 16;
threadsPerBlock.y = 16;
blocksPerGrid.x = ceil(double(m) / double(n));
blocksPerGrid.y = ceil(double(m) / double(n));
printf("%d\n", blocksPerGrid.x);
}
cout << "blocks per grid x : " << blocksPerGrid.x << endl;
cout << "blocks per grid y : " << blocksPerGrid.x << endl;
distanceKernel << <blocksPerGrid, threadsPerBlock >> > (a, m, n, d);
{
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess)
printf("kernel launch failed with error \"%s\".\n",
hipGetErrorString(cudaerr));
}
}
int main(int argc, char* argv[])
{
int devID;
hipDeviceProp_t deviceProps;
printf("[%s] - Starting...\n", argv[0]);
// This will pick the best possible CUDA capable device
devID = findCudaDevice(argc, (const char**)argv);
// get device name
checkCudaErrors(hipGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s]\n", deviceProps.name);
//int m = 1024;
//int n = 512;
int m = 64;
int n = 16;
int nmbytes = n * m * sizeof(int);
int mmbytes = m * m * sizeof(int);
// allocate host memory
int* a = 0;
checkCudaErrors(hipHostMalloc((void**)&a, nmbytes));
memset(a, 0, nmbytes);
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
a[i * m + j] = 6;
}
int* d = 0;
checkCudaErrors(hipHostMalloc((void**)&d, mmbytes));
memset(d, 0, mmbytes);
// allocate device memory
int* d_a = 0;
checkCudaErrors(hipMalloc((void**)&d_a, nmbytes));
checkCudaErrors(hipMemset(d_a, 255, nmbytes));
int* d_d = 0;
checkCudaErrors(hipMalloc((void**)&d_d, mmbytes));
checkCudaErrors(hipMemset(d_d, 255, mmbytes));
// set kernel launch configuration
//dim3 threads = dim3(512, 1);
//dim3 blocks = dim3(m * n / threads.x, 1);
// create cuda event handles
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
StopWatchInterface* timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
checkCudaErrors(hipDeviceSynchronize());
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
hipEventRecord(start, 0);
hipMemcpyAsync(d_a, a, nmbytes, hipMemcpyHostToDevice, 0);
hipMemcpyAsync(d_d, d, mmbytes, hipMemcpyHostToDevice, 0);
distance(d_a, m, n, d_d);
hipMemcpyAsync(a, d_a, nmbytes, hipMemcpyDeviceToHost, 0);
hipMemcpyAsync(d, d_d, mmbytes, hipMemcpyDeviceToHost, 0);
hipEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long long counter = 0;
while (hipEventQuery(stop) == hipErrorNotReady)
{
counter++;
}
checkCudaErrors(hipEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.2f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// check the output for correctness
//bool bFinalResults = correct_output(a, n, value);
for (int i = 0; i < m; i++)
{
for (int j = 0; j < m; j++)
printf("%d ", d[i * m + j]);
printf("\n");
}
// release resources
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
checkCudaErrors(hipHostFree(a));
checkCudaErrors(hipFree(d_a));
checkCudaErrors(hipHostFree(d));
checkCudaErrors(hipFree(d_d));
return 0;
}
| ac688c2cd8e69db8855ff09cdfc2270452857221.cu | #include <iostream>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <vector>
using namespace std;
const int m_ = 64;
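// distanceKernel is intended to fill d (an m x m matrix) with pairwise squared
// Euclidean distances between the m rows of a (each row of length n), accumulating the
// per-dimension squared differences in the shared tile tmp before writing them out.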
__global__ void distanceKernel(int* a, int m, int n, int* d)
{
__shared__ int tmp[m_][m_];
int row1 = blockIdx.x * blockDim.x;
int row2 = blockIdx.y * blockDim.y;
__syncthreads();
tmp[row1][row2] += (a[row1 * n + threadIdx.x] - a[row2 * n + threadIdx.y]) * (a[row1 * n + threadIdx.x] - a[row2 * n + threadIdx.y]);
__syncthreads();
d[row1 * m + row2] = tmp[row1][row2];
}
void distance(int* a, int m, int n, int* d)
{
dim3 threadsPerBlock(m, m);
dim3 blocksPerGrid(1, 1);
if (m > 16) {
threadsPerBlock.x = 16;
threadsPerBlock.y = 16;
blocksPerGrid.x = ceil(double(m) / double(n));
blocksPerGrid.y = ceil(double(m) / double(n));
printf("%d\n", blocksPerGrid.x);
}
cout << "blocks per grid x : " << blocksPerGrid.x << endl;
cout << "blocks per grid y : " << blocksPerGrid.x << endl;
distanceKernel << <blocksPerGrid, threadsPerBlock >> > (a, m, n, d);
{
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess)
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(cudaerr));
}
}
int main(int argc, char* argv[])
{
int devID;
cudaDeviceProp deviceProps;
printf("[%s] - Starting...\n", argv[0]);
// This will pick the best possible CUDA capable device
devID = findCudaDevice(argc, (const char**)argv);
// get device name
checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s]\n", deviceProps.name);
//int m = 1024;
//int n = 512;
int m = 64;
int n = 16;
int nmbytes = n * m * sizeof(int);
int mmbytes = m * m * sizeof(int);
// allocate host memory
int* a = 0;
checkCudaErrors(cudaMallocHost((void**)&a, nmbytes));
memset(a, 0, nmbytes);
for (int i = 0; i < m; i++)
{
for (int j = 0; j < n; j++)
a[i * m + j] = 6;
}
int* d = 0;
checkCudaErrors(cudaMallocHost((void**)&d, mmbytes));
memset(d, 0, mmbytes);
// allocate device memory
int* d_a = 0;
checkCudaErrors(cudaMalloc((void**)&d_a, nmbytes));
checkCudaErrors(cudaMemset(d_a, 255, nmbytes));
int* d_d = 0;
checkCudaErrors(cudaMalloc((void**)&d_d, mmbytes));
checkCudaErrors(cudaMemset(d_d, 255, mmbytes));
// set kernel launch configuration
//dim3 threads = dim3(512, 1);
//dim3 blocks = dim3(m * n / threads.x, 1);
// create cuda event handles
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
StopWatchInterface* timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
checkCudaErrors(cudaDeviceSynchronize());
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
cudaEventRecord(start, 0);
cudaMemcpyAsync(d_a, a, nmbytes, cudaMemcpyHostToDevice, 0);
cudaMemcpyAsync(d_d, d, mmbytes, cudaMemcpyHostToDevice, 0);
distance(d_a, m, n, d_d);
cudaMemcpyAsync(a, d_a, nmbytes, cudaMemcpyDeviceToHost, 0);
cudaMemcpyAsync(d, d_d, mmbytes, cudaMemcpyDeviceToHost, 0);
cudaEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long long counter = 0;
while (cudaEventQuery(stop) == cudaErrorNotReady)
{
counter++;
}
checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.2f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// check the output for correctness
//bool bFinalResults = correct_output(a, n, value);
for (int i = 0; i < m; i++)
{
for (int j = 0; j < m; j++)
printf("%d ", d[i * m + j]);
printf("\n");
}
// release resources
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaFreeHost(a));
checkCudaErrors(cudaFree(d_a));
checkCudaErrors(cudaFreeHost(d));
checkCudaErrors(cudaFree(d_d));
return 0;
}
|
8560f4ea9b090d96af946d9b958a00397a030205.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//******************************************
// operators
// based on min-app code written by Oliver Fuhrer, MeteoSwiss
// modified by Ben Cumming, CSCS
//
// implements
// *****************************************
// Description: Contains simple operators which can be used on 3d-meshes
#include "cuda_helpers.h"
#include "data.h"
#include "operators.h"
#include "stats.h"
namespace operators {
// POD type holding information for device
struct DiffusionParams {
int nx;
int ny;
double alpha;
double dxs;
double *x_old;
double *bndN;
double *bndE;
double *bndS;
double *bndW;
};
// TODO : explain what the params variable and setup_params_on_device() do
__device__
DiffusionParams params;
void setup_params_on_device(int nx, int ny, double alpha, double dxs)
{
auto p = DiffusionParams {
nx,
ny,
alpha,
dxs,
data::x_old.device_data(),
data::bndN.device_data(),
data::bndE.device_data(),
data::bndS.device_data(),
data::bndW.device_data()
};
cuda_check_status(
hipMemcpyToSymbol(params, &p, sizeof(DiffusionParams))
);
}
namespace kernels {
__global__
void stencil_interior(double* S, const double *U)
{
// TODO : implement the interior stencil
// EXTRA : can you make it use shared memory?
// S(i,j) = -(4. + alpha) * U(i,j) // central point
// + U(i-1,j) + U(i+1,j) // east and west
// + U(i,j-1) + U(i,j+1) // north and south
// + alpha * x_old(i,j)
// + dxs * U(i,j) * (1.0 - U(i,j));
}
__global__
void stencil_east_west(double* S, const double *U) {
auto j = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
if(j>0 && j<ny-1) {
// EAST : i = nx-1
auto pos = find_pos(nx-1, j);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + params.bndE[j]
+ U[pos-nx] + U[pos+nx]
+ alpha*params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// TODO : do the stencil on the WEST side
// WEST : i = 0
}
}
__global__
void stencil_north_south(double* S, const double *U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
if(i>0 && i<nx-1) {
// NORTH : j = ny -1
auto pos = find_pos(i, ny-1);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos+1]
+ U[pos-nx] + params.bndN[i]
+ alpha*params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// TODO : do the stencil on the SOUTH side
// SOUTH : j = 0
}
}
__global__
void stencil_corners(double* S, const double* U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
// only 1 thread executes this kernel
if(i==0) {
// NORTH-EAST
auto pos = find_pos(nx-1, ny-1);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + params.bndE[ny-1] // east and west
+ U[pos-nx] + params.bndN[nx-1] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-EAST
pos = find_pos(nx-1, 0);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + params.bndE[0] // east and west
+ params.bndS[nx-1]+ U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-WEST
pos = find_pos(0, 0);
S[pos] = -(4. + alpha) * U[pos]
+ params.bndW[0] + U[pos+1] // east and west
+ params.bndS[0] + U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// NORTH-WEST
pos = find_pos(0, ny-1);
S[pos] = -(4. + alpha) * U[pos]
+ params.bndW[nx-1]+ U[pos+1] // east and west
+ U[pos-nx] + params.bndN[0] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
}
void diffusion(data::Field const& U, data::Field &S)
{
using data::options;
using data::bndE;
using data::bndW;
using data::bndN;
using data::bndS;
using data::x_old;
double dxs = 1000. * (options.dx * options.dx);
double alpha = options.alpha;
int nx = options.nx;
int ny = options.ny;
static bool is_initialized = false;
if(!is_initialized) {
setup_params_on_device(nx, ny, alpha, dxs);
is_initialized = true;
}
// TODO: what is the purpose of the following?
auto calculate_grid_dim = [] (size_t n, size_t block_dim) {
return (n+block_dim-1)/block_dim;
};
// TODO: apply stencil to the interior grid points
hipDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("interior kernel"); // TODO: remove after debugging
// apply stencil at east-west boundary
auto bnd_grid_dim_y = calculate_grid_dim(ny, 64);
hipLaunchKernelGGL(( kernels::stencil_east_west), dim3(bnd_grid_dim_y), dim3(64), 0, 0, S.device_data(), U.device_data());
hipDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("east-west kernel"); // TODO: remove after debugging
// apply stencil at north-south boundary
auto bnd_grid_dim_x = calculate_grid_dim(nx, 64);
hipLaunchKernelGGL(( kernels::stencil_north_south), dim3(bnd_grid_dim_x), dim3(64), 0, 0, S.device_data(), U.device_data());
hipDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("north-south kernel"); // TODO: remove after debugging
// apply stencil at corners
hipLaunchKernelGGL(( kernels::stencil_corners), dim3(1), dim3(1), 0, 0, S.device_data(), U.device_data());
hipDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("corner kernel"); // TODO: remove after debugging
}
} // namespace operators
| 8560f4ea9b090d96af946d9b958a00397a030205.cu | //******************************************
// operators
// based on min-app code written by Oliver Fuhrer, MeteoSwiss
// modified by Ben Cumming, CSCS
//
// implements
// *****************************************
// Description: Contains simple operators which can be used on 3d-meshes
#include "cuda_helpers.h"
#include "data.h"
#include "operators.h"
#include "stats.h"
namespace operators {
// POD type holding information for device
struct DiffusionParams {
int nx;
int ny;
double alpha;
double dxs;
double *x_old;
double *bndN;
double *bndE;
double *bndS;
double *bndW;
};
// TODO : explain what the params variable and setup_params_on_device() do
__device__
DiffusionParams params;
void setup_params_on_device(int nx, int ny, double alpha, double dxs)
{
auto p = DiffusionParams {
nx,
ny,
alpha,
dxs,
data::x_old.device_data(),
data::bndN.device_data(),
data::bndE.device_data(),
data::bndS.device_data(),
data::bndW.device_data()
};
cuda_check_status(
cudaMemcpyToSymbol(params, &p, sizeof(DiffusionParams))
);
}
namespace kernels {
__global__
void stencil_interior(double* S, const double *U)
{
// TODO : implement the interior stencil
// EXTRA : can you make it use shared memory?
// S(i,j) = -(4. + alpha) * U(i,j) // central point
// + U(i-1,j) + U(i+1,j) // east and west
// + U(i,j-1) + U(i,j+1) // north and south
// + alpha * x_old(i,j)
// + dxs * U(i,j) * (1.0 - U(i,j));
}
__global__
void stencil_east_west(double* S, const double *U) {
auto j = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
if(j>0 && j<ny-1) {
// EAST : i = nx-1
auto pos = find_pos(nx-1, j);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + params.bndE[j]
+ U[pos-nx] + U[pos+nx]
+ alpha*params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// TODO : do the stencil on the WEST side
// WEST : i = 0
}
}
__global__
void stencil_north_south(double* S, const double *U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
if(i>0 && i<nx-1) {
// NORTH : j = ny -1
auto pos = find_pos(i, ny-1);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + U[pos+1]
+ U[pos-nx] + params.bndN[i]
+ alpha*params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// TODO : do the stencil on the SOUTH side
// SOUTH : j = 0
}
}
__global__
void stencil_corners(double* S, const double* U) {
auto i = threadIdx.x + blockDim.x*blockIdx.x;
auto nx = params.nx;
auto ny = params.ny;
auto alpha = params.alpha;
auto dxs = params.dxs;
auto find_pos = [&nx] (size_t i, size_t j) {
return i + j * nx;
};
// only 1 thread executes this kernel
if(i==0) {
// NORTH-EAST
auto pos = find_pos(nx-1, ny-1);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + params.bndE[ny-1] // east and west
+ U[pos-nx] + params.bndN[nx-1] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-EAST
pos = find_pos(nx-1, 0);
S[pos] = -(4. + alpha) * U[pos]
+ U[pos-1] + params.bndE[0] // east and west
+ params.bndS[nx-1]+ U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// SOUTH-WEST
pos = find_pos(0, 0);
S[pos] = -(4. + alpha) * U[pos]
+ params.bndW[0] + U[pos+1] // east and west
+ params.bndS[0] + U[pos+nx] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
// NORTH-WEST
pos = find_pos(0, ny-1);
S[pos] = -(4. + alpha) * U[pos]
+ params.bndW[nx-1]+ U[pos+1] // east and west
+ U[pos-nx] + params.bndN[0] // north and south
+ alpha * params.x_old[pos]
+ dxs * U[pos] * (1.0 - U[pos]);
}
}
}
void diffusion(data::Field const& U, data::Field &S)
{
using data::options;
using data::bndE;
using data::bndW;
using data::bndN;
using data::bndS;
using data::x_old;
double dxs = 1000. * (options.dx * options.dx);
double alpha = options.alpha;
int nx = options.nx;
int ny = options.ny;
static bool is_initialized = false;
if(!is_initialized) {
setup_params_on_device(nx, ny, alpha, dxs);
is_initialized = true;
}
// TODO: what is the purpose of the following?
auto calculate_grid_dim = [] (size_t n, size_t block_dim) {
return (n+block_dim-1)/block_dim;
};
// TODO: apply stencil to the interior grid points
cudaDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("interior kernel"); // TODO: remove after debugging
// apply stencil at east-west boundary
auto bnd_grid_dim_y = calculate_grid_dim(ny, 64);
kernels::stencil_east_west<<<bnd_grid_dim_y, 64>>>(S.device_data(), U.device_data());
cudaDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("east-west kernel"); // TODO: remove after debugging
// apply stencil at north-south boundary
auto bnd_grid_dim_x = calculate_grid_dim(nx, 64);
kernels::stencil_north_south<<<bnd_grid_dim_x, 64>>>(S.device_data(), U.device_data());
cudaDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("north-south kernel"); // TODO: remove after debugging
// apply stencil at corners
kernels::stencil_corners<<<1, 1>>>(S.device_data(), U.device_data());
cudaDeviceSynchronize(); // TODO: remove after debugging
cuda_check_last_kernel("corner kernel"); // TODO: remove after debugging
}
} // namespace operators
|
41a90b3345e21a5569a8f3dfd84109eb246d7a02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <math.h>
#include <hdf5.h>
__host__ void updateTimer(time_t t0, int tstep, char str[]) {
int elapsedTime=(int)(time(0)-t0);
sprintf(str, "%02d:%02d:%02d", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60);
}
__host__ void exec(char *format, ...) {
char str[1024];
va_list ap;
va_start(ap, format);
vsprintf(str, format, ap);
system(str);
}
__host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) {
char filename[1024];
va_list ap;
va_start(ap, format);
vsprintf(filename, format, ap);
hid_t file, dataset, filespace, memspace;
hsize_t dimsm[3] = { Ni, Nj, Nk };
hsize_t start[3] = { is, js, ks };
hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke };
memspace = H5Screate_simple(3, dimsm, 0);
filespace = H5Screate_simple(3, count, 0);
file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT);
H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0);
H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]);
H5Dclose(dataset);
H5Sclose(filespace);
H5Sclose(memspace);
H5Fclose(file);
}
__host__ void print_array(int Nx, int Ny, int Nz, float ***a) {
int j,k;
for (j=0; j<Ny; j++) {
for (k=0; k<Nz; k++) {
printf("%1.4f\t", a[Nx/2][j][k]);
}
printf("\n");
}
printf("\n");
}
__host__ float ***makeArray(int Nx, int Ny, int Nz) {
float ***f;
f = (float ***) calloc (Nx, sizeof(float **));
f[0] = (float **) calloc (Ny*Nx, sizeof(float *));
f[0][0] = (float *) calloc (Nz*Ny*Nx, sizeof(float));
for (int i=0; i<Nx; i++) f[i] = f[0] + i*Ny;
for (int i=0; i<Ny*Nx; i++) f[0][i] = f[0][0] + i*Nz;
return f;
}
__host__ void set_geometry(int Nx, int Ny, int Nz,
float ***CEx, float ***CEy, float ***CEz) {
int i,j,k;
for (i=0; i<Nx; i++) {
for (j=0; j<Ny; j++) {
for (k=0; k<Nz; k++) {
CEx[i][j][k] = 0.5;
CEy[i][j][k] = 0.5;
CEz[i][j][k] = 0.5;
}
}
}
}
__global__ void initArrays(int Nx, int Ny, int Nz, int Nzpit,
float *Ex, float *Ey, float *Ez,
float *Hx, float *Hy, float *Hz) {
int idx;
idx = blockIdx.x*blockDim.x + threadIdx.x;
//printf("gridDim.x=%d\n",gridDim.x);
//printf("blockIdx.x=%d, blockDim.x=%d, threadIdx.x=%d\n", blockIdx.x, blockDim.x, threadIdx.x);
if ( idx < Nx*Ny*Nzpit ) {
Ex[idx] = 0;
Ey[idx] = 0;
Ez[idx] = 0;
Hx[idx] = 0;
Hy[idx] = 0;
Hz[idx] = 0;
}
}
__global__ void updateE(int Nx, int Ny, int Nz, int Nzpit,
float *Ex, float *Ey, float *Ez,
float *Hx, float *Hy, float *Hz,
float *CEx, float *CEy, float *CEz) {
int idx;
idx = blockIdx.x*blockDim.x + threadIdx.x;
//printf("gridDim.x=%d\n",gridDim.x);
//printf("blockIdx.x=%d, blockDim.x=%d, threadIdx.x=%d\n", blockIdx.x, blockDim.x, threadIdx.x);
if ( idx < Nx*Ny*Nzpit ) {
int i,j,k;
int Nyz = Ny*Nzpit;
i = idx/Nyz;
j = ( idx - i*Nyz )/Nzpit;
k = idx - i*Nyz - j*Nzpit;
//printf("[%d](%d,%d,%d)\n",idx,i,j,k);
__shared__ float hx[500], hy[500], hz[500]; // per-block z-line tiles; assumes Nz <= 500 and blockDim.x == Nzpit
if ( k < Nz ) {
//printf("k=%d\n",k);
hx[k] = Hx[idx];
hy[k] = Hy[idx];
hz[k] = Hz[idx];
}
__syncthreads();
if ( k < Nz ) {
if ( j<Ny-1 && k<Nz-1 ) Ex[idx] += CEx[idx]*( Hz[idx+Nzpit] - hz[k] - hy[k+1] + hy[k] );
if ( i<Nx-1 && k<Nz-1 ) Ey[idx] += CEy[idx]*( hx[k+1] - hx[k] - Hz[idx+Nyz] + hz[k] );
if ( i<Nx-1 && j<Ny-1 ) Ez[idx] += CEz[idx]*( Hy[idx+Nyz] - hy[k] - Hx[idx+Nzpit] + hx[k] );
}
}
}
__global__ void updateSrc(int Nx, int Ny, int Nz, int Nzpit,
float *Ex, int tstep) {
int idx, ijk;
idx = blockIdx.x*blockDim.x + threadIdx.x;
ijk = idx*(Ny)*(Nzpit) + (Ny/2)*(Nzpit) + (Nz/2);
//printf("idx=%d, ijk=%d\n", idx, ijk);
//Ex[ijk] += __sinf(0.1*tstep);
if ( idx < Nx ) {
Ex[ijk] += sin(0.1*tstep);
}
}
__global__ void updateH(int Nx, int Ny, int Nz, int Nzpit,
float *Ex, float *Ey, float *Ez,
float *Hx, float *Hy, float *Hz) {
int idx;
idx = blockIdx.x*blockDim.x + threadIdx.x;
if ( idx < Nx*Ny*Nzpit ) {
int i,j,k;
int Nyz = Ny*Nzpit;
i = idx/Nyz;
j = ( idx - i*Nyz )/Nzpit;
k = idx - i*Nyz - j*Nzpit;
__shared__ float ex[500], ey[500], ez[500];
if ( k < Nz ) {
ex[k] = Ex[idx];
ey[k] = Ey[idx];
ez[k] = Ez[idx];
}
__syncthreads();
if ( k < Nz ) {
if ( j>0 && k>0 ) Hx[idx] -= 0.5*( ez[k] - Ez[idx-Nzpit] - ey[k] + ey[k-1] );
if ( i>0 && k>0 ) Hy[idx] -= 0.5*( ex[k] - ex[k-1] - ez[k] + Ez[idx-Nyz] );
if ( i>0 && j>0 ) Hz[idx] -= 0.5*( ey[k] - Ey[idx-Nyz] - ex[k] + Ex[idx-Nzpit] );
}
}
}
int main() {
int tstep;
char time_str[32];
time_t t0;
// Set the parameters
int Nx, Ny, Nz, TMAX;
Nx = 100;
Ny = 200;
Nz = 500;
TMAX = 1000;
// Allocate host memory
//float ***Ex;
float ***CEx, ***CEy, ***CEz;
//Ex = makeArray(Nx, Ny, Nz);
CEx = makeArray(Nx, Ny, Nz);
CEy = makeArray(Nx, Ny, Nz);
CEz = makeArray(Nx, Ny, Nz);
// Geometry
set_geometry(Nx, Ny, Nz, CEx, CEy, CEz);
// Allocate device memory
float *devEx, *devEy, *devEz;
float *devHx, *devHy, *devHz;
float *devCEx, *devCEy, *devCEz;
int z_size = Nz*sizeof(float);
size_t pitch;
hipMallocPitch ( (void**) &devEx, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devEy, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devEz, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devCEx, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devCEy, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devCEz, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devHx, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devHy, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devHz, &pitch, z_size, Nx*Ny );
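// Note: every hipMallocPitch call above uses the same row width (z_size) and height,
// so the pitch written back is the same each time and can safely be reused below.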
// Copy arrays from host to device
hipMemcpy2D ( devCEx, pitch, CEx[0][0], z_size, z_size, Nx*Ny, hipMemcpyHostToDevice );
hipMemcpy2D ( devCEy, pitch, CEy[0][0], z_size, z_size, Nx*Ny, hipMemcpyHostToDevice );
hipMemcpy2D ( devCEz, pitch, CEz[0][0], z_size, z_size, Nx*Ny, hipMemcpyHostToDevice );
int Nz_pitch = pitch/sizeof(float); // pitch is in bytes; convert to the padded z-extent in elements
printf("pitch= %zu, Nz_pitch= %d\n", pitch, Nz_pitch);
// Number of thread blocks in the grid
int N = Nx*Ny*Nz_pitch;
int TPB = Nz_pitch;
int BPG = N%TPB == 0 ? N/TPB : N/TPB + 1;
printf("TPB=%d, BPG=%d\n", TPB, BPG);
dim3 gridDim(BPG);
// Number of threads per block
dim3 blockDim(TPB);
//int BPGsrc = Nx%TPB == 0 ? Nx/TPB : Nx/TPB + 1;
int BPGsrc = 1;
dim3 gridDimsrc(BPGsrc);
dim3 blockDimsrc(Nx);
// Initialize the device arrays
hipLaunchKernelGGL(( initArrays) , dim3(gridDim),dim3(blockDim), 0, 0, Nx, Ny, Nz, Nz_pitch, devEx, devEy, devEz, devHx, devHy, devHz );
// Main time loop
t0 = time(0);
//for ( tstep=1; tstep<=TMAX; tstep++) {
for ( tstep=1; tstep<=10; tstep++) {
// Update on the GPU
hipLaunchKernelGGL(( updateE) , dim3(gridDim),dim3(blockDim), 0, 0, Nx, Ny, Nz, Nz_pitch, devEx, devEy, devEz, devHx, devHy, devHz, devCEx, devCEy, devCEz );
//updateSrc <<<gridDimsrc,blockDimsrc>>> (Nx, Ny, Nz, Nz_pitch, devEx, tstep);
//updateH <<<gridDim,blockDim>>> (Nx, Ny, Nz, Nz_pitch, devEx, devEy, devEz, devHx, devHy, devHz);
/*
//if ( tstep/10*10 == tstep ) {
// Copy arrays from device to host
hipMemcpy2D( Ex[0][0], z_size, devEx, pitch, z_size, Nx*Ny, hipMemcpyDeviceToHost );
//print_array(Nx, Ny, Nz, Ex);
dumpToH5(Nx, Ny, Nz, Nx/2, 0, 0, Nx/2, Ny-1, Nz-1, Ex, "gpu_png/Ex-%05d.h5", tstep);
exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep);
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
//}
*/
}
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
}
| 41a90b3345e21a5569a8f3dfd84109eb246d7a02.cu | #include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <math.h>
#include <hdf5.h>
__host__ void updateTimer(time_t t0, int tstep, char str[]) {
int elapsedTime=(int)(time(0)-t0);
sprintf(str, "%02d:%02d:%02d", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60);
}
__host__ void exec(char *format, ...) {
char str[1024];
va_list ap;
va_start(ap, format);
vsprintf(str, format, ap);
system(str);
}
__host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) {
char filename[1024];
va_list ap;
va_start(ap, format);
vsprintf(filename, format, ap);
hid_t file, dataset, filespace, memspace;
hsize_t dimsm[3] = { Ni, Nj, Nk };
hsize_t start[3] = { is, js, ks };
hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke };
memspace = H5Screate_simple(3, dimsm, 0);
filespace = H5Screate_simple(3, count, 0);
file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT);
H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0);
H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]);
H5Dclose(dataset);
H5Sclose(filespace);
H5Sclose(memspace);
H5Fclose(file);
}
__host__ void print_array(int Nx, int Ny, int Nz, float ***a) {
int j,k;
for (j=0; j<Ny; j++) {
for (k=0; k<Nz; k++) {
printf("%1.4f\t", a[Nx/2][j][k]);
}
printf("\n");
}
printf("\n");
}
__host__ float ***makeArray(int Nx, int Ny, int Nz) {
float ***f;
f = (float ***) calloc (Nx, sizeof(float **));
f[0] = (float **) calloc (Ny*Nx, sizeof(float *));
f[0][0] = (float *) calloc (Nz*Ny*Nx, sizeof(float));
for (int i=0; i<Nx; i++) f[i] = f[0] + i*Ny;
for (int i=0; i<Ny*Nx; i++) f[0][i] = f[0][0] + i*Nz;
return f;
}
__host__ void set_geometry(int Nx, int Ny, int Nz,
float ***CEx, float ***CEy, float ***CEz) {
int i,j,k;
for (i=0; i<Nx; i++) {
for (j=0; j<Ny; j++) {
for (k=0; k<Nz; k++) {
CEx[i][j][k] = 0.5;
CEy[i][j][k] = 0.5;
CEz[i][j][k] = 0.5;
}
}
}
}
__global__ void initArrays(int Nx, int Ny, int Nz, int Nzpit,
float *Ex, float *Ey, float *Ez,
float *Hx, float *Hy, float *Hz) {
int idx;
idx = blockIdx.x*blockDim.x + threadIdx.x;
//printf("gridDim.x=%d\n",gridDim.x);
//printf("blockIdx.x=%d, blockDim.x=%d, threadIdx.x=%d\n", blockIdx.x, blockDim.x, threadIdx.x);
if ( idx < Nx*Ny*Nzpit ) {
Ex[idx] = 0;
Ey[idx] = 0;
Ez[idx] = 0;
Hx[idx] = 0;
Hy[idx] = 0;
Hz[idx] = 0;
}
}
__global__ void updateE(int Nx, int Ny, int Nz, int Nzpit,
float *Ex, float *Ey, float *Ez,
float *Hx, float *Hy, float *Hz,
float *CEx, float *CEy, float *CEz) {
int idx;
idx = blockIdx.x*blockDim.x + threadIdx.x;
//printf("gridDim.x=%d\n",gridDim.x);
//printf("blockIdx.x=%d, blockDim.x=%d, threadIdx.x=%d\n", blockIdx.x, blockDim.x, threadIdx.x);
if ( idx < Nx*Ny*Nzpit ) {
int i,j,k;
int Nyz = Ny*Nzpit;
i = idx/Nyz;
j = ( idx - i*Nyz )/Nzpit;
k = idx - i*Nyz - j*Nzpit;
//printf("[%d](%d,%d,%d)\n",idx,i,j,k);
__shared__ float hx[500], hy[500], hz[500]; // per-block z-line tiles; assumes Nz <= 500 and blockDim.x == Nzpit
if ( k < Nz ) {
//printf("k=%d\n",k);
hx[k] = Hx[idx];
hy[k] = Hy[idx];
hz[k] = Hz[idx];
}
__syncthreads();
if ( k < Nz ) {
if ( j<Ny-1 && k<Nz-1 ) Ex[idx] += CEx[idx]*( Hz[idx+Nzpit] - hz[k] - hy[k+1] + hy[k] );
if ( i<Nx-1 && k<Nz-1 ) Ey[idx] += CEy[idx]*( hx[k+1] - hx[k] - Hz[idx+Nyz] + hz[k] );
if ( i<Nx-1 && j<Ny-1 ) Ez[idx] += CEz[idx]*( Hy[idx+Nyz] - hy[k] - Hx[idx+Nzpit] + hx[k] );
}
}
}
__global__ void updateSrc(int Nx, int Ny, int Nz, int Nzpit,
float *Ex, int tstep) {
int idx, ijk;
idx = blockIdx.x*blockDim.x + threadIdx.x;
ijk = idx*(Ny)*(Nzpit) + (Ny/2)*(Nzpit) + (Nz/2);
//printf("idx=%d, ijk=%d\n", idx, ijk);
//Ex[ijk] += __sinf(0.1*tstep);
if ( idx < Nx ) {
Ex[ijk] += sin(0.1*tstep);
}
}
__global__ void updateH(int Nx, int Ny, int Nz, int Nzpit,
float *Ex, float *Ey, float *Ez,
float *Hx, float *Hy, float *Hz) {
int idx;
idx = blockIdx.x*blockDim.x + threadIdx.x;
if ( idx < Nx*Ny*Nzpit ) {
int i,j,k;
int Nyz = Ny*Nzpit;
i = idx/Nyz;
j = ( idx - i*Nyz )/Nzpit;
k = idx - i*Nyz - j*Nzpit;
__shared__ float ex[500], ey[500], ez[500];
if ( k < Nz ) {
ex[k] = Ex[idx];
ey[k] = Ey[idx];
ez[k] = Ez[idx];
}
__syncthreads();
if ( k < Nz ) {
if ( j>0 && k>0 ) Hx[idx] -= 0.5*( ez[k] - Ez[idx-Nzpit] - ey[k] + ey[k-1] );
if ( i>0 && k>0 ) Hy[idx] -= 0.5*( ex[k] - ex[k-1] - ez[k] + Ez[idx-Nyz] );
if ( i>0 && j>0 ) Hz[idx] -= 0.5*( ey[k] - Ey[idx-Nyz] - ex[k] + Ex[idx-Nzpit] );
}
}
}
int main() {
int tstep;
char time_str[32];
time_t t0;
// Set the parameters
int Nx, Ny, Nz, TMAX;
Nx = 100;
Ny = 200;
Nz = 500;
TMAX = 1000;
// Allocate host memory
//float ***Ex;
float ***CEx, ***CEy, ***CEz;
//Ex = makeArray(Nx, Ny, Nz);
CEx = makeArray(Nx, Ny, Nz);
CEy = makeArray(Nx, Ny, Nz);
CEz = makeArray(Nx, Ny, Nz);
// Geometry
set_geometry(Nx, Ny, Nz, CEx, CEy, CEz);
// Allocate device memory
float *devEx, *devEy, *devEz;
float *devHx, *devHy, *devHz;
float *devCEx, *devCEy, *devCEz;
int z_size = Nz*sizeof(float);
size_t pitch;
cudaMallocPitch ( (void**) &devEx, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devEy, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devEz, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devCEx, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devCEy, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devCEz, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devHx, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devHy, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devHz, &pitch, z_size, Nx*Ny );
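// Note: every cudaMallocPitch call above uses the same row width (z_size) and height,
// so the pitch written back is the same each time and can safely be reused below.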
// Copy arrays from host to device
cudaMemcpy2D ( devCEx, pitch, CEx[0][0], z_size, z_size, Nx*Ny, cudaMemcpyHostToDevice );
cudaMemcpy2D ( devCEy, pitch, CEy[0][0], z_size, z_size, Nx*Ny, cudaMemcpyHostToDevice );
cudaMemcpy2D ( devCEz, pitch, CEz[0][0], z_size, z_size, Nx*Ny, cudaMemcpyHostToDevice );
int Nz_pitch = pitch/sizeof(float); // pitch is in bytes; convert to the padded z-extent in elements
printf("pitch= %zu, Nz_pitch= %d\n", pitch, Nz_pitch);
// Number of thread blocks in the grid
int N = Nx*Ny*Nz_pitch;
int TPB = Nz_pitch;
int BPG = N%TPB == 0 ? N/TPB : N/TPB + 1;
printf("TPB=%d, BPG=%d\n", TPB, BPG);
dim3 gridDim(BPG);
// Number of threads per block
dim3 blockDim(TPB);
//int BPGsrc = Nx%TPB == 0 ? Nx/TPB : Nx/TPB + 1;
int BPGsrc = 1;
dim3 gridDimsrc(BPGsrc);
dim3 blockDimsrc(Nx);
// Initialize the device arrays
initArrays <<<gridDim,blockDim>>> ( Nx, Ny, Nz, Nz_pitch, devEx, devEy, devEz, devHx, devHy, devHz );
// Main time loop
t0 = time(0);
//for ( tstep=1; tstep<=TMAX; tstep++) {
for ( tstep=1; tstep<=10; tstep++) {
// Update on the GPU
updateE <<<gridDim,blockDim>>> ( Nx, Ny, Nz, Nz_pitch, devEx, devEy, devEz, devHx, devHy, devHz, devCEx, devCEy, devCEz );
//updateSrc <<<gridDimsrc,blockDimsrc>>> (Nx, Ny, Nz, Nz_pitch, devEx, tstep);
//updateH <<<gridDim,blockDim>>> (Nx, Ny, Nz, Nz_pitch, devEx, devEy, devEz, devHx, devHy, devHz);
/*
//if ( tstep/10*10 == tstep ) {
// Copy arrays from device to host
cudaMemcpy2D( Ex[0][0], z_size, devEx, pitch, z_size, Nx*Ny, cudaMemcpyDeviceToHost );
//print_array(Nx, Ny, Nz, Ex);
dumpToH5(Nx, Ny, Nz, Nx/2, 0, 0, Nx/2, Ny-1, Nz-1, Ex, "gpu_png/Ex-%05d.h5", tstep);
exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep);
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
//}
*/
}
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
}
|
7447c48ad72a97c558f30f3fdb6fc3cc024b3c3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernelGetPhi2(const int N, double *T, double *q)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
T[i] = q[i] * q[i];
}
} | 7447c48ad72a97c558f30f3fdb6fc3cc024b3c3e.cu | #include "includes.h"
__global__ void kernelGetPhi2(const int N, double *T, double *q)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
T[i] = q[i] * q[i];
}
} |
ad2ca47ee64bd33b9c095d9f91cfbea84bc5c974.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 15
#define F 1
#define ITERATIONS (unsigned)( 10000 )
#define ITERATIONS2 REPLACE_ITERATIONS
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line ){
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
//int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
//unsigned j=0, k=0;
int m_sum=N;
// m_sum = A[tid*F];
for(unsigned j=0; j<ITERATIONS2; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
C[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))]=m_sum;
}
m_sum+=j;
}
__syncthreads();
}
// Host code
int main(){
printf("Power Microbenchmarks\n");
//int N = LINE_SIZE*SETS*ASSOC;
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A, size) );
//checkCudaErrors( hipMalloc((void**)&d_B, size) );
checkCudaErrors( hipMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
//checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
hipLaunchKernelGGL(( PowerKernal), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A, d_C, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
hipFree(d_A);
//if (d_B)
// hipFree(d_B);
if (d_C)
hipFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Fills an integer array; note that rand() / RAND_MAX is integer division, so nearly every entry ends up 0.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
| ad2ca47ee64bd33b9c095d9f91cfbea84bc5c974.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
#include <math.h>
// Includes
#include <stdio.h>
#include "../include/ContAcq-IntClk.h"
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 15
#define F 1
#define ITERATIONS (unsigned)( 10000 )
#define ITERATIONS2 REPLACE_ITERATIONS
#define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
#define LINE_SIZE 128
#define SETS 64
#define ASSOC 6
#define SIMD_WIDTH 32
// Variables
int* h_A;
int* h_B;
int* h_C;
int* d_A;
int* d_B;
int* d_C;
bool noprompt = false;
unsigned int my_timer;
// Functions
void CleanupResources(void);
void RandomInit(int*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal(int* A, int* C, int N){
int tid = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
//int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
//unsigned j=0, k=0;
int m_sum=N;
// m_sum = A[tid*F];
for(unsigned j=0; j<ITERATIONS2; j++){
for(unsigned k=0; k<ITERATIONS; ++k){
C[((unsigned)(tid*F)+(unsigned)(k*max_tid*F))]=m_sum;
}
m_sum+=j;
}
__syncthreads();
}
// Host code
int main(){
printf("Power Microbenchmarks\n");
//int N = LINE_SIZE*SETS*ASSOC;
unsigned N =((unsigned)(max_tid*F)+(unsigned)(ITERATIONS*max_tid*F));
size_t size = N * sizeof(int);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
if (h_A == 0) CleanupResources();
//h_B = (float*)malloc(size);
//if (h_B == 0) CleanupResources();
h_C = (int*)malloc(size);
if (h_C == 0) CleanupResources();
// Initialize input vectors
RandomInit(h_A, N);
//RandomInit(h_B, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
//checkCudaErrors( cudaMalloc((void**)&d_B, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
//checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
//VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutStopTimer(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
getLastCudaError("kernel launch failure");
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
CleanupResources();
return 0;
}
void CleanupResources(void){
// Free device memory
if (d_A)
cudaFree(d_A);
//if (d_B)
// cudaFree(d_B);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
// if (h_B)
// free(h_B);
if (h_C)
free(h_C);
}
// Fills an integer array; note that rand() / RAND_MAX is integer division, so nearly every entry ends up 0.
void RandomInit(int* data, int n){
for (int i = 0; i < n; ++i)
data[i] = (int)(rand() / RAND_MAX);
}
|
75dec93cf00e1aa6b2ef211d4a8cfcfd59173e4d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "cudaTools.h"
#include <stdio.h>
#include <reductiontool.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void piDevice(float* ptrDevPi, int nbSlice);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static __device__ void reduceIntraThread(float* ptrDevTabSM, int nbSlice);
static __device__ float aireRectangle(int sliceNumber, int nombreSlice);
static __device__ float f(float x);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* output : void required !!
*/
__global__ void piDevice(float* ptrDevPi, int nbSlice)
{
extern __shared__ float tabSM[]; // read by all threads, but allocated only once (per block)
reduceIntraThread(tabSM, nbSlice);
__syncthreads();
ReductionTool<float>::reduce(tabSM , ptrDevPi);
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__device__ void reduceIntraThread(float* ptrDevTabSM, int nbSlice)
{
float sumThread = 0;
const int NB_THREAD=Indice2D::nbThread();
const int TID=Indice2D::tid();
const int TID_BLOC=Indice2D::tidLocalBlock();
int s = TID;
while(s < nbSlice)
{
sumThread += aireRectangle(s, nbSlice);
s+= NB_THREAD;
}
ptrDevTabSM[TID_BLOC] = sumThread / nbSlice;
}
__device__ float aireRectangle(int sliceNumber, int nombreSlice)
{
const float x = (1.0f / (float) nombreSlice) * (float) sliceNumber;
return f(x);
}
__device__ float f(float x)
{
return 4.0f/ (1.0f + x * x);
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 75dec93cf00e1aa6b2ef211d4a8cfcfd59173e4d.cu | #include "Indice2D.h"
#include "cudaTools.h"
#include <stdio.h>
#include <reductiontool.h>
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void piDevice(float* ptrDevPi, int nbSlice);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
static __device__ void reduceIntraThread(float* ptrDevTabSM, int nbSlice);
static __device__ float aireRectangle(int sliceNumber, int nombreSlice);
static __device__ float f(float x);
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
* output : void required !!
*/
__global__ void piDevice(float* ptrDevPi, int nbSlice)
{
extern __shared__ float tabSM[]; // read by all threads, but allocated only once (per block)
reduceIntraThread(tabSM, nbSlice);
__syncthreads();
ReductionTool<float>::reduce(tabSM , ptrDevPi);
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
__device__ void reduceIntraThread(float* ptrDevTabSM, int nbSlice)
{
float sumThread = 0;
const int NB_THREAD=Indice2D::nbThread();
const int TID=Indice2D::tid();
const int TID_BLOC=Indice2D::tidLocalBlock();
int s = TID;
while(s < nbSlice)
{
sumThread += aireRectangle(s, nbSlice);
s+= NB_THREAD;
}
ptrDevTabSM[TID_BLOC] = sumThread / nbSlice;
}
__device__ float aireRectangle(int sliceNumber, int nombreSlice)
{
const float x = (1.0f / (float) nombreSlice) * (float) sliceNumber;
return f(x);
}
__device__ float f(float x)
{
return 4.0f/ (1.0f + x * x);
}
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
a4ff32636cda801d4e9ba2ab28afd44208e79c27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void kernel_saxpy( int n, float a, float * x, float * y, float * z ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < n ) {
z[i] = a * x[i] + y [i];
}
}
void saxpy( int nblocks, int nthreads, int n, float a, float * x, float * y, float * z ) {
hipLaunchKernelGGL(( kernel_saxpy), dim3(nblocks), dim3(nthreads), 0, 0, n, a, x, y, z );
}
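// Illustrative host-side usage (a sketch only; the buffer names and sizes below are
// assumptions, and allocation/copies with hipMalloc/hipMemcpy are not shown):
// int n = 1 << 20;
// saxpy((n + 255) / 256, 256, n, 2.0f, d_x, d_y, d_z); // d_x, d_y, d_z: device buffers of n floats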
| a4ff32636cda801d4e9ba2ab28afd44208e79c27.cu | #include "cuda.h"
__global__ void kernel_saxpy( int n, float a, float * x, float * y, float * z ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i < n ) {
z[i] = a * x[i] + y [i];
}
}
void saxpy( int nblocks, int nthreads, int n, float a, float * x, float * y, float * z ) {
kernel_saxpy<<<nblocks, nthreads>>>( n, a, x, y, z );
}
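// Illustrative host-side usage (a sketch only; the buffer names and sizes below are
// assumptions, and allocation/copies with cudaMalloc/cudaMemcpy are not shown):
// int n = 1 << 20;
// saxpy((n + 255) / 256, 256, n, 2.0f, d_x, d_y, d_z); // d_x, d_y, d_z: device buffers of n floats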
|
9a9ee8ac3b2a93e4128dc61c85203807c813f5b3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
using namespace std;
__global__ void mult(const int *pA, const int *pB, int *pC, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
pC[i] = pA[i] * pB[i];
}
int main(void)
{
const int N = 8192;
vector<int> a(N), b(N), c(N);
for (int i = 0 ; i < N ; i++)
{
a[i] = i;
b[i] = -i;
}
int *cuA, *cuB, *cuC;
hipMalloc(&cuA, N*sizeof(int));
hipMalloc(&cuB, N*sizeof(int));
hipMalloc(&cuC, N*sizeof(int));
hipMemcpy(cuA, a.data(), N*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(cuB, b.data(), N*sizeof(int), hipMemcpyHostToDevice);
int blockSize = 256;
int numBlocks = N/blockSize + ((N%blockSize == 0)?0:1);
hipLaunchKernelGGL(( mult), dim3(numBlocks), dim3(blockSize), 0, 0, cuA, cuB, cuC, N);
hipMemcpy(c.data(), cuC, N*sizeof(int), hipMemcpyDeviceToHost);
hipFree(cuA);
hipFree(cuB);
hipFree(cuC);
for (auto x : c)
cout << x << " ";
cout << endl;
return 0;
}
| 9a9ee8ac3b2a93e4128dc61c85203807c813f5b3.cu | #include <iostream>
#include <vector>
using namespace std;
__global__ void mult(const int *pA, const int *pB, int *pC, int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
pC[i] = pA[i] * pB[i];
}
int main(void)
{
const int N = 8192;
vector<int> a(N), b(N), c(N);
for (int i = 0 ; i < N ; i++)
{
a[i] = i;
b[i] = -i;
}
int *cuA, *cuB, *cuC;
cudaMalloc(&cuA, N*sizeof(int));
cudaMalloc(&cuB, N*sizeof(int));
cudaMalloc(&cuC, N*sizeof(int));
cudaMemcpy(cuA, a.data(), N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(cuB, b.data(), N*sizeof(int), cudaMemcpyHostToDevice);
int blockSize = 256;
int numBlocks = N/blockSize + ((N%blockSize == 0)?0:1);
mult<<<numBlocks, blockSize>>>(cuA, cuB, cuC, N);
cudaMemcpy(c.data(), cuC, N*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(cuA);
cudaFree(cuB);
cudaFree(cuC);
for (auto x : c)
cout << x << " ";
cout << endl;
return 0;
}
|
445804183273c651c82a71571b5b5e882a8bc7ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "topk_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "device_atomic_functions.h"
#include "hipcub/hipcub.hpp"
#include "cub/util_type.cuh"
#include "hipcub/hipcub.hpp"
#include "hipcub/hipcub.hpp"
#include <limits>
//TODO:fix the warnings
#ifdef _MSC_VER
#pragma warning(disable : 4244)
#endif
namespace onnxruntime {
namespace cuda {
using namespace cub;
template <typename T>
struct KV {
T key;
int64_t val;
};
template <typename T>
struct NumericLimits {
static T Lowest() {
return std::numeric_limits<T>::lowest();
}
static T Max() {
return std::numeric_limits<T>::max();
}
};
template <>
struct NumericLimits<MLFloat16> {
static half Lowest() {
return -65504.0;
}
static half Max() {
return 65504.0;
}
};
#define BT GridDim::maxThreadsPerBlock
#define ALIGN(N) static_cast<int64_t>(pow(2, ceil(log2(static_cast<double>(N)))))
#define FROM(idx) (left_dim + (idx)*mid_dim + right_dim)
#define TO(idx) (left_dim * K / dimension + (idx)*mid_dim + right_dim)
#define TRIVIAL (1 == largest ? type_min : type_max)
#define BIGGER(n, m) (n.key > m.key ? n : (n.key < m.key ? m : (n.val > m.val ? (1 == largest ? m : n) : (1 == largest ? n : m))))
#define SMALLER(n, m) (n.key < m.key ? n : (n.key > m.key ? m : (n.val < m.val ? (1 == largest ? m : n) : (1 == largest ? n : m))))
#define IS_SMALLER(n, m) (n.key < m.key || !(n.key > m.key) && (1 == largest ? n.val > m.val : n.val < m.val))
#define LESS(n, m) ((n) <= (m) ? (n) : (m))
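// BitonicTopK: one thread block per output slice. The slice is staged into shared memory,
// padded to the next power of two with sentinel keys; each aligned_K-sized chunk is
// bitonic-sorted, and chunks are then repeatedly merged so only the best aligned_K
// candidates survive. The top K are finally written out, sorted by value when sorted == 1
// or re-sorted by ascending index otherwise.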
template <typename T>
__global__ void BitonicTopK(const T* X, T* V, int64_t* I, const TArray<int64_t> elem_nums, size_t size, int32_t axis, int64_t K, int64_t aligned_K, int64_t largest, int64_t sorted, int64_t dimension, int64_t aligned_dimension, T type_min, T type_max) {
auto tid = threadIdx.x;
auto bid = blockIdx.x;
extern __shared__ char shared_mem[];
auto S = (KV<T>*)(shared_mem);
auto mid_dim = axis == size - 1 ? 1 : elem_nums[axis + 1];
auto left_dim = bid / mid_dim * elem_nums[axis];
auto right_dim = axis == size - 1 ? 0 : bid % elem_nums[axis + 1];
for (auto i = tid; i < aligned_dimension; i += blockDim.x) {
S[i].key = i < dimension ? X[FROM(i)] : TRIVIAL;
S[i].val = i;
}
__syncthreads();
//sort each K
for (int64_t len = 1; len < aligned_K; len <<= 1) {
auto dir = len << 1;
for (auto inc = len; inc > 0; inc >>= 1) {
auto low = tid & (inc - 1);
auto i = (tid << 1) - low;
auto j = i + inc;
if (j < aligned_dimension) {
auto reverse = (dir & i) == 0;
auto swap = reverse ^ IS_SMALLER(S[i], S[j]);
if (swap) {
auto tmp = S[i];
S[i] = S[j];
S[j] = tmp;
}
}
__syncthreads();
}
__syncthreads();
}
//merge and rebuild K
for (int64_t len = aligned_K; len < aligned_dimension; len <<= 1) {
auto dir = len << 1;
auto i = (tid << 1) - (tid & (len - 1));
auto j = i + len;
if (i % dir < aligned_K && j < aligned_dimension) {
S[i] = 1 == largest ? BIGGER(S[i], S[j]) : SMALLER(S[i], S[j]);
}
__syncthreads();
for (auto inc = aligned_K >> 1; inc > 0; inc >>= 1) {
auto ii = (tid << 1) - (tid & (inc - 1));
auto jj = ii + inc;
if (ii % dir < aligned_K && jj < aligned_dimension) {
auto reverse = (dir & ii) == 0;
auto swap = reverse ^ IS_SMALLER(S[ii], S[jj]);
if (swap) {
auto tmp = S[ii];
S[ii] = S[jj];
S[jj] = tmp;
}
}
__syncthreads();
}
__syncthreads();
}
//save top K
if (1 == sorted) {
if (1 == largest) {
auto start = aligned_K - K;
if (tid >= start && tid < aligned_K) {
auto to = TO(aligned_K - 1 - tid);
V[to] = S[tid].key;
I[to] = S[tid].val;
}
} else {
if (tid < K) {
auto to = TO(tid);
V[to] = S[tid].key;
I[to] = S[tid].val;
}
}
} else {
if (1 == largest) {
auto start = aligned_K - K;
if (tid < start) {
S[tid].val = aligned_dimension;
}
} else {
if (tid >= K && tid < aligned_K) {
S[tid].val = aligned_dimension;
}
}
__syncthreads();
//sort by index ascending
for (int64_t len = 1; len < aligned_K; len <<= 1) {
auto dir = len << 1;
for (int64_t inc = len; inc > 0; inc >>= 1) {
auto low = tid & (inc - 1);
auto i = (tid << 1) - low;
auto j = i + inc;
if (j < aligned_K) {
auto reverse = (dir & i) == 0;
auto swap = reverse ^ (S[i].val < S[j].val);
if (swap) {
auto tmp = S[i];
S[i] = S[j];
S[j] = tmp;
}
}
__syncthreads();
}
__syncthreads();
}
if (tid < K) {
auto to = TO(tid);
V[to] = S[tid].key;
I[to] = S[tid].val;
}
}
}
template <typename T>
__device__ __inline__ bool Equal(const T& t0, const T& t1) {
return t0 == t1;
}
__device__ __inline__ bool Equal(const float& t0, const float& t1) {
auto t2 = t0 > t1 ? t0 - t1 : t1 - t0;
return t2 < std::numeric_limits<float>::epsilon();
}
__device__ __inline__ bool Equal(const double& t0, const double& t1) {
auto t2 = t0 > t1 ? t0 - t1 : t1 - t0;
return t2 < std::numeric_limits<double>::epsilon();
}
template<typename T>
__device__ bool SamePrefix(const T* t0, const T* t1, int64_t skip) {
return ((*t0)^(*t1))>>skip == 0;
}
__device__ bool SamePrefix(const half* f0, const half* f1, int64_t skip) {
return SamePrefix((const int16_t*)f0, (const int16_t*)f1, skip);
}
__device__ bool SamePrefix(const float* f0, const float* f1, int64_t skip) {
return SamePrefix((const int32_t*)f0, (const int32_t*)f1, skip);
}
__device__ bool SamePrefix(const double* d0, const double* d1, int64_t skip) {
return SamePrefix((const int64_t*)d0, (const int64_t*)d1, skip);
}
template<typename T>
__device__ int32_t Radix(const T* t, int64_t skip) {
return ((*t)>>skip)&255;
}
__device__ int32_t Radix(const half* f, int64_t skip) {
return Radix((const int16_t*)f, skip);
}
__device__ int32_t Radix(const float* f, int64_t skip) {
return Radix((const int32_t*)f, skip);
}
__device__ int32_t Radix(const double* d, int64_t skip) {
return Radix((const int64_t*)d, skip);
}
template<typename T>
__device__ void SetByte(T* t, int64_t byte) {
(*t) |= byte;
}
__device__ void SetByte(half* f, int64_t byte) {
SetByte((int16_t*)f, byte);
}
__device__ void SetByte(float* f, int64_t byte) {
SetByte((int32_t*)f, byte);
}
__device__ void SetByte(double* d, int64_t byte) {
SetByte((int64_t*)d, byte);
}
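// RadixTopK: one thread block per output slice. The block first counts positive and negative
// elements; when the K-th value cannot be zero, it locates that value with a byte-wise radix
// histogram search from the most significant byte down, then emits every element strictly
// better than the threshold plus enough equal elements to reach exactly K, and, when
// sorted == 1, orders the K results with a block-wide radix sort.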
template<typename T, int64_t THREADS, int64_t KPT>
__global__ void RadixTopK(const T* X, T* V, int64_t* I, const TArray<int64_t> elem_nums, size_t size, int32_t axis, int64_t K, int64_t largest, int64_t sorted, int64_t dimension, int64_t XPT, T type_min, T type_max) {
auto tid = threadIdx.x;
auto bid = blockIdx.x;
extern __shared__ char shared_mem[];
auto H = (uint32_t*)shared_mem;
auto mid_dim = axis == size - 1 ? 1 : elem_nums[axis + 1];
auto left_dim = bid / mid_dim * elem_nums[axis];
auto right_dim = axis == size - 1 ? 0 : bid % elem_nums[axis + 1];
T Kth = (T)0, sign = (T)1;
typedef BlockScan<uint32_t, THREADS> BlockScan;
typedef BlockReduce<uint32_t, THREADS> BlockReduce;
typedef BlockRadixSort<T, THREADS, KPT, int64_t> BlockRadixSort;
__shared__ union {
typename BlockScan::TempStorage scan;
typename BlockReduce::TempStorage reduce;
typename BlockRadixSort::TempStorage sort;
} temp_storage;
uint32_t positive = 0, negative = 0;
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
T x = X[FROM(x_i)];
if (x > (T)0) {
++positive;
} else if (x < (T)0) {
++negative;
}
}
__syncthreads();
positive = BlockReduce(temp_storage.reduce).Sum(positive);
__syncthreads();
negative = BlockReduce(temp_storage.reduce).Sum(negative);
if (0 == tid) {
H[0] = positive;
H[1] = negative;
}
__syncthreads();
positive = H[0];
negative = H[1];
if ((1 == largest && (K <= positive || dimension - K + 1 <= negative)) ||
(0 == largest && (K <= negative || dimension - K + 1 <= positive))) {
auto KK = K;
if (1 == largest) {
if (KK > positive) {
KK = dimension - KK + 1;
sign = (T)-1;
}
} else {
if (KK > negative) {
KK = dimension - KK + 1;
} else {
sign = (T)-1;
}
}
__syncthreads();
#pragma unroll
for (int64_t byte = sizeof(T)-1; byte > -1; --byte) {
if (tid < 256) H[tid] = 0;
__syncthreads();
auto skip = 8 * byte, prev_skip = 8 * (byte + 1);
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
T x = sign*X[FROM(x_i)];
if (x > (T)0 && (byte == sizeof(T) - 1 || SamePrefix(&x, &Kth, prev_skip))) {
atomicAdd(&H[Radix(&x, skip)], 1);
}
}
__syncthreads();
for (int64_t radix = 255; radix > 0; --radix) {
if (H[radix] < KK) {
KK -= H[radix];
} else {
SetByte(&Kth, radix<<skip);
break;
}
}
__syncthreads();
}
Kth *= sign;
}
uint32_t superior = 0, equal = 0;
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
auto x = X[FROM(x_i)];
if (1 == largest && x > Kth || 0 == largest && x < Kth) {
++superior;
} else if (Equal(x, Kth)) {
++equal;
}
}
__syncthreads();
auto all_superior = superior;
all_superior = BlockReduce(temp_storage.reduce).Sum(all_superior);
if (0 == tid) {
H[0] = all_superior;
}
__syncthreads();
all_superior = H[0];
BlockScan(temp_storage.scan).ExclusiveSum(superior, superior);
__syncthreads();
BlockScan(temp_storage.scan).ExclusiveSum(equal, equal);
__syncthreads();
auto equal_quota = K - all_superior - equal;
auto output_i = superior + LESS(K - all_superior, equal);
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
auto x = X[FROM(x_i)];
if (1 == largest && x > Kth || 0 == largest && x < Kth) {
auto to_i = TO(output_i);
V[to_i] = x;
I[to_i] = x_i;
++output_i;
} else if (Equal(x, Kth) && equal_quota > 0) {
auto to_i = TO(output_i);
V[to_i] = x;
I[to_i] = x_i;
++output_i;
--equal_quota;
}
}
__syncthreads();
if (1 == sorted) {
T keys[KPT];
int64_t vals[KPT];
for (int64_t k_i = tid, k_c = 0; k_c < KPT; k_i += blockDim.x, ++k_c) {
if (k_i < K) {
auto to_i = TO(k_i);
keys[k_c] = V[to_i];
vals[k_c] = I[to_i];
} else {
if (1 == largest) {
keys[k_c] = type_min;
} else {
keys[k_c] = type_max;
}
}
}
__syncthreads();
if (1 == largest) {
BlockRadixSort(temp_storage.sort).SortDescending(keys, vals);
} else {
BlockRadixSort(temp_storage.sort).Sort(keys, vals);
}
__syncthreads();
#pragma unroll
for (int64_t k_c = 0; k_c < KPT; ++k_c) {
auto k_i = tid * KPT + k_c;
if (k_i < K) {
auto to_i = TO(k_i);
V[to_i] = keys[k_c];
I[to_i] = vals[k_c];
}
}
}
}
template <typename T>
__global__ void FillInput(const T* input_x, T* output_v, int64_t* output_i, const TArray<int64_t> elem_nums, size_t size, int32_t axis, int64_t K, int64_t offset, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, dimension);
auto left = offset / (axis == size - 1 ? 1 : elem_nums[axis + 1]) * elem_nums[axis];
auto right = axis == size - 1 ? 0 : offset % elem_nums[axis + 1];
auto input_offset = left + id * (axis == size - 1 ? 1 : elem_nums[axis + 1]) + right;
output_v[id] = input_x[input_offset];
output_i[id] = id;
}
template <typename T>
__global__ void FillOutput(const T* input_v, const int64_t* input_i, T* output_v, int64_t* output_i, const TArray<int64_t> elem_nums, size_t size, int32_t axis, int64_t K, int64_t offset, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, K);
auto left = offset / (axis == size - 1 ? 1 : elem_nums[axis + 1]) * elem_nums[axis] * K / dimension;
auto right = axis == size - 1 ? 0 : offset % elem_nums[axis + 1];
auto output_offset = left + id * (axis == size - 1 ? 1 : elem_nums[axis + 1]) + right;
output_v[output_offset] = input_v[id];
output_i[output_offset] = input_i[id];
}
__global__ void ExcludeOutput(int64_t* output_i, int64_t K, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, dimension);
if (id >= K) {
output_i[id] = dimension;
}
}
template <typename T>
Status TopKImpl(const CudaKernel* kernel, const T* input_x, T* output_v, int64_t* output_i, const TArray<int64_t>& elem_nums, size_t size, int32_t axis, int64_t K, int64_t largest, int64_t sorted, int64_t N, int64_t dimension) {
typedef typename ToCudaType<T>::MappedType CudaT;
hipStream_t stream = kernel->Stream();
const CudaT* input_x_ptr = reinterpret_cast<const CudaT*>(input_x);
CudaT* output_v_ptr = reinterpret_cast<CudaT*>(output_v);
auto aligned_K = ALIGN(K);
auto aligned_dimension = ALIGN(dimension);
if (aligned_dimension <= GridDim::maxThreadsPerBlock) {
hipLaunchKernelGGL(( BitonicTopK<CudaT>), dim3(N), dim3(GridDim::maxThreadsPerBlock), aligned_dimension * sizeof(KV<CudaT>), stream, input_x_ptr, output_v_ptr, output_i, elem_nums, size, axis, K, aligned_K, largest, sorted, dimension, aligned_dimension, NumericLimits<T>::Lowest(), NumericLimits<T>::Max());
} else if (K <= BT*16 || 0 == sorted) {
auto XPT = static_cast<int64_t>(ceil(static_cast<double>(dimension) / GridDim::maxThreadsPerBlock));
if (BT*2 >= K || 0 == sorted) {
hipLaunchKernelGGL(( RadixTopK<CudaT, BT, 2>), dim3(N), dim3(BT), 256 * sizeof(uint32_t), stream, input_x_ptr, output_v_ptr, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, NumericLimits<T>::Lowest(), NumericLimits<T>::Max());
} else if (BT*4>=K) {
hipLaunchKernelGGL(( RadixTopK<CudaT, BT, 4>), dim3(N), dim3(BT), 256 * sizeof(uint32_t), stream, input_x_ptr, output_v_ptr, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, NumericLimits<T>::Lowest(), NumericLimits<T>::Max());
} else if (BT*8>=K) {
hipLaunchKernelGGL(( RadixTopK<CudaT, BT, 8>), dim3(N), dim3(BT), 256 * sizeof(uint32_t), stream, input_x_ptr, output_v_ptr, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, NumericLimits<T>::Lowest(), NumericLimits<T>::Max());
} else {
hipLaunchKernelGGL(( RadixTopK<CudaT, BT, 16>), dim3(N), dim3(BT), 256 * sizeof(uint32_t), stream, input_x_ptr, output_v_ptr, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, NumericLimits<T>::Lowest(), NumericLimits<T>::Max());
}
} else {
auto input_key_buffer = kernel->GetScratchBuffer<CudaT>(dimension);
auto output_key_buffer = kernel->GetScratchBuffer<CudaT>(dimension);
auto input_value_buffer = kernel->GetScratchBuffer<int64_t>(dimension);
auto output_value_buffer = kernel->GetScratchBuffer<int64_t>(dimension);
auto* input_key = input_key_buffer.get();
auto* output_key = output_key_buffer.get();
auto* input_value = input_value_buffer.get();
auto* output_value = output_value_buffer.get();
size_t temp_bytes = 0;
CUDA_RETURN_IF_ERROR(hipcub::DeviceRadixSort::SortPairs(nullptr, temp_bytes, input_key, output_key, input_value, output_value, dimension, 0, sizeof(T)*8, stream));
auto temp_storage_buffer = kernel->GetScratchBuffer<char>(temp_bytes);
auto* temp_storage = temp_storage_buffer.get();
auto blocks_per_grid_D = (int)(ceil(static_cast<float>(dimension) / BT));
auto blocks_per_grid_K = (int)(ceil(static_cast<float>(K) / BT));
for (int64_t i = 0; i < N; i++) {
hipLaunchKernelGGL(( FillInput<CudaT>), dim3(blocks_per_grid_D), dim3(BT), 0, stream, input_x_ptr, input_key, input_value, elem_nums, size, axis, K, i, dimension);
CUDA_RETURN_IF_ERROR(1 == largest ? hipcub::DeviceRadixSort::SortPairsDescending(temp_storage, temp_bytes, input_key, output_key, input_value, output_value, dimension, 0, sizeof(T)*8, stream)
: hipcub::DeviceRadixSort::SortPairs(temp_storage, temp_bytes, input_key, output_key, input_value, output_value, dimension, 0, sizeof(T)*8, stream));
if (1 == sorted) {
hipLaunchKernelGGL(( FillOutput<CudaT>), dim3(blocks_per_grid_K), dim3(BT), 0, stream, output_key, output_value, output_v_ptr, output_i, elem_nums, size, axis, K, i, dimension);
} else { //reorder by ascending index
hipLaunchKernelGGL(( ExcludeOutput), dim3(blocks_per_grid_D), dim3(BT), 0, stream, output_value, K, dimension);
CUDA_RETURN_IF_ERROR(hipcub::DeviceRadixSort::SortPairs(temp_storage, temp_bytes, output_value, input_value, output_key, input_key, dimension, 0, sizeof(T)*8, stream));
hipLaunchKernelGGL(( FillOutput<CudaT>), dim3(blocks_per_grid_K), dim3(BT), 0, stream, input_key, input_value, output_v_ptr, output_i, elem_nums, size, axis, K, i, dimension);
}
}
}
return Status::OK();
}
#define TOPKIMPLE(T) template Status TopKImpl<T>(const CudaKernel* kernel, \
const T* input_x, \
T* output_v, \
int64_t* output_i, \
const TArray<int64_t>& elem_nums, \
size_t size, \
int32_t axis, \
int64_t K, \
int64_t largest, \
int64_t sorted, \
int64_t N, \
int64_t dimension)
TOPKIMPLE(uint8_t);
TOPKIMPLE(uint16_t);
TOPKIMPLE(uint32_t);
TOPKIMPLE(uint64_t);
TOPKIMPLE(int8_t);
TOPKIMPLE(int16_t);
TOPKIMPLE(int32_t);
TOPKIMPLE(int64_t);
TOPKIMPLE(float);
TOPKIMPLE(MLFloat16);
TOPKIMPLE(double);
} // namespace cuda
} // namespace onnxruntime
| 445804183273c651c82a71571b5b5e882a8bc7ed.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "topk_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "device_atomic_functions.h"
#include "cub/cub.cuh"
#include "cub/util_type.cuh"
#include "cub/util_allocator.cuh"
#include "cub/device/device_radix_sort.cuh"
#include <limits>
//TODO:fix the warnings
#ifdef _MSC_VER
#pragma warning(disable : 4244)
#endif
namespace onnxruntime {
namespace cuda {
using namespace cub;
template <typename T>
struct KV {
T key;
int64_t val;
};
template <typename T>
struct NumericLimits {
static T Lowest() {
return std::numeric_limits<T>::lowest();
}
static T Max() {
return std::numeric_limits<T>::max();
}
};
template <>
struct NumericLimits<MLFloat16> {
static half Lowest() {
return -65504.0;
}
static half Max() {
return 65504.0;
}
};
#define BT GridDim::maxThreadsPerBlock
#define ALIGN(N) static_cast<int64_t>(pow(2, ceil(log2(static_cast<double>(N)))))
#define FROM(idx) (left_dim + (idx)*mid_dim + right_dim)
#define TO(idx) (left_dim * K / dimension + (idx)*mid_dim + right_dim)
#define TRIVIAL (1 == largest ? type_min : type_max)
#define BIGGER(n, m) (n.key > m.key ? n : (n.key < m.key ? m : (n.val > m.val ? (1 == largest ? m : n) : (1 == largest ? n : m))))
#define SMALLER(n, m) (n.key < m.key ? n : (n.key > m.key ? m : (n.val < m.val ? (1 == largest ? m : n) : (1 == largest ? n : m))))
#define IS_SMALLER(n, m) (n.key < m.key || !(n.key > m.key) && (1 == largest ? n.val > m.val : n.val < m.val))
#define LESS(n, m) ((n) <= (m) ? (n) : (m))
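// BitonicTopK: one thread block per output slice. The slice is staged into shared memory,
// padded to the next power of two with sentinel keys; each aligned_K-sized chunk is
// bitonic-sorted, and chunks are then repeatedly merged so only the best aligned_K
// candidates survive. The top K are finally written out, sorted by value when sorted == 1
// or re-sorted by ascending index otherwise.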
template <typename T>
__global__ void BitonicTopK(const T* X, T* V, int64_t* I, const TArray<int64_t> elem_nums, size_t size, int32_t axis, int64_t K, int64_t aligned_K, int64_t largest, int64_t sorted, int64_t dimension, int64_t aligned_dimension, T type_min, T type_max) {
auto tid = threadIdx.x;
auto bid = blockIdx.x;
extern __shared__ char shared_mem[];
auto S = (KV<T>*)(shared_mem);
auto mid_dim = axis == size - 1 ? 1 : elem_nums[axis + 1];
auto left_dim = bid / mid_dim * elem_nums[axis];
auto right_dim = axis == size - 1 ? 0 : bid % elem_nums[axis + 1];
for (auto i = tid; i < aligned_dimension; i += blockDim.x) {
S[i].key = i < dimension ? X[FROM(i)] : TRIVIAL;
S[i].val = i;
}
__syncthreads();
//sort each K
for (int64_t len = 1; len < aligned_K; len <<= 1) {
auto dir = len << 1;
for (auto inc = len; inc > 0; inc >>= 1) {
auto low = tid & (inc - 1);
auto i = (tid << 1) - low;
auto j = i + inc;
if (j < aligned_dimension) {
auto reverse = (dir & i) == 0;
auto swap = reverse ^ IS_SMALLER(S[i], S[j]);
if (swap) {
auto tmp = S[i];
S[i] = S[j];
S[j] = tmp;
}
}
__syncthreads();
}
__syncthreads();
}
//merge and rebuild K
for (int64_t len = aligned_K; len < aligned_dimension; len <<= 1) {
auto dir = len << 1;
auto i = (tid << 1) - (tid & (len - 1));
auto j = i + len;
if (i % dir < aligned_K && j < aligned_dimension) {
S[i] = 1 == largest ? BIGGER(S[i], S[j]) : SMALLER(S[i], S[j]);
}
__syncthreads();
for (auto inc = aligned_K >> 1; inc > 0; inc >>= 1) {
auto ii = (tid << 1) - (tid & (inc - 1));
auto jj = ii + inc;
if (ii % dir < aligned_K && jj < aligned_dimension) {
auto reverse = (dir & ii) == 0;
auto swap = reverse ^ IS_SMALLER(S[ii], S[jj]);
if (swap) {
auto tmp = S[ii];
S[ii] = S[jj];
S[jj] = tmp;
}
}
__syncthreads();
}
__syncthreads();
}
//save top K
if (1 == sorted) {
if (1 == largest) {
auto start = aligned_K - K;
if (tid >= start && tid < aligned_K) {
auto to = TO(aligned_K - 1 - tid);
V[to] = S[tid].key;
I[to] = S[tid].val;
}
} else {
if (tid < K) {
auto to = TO(tid);
V[to] = S[tid].key;
I[to] = S[tid].val;
}
}
} else {
if (1 == largest) {
auto start = aligned_K - K;
if (tid < start) {
S[tid].val = aligned_dimension;
}
} else {
if (tid >= K && tid < aligned_K) {
S[tid].val = aligned_dimension;
}
}
__syncthreads();
//sort by index ascending
for (int64_t len = 1; len < aligned_K; len <<= 1) {
auto dir = len << 1;
for (int64_t inc = len; inc > 0; inc >>= 1) {
auto low = tid & (inc - 1);
auto i = (tid << 1) - low;
auto j = i + inc;
if (j < aligned_K) {
auto reverse = (dir & i) == 0;
auto swap = reverse ^ (S[i].val < S[j].val);
if (swap) {
auto tmp = S[i];
S[i] = S[j];
S[j] = tmp;
}
}
__syncthreads();
}
__syncthreads();
}
if (tid < K) {
auto to = TO(tid);
V[to] = S[tid].key;
I[to] = S[tid].val;
}
}
}
template <typename T>
__device__ __inline__ bool Equal(const T& t0, const T& t1) {
return t0 == t1;
}
__device__ __inline__ bool Equal(const float& t0, const float& t1) {
auto t2 = t0 > t1 ? t0 - t1 : t1 - t0;
return t2 < std::numeric_limits<float>::epsilon();
}
__device__ __inline__ bool Equal(const double& t0, const double& t1) {
auto t2 = t0 > t1 ? t0 - t1 : t1 - t0;
return t2 < std::numeric_limits<double>::epsilon();
}
template<typename T>
__device__ bool SamePrefix(const T* t0, const T* t1, int64_t skip) {
return ((*t0)^(*t1))>>skip == 0;
}
__device__ bool SamePrefix(const half* f0, const half* f1, int64_t skip) {
return SamePrefix((const int16_t*)f0, (const int16_t*)f1, skip);
}
__device__ bool SamePrefix(const float* f0, const float* f1, int64_t skip) {
return SamePrefix((const int32_t*)f0, (const int32_t*)f1, skip);
}
__device__ bool SamePrefix(const double* d0, const double* d1, int64_t skip) {
return SamePrefix((const int64_t*)d0, (const int64_t*)d1, skip);
}
template<typename T>
__device__ int32_t Radix(const T* t, int64_t skip) {
return ((*t)>>skip)&255;
}
__device__ int32_t Radix(const half* f, int64_t skip) {
return Radix((const int16_t*)f, skip);
}
__device__ int32_t Radix(const float* f, int64_t skip) {
return Radix((const int32_t*)f, skip);
}
__device__ int32_t Radix(const double* d, int64_t skip) {
return Radix((const int64_t*)d, skip);
}
template<typename T>
__device__ void SetByte(T* t, int64_t byte) {
(*t) |= byte;
}
__device__ void SetByte(half* f, int64_t byte) {
SetByte((int16_t*)f, byte);
}
__device__ void SetByte(float* f, int64_t byte) {
SetByte((int32_t*)f, byte);
}
__device__ void SetByte(double* d, int64_t byte) {
SetByte((int64_t*)d, byte);
}
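// RadixTopK: one thread block per output slice. The block first counts positive and negative
// elements; when the K-th value cannot be zero, it locates that value with a byte-wise radix
// histogram search from the most significant byte down, then emits every element strictly
// better than the threshold plus enough equal elements to reach exactly K, and, when
// sorted == 1, orders the K results with a block-wide radix sort.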
template<typename T, int64_t THREADS, int64_t KPT>
__global__ void RadixTopK(const T* X, T* V, int64_t* I, const TArray<int64_t> elem_nums, size_t size, int32_t axis, int64_t K, int64_t largest, int64_t sorted, int64_t dimension, int64_t XPT, T type_min, T type_max) {
auto tid = threadIdx.x;
auto bid = blockIdx.x;
extern __shared__ char shared_mem[];
auto H = (uint32_t*)shared_mem;
auto mid_dim = axis == size - 1 ? 1 : elem_nums[axis + 1];
auto left_dim = bid / mid_dim * elem_nums[axis];
auto right_dim = axis == size - 1 ? 0 : bid % elem_nums[axis + 1];
T Kth = (T)0, sign = (T)1;
typedef BlockScan<uint32_t, THREADS> BlockScan;
typedef BlockReduce<uint32_t, THREADS> BlockReduce;
typedef BlockRadixSort<T, THREADS, KPT, int64_t> BlockRadixSort;
__shared__ union {
typename BlockScan::TempStorage scan;
typename BlockReduce::TempStorage reduce;
typename BlockRadixSort::TempStorage sort;
} temp_storage;
uint32_t positive = 0, negative = 0;
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
T x = X[FROM(x_i)];
if (x > (T)0) {
++positive;
} else if (x < (T)0) {
++negative;
}
}
__syncthreads();
positive = BlockReduce(temp_storage.reduce).Sum(positive);
__syncthreads();
negative = BlockReduce(temp_storage.reduce).Sum(negative);
if (0 == tid) {
H[0] = positive;
H[1] = negative;
}
__syncthreads();
positive = H[0];
negative = H[1];
if ((1 == largest && (K <= positive || dimension - K + 1 <= negative)) ||
(0 == largest && (K <= negative || dimension - K + 1 <= positive))) {
auto KK = K;
if (1 == largest) {
if (KK > positive) {
KK = dimension - KK + 1;
sign = (T)-1;
}
} else {
if (KK > negative) {
KK = dimension - KK + 1;
} else {
sign = (T)-1;
}
}
__syncthreads();
#pragma unroll
for (int64_t byte = sizeof(T)-1; byte > -1; --byte) {
if (tid < 256) H[tid] = 0;
__syncthreads();
auto skip = 8 * byte, prev_skip = 8 * (byte + 1);
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
T x = sign*X[FROM(x_i)];
if (x > (T)0 && (byte == sizeof(T) - 1 || SamePrefix(&x, &Kth, prev_skip))) {
atomicAdd(&H[Radix(&x, skip)], 1);
}
}
__syncthreads();
for (int64_t radix = 255; radix > 0; --radix) {
if (H[radix] < KK) {
KK -= H[radix];
} else {
SetByte(&Kth, radix<<skip);
break;
}
}
__syncthreads();
}
Kth *= sign;
}
uint32_t superior = 0, equal = 0;
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
auto x = X[FROM(x_i)];
if (1 == largest && x > Kth || 0 == largest && x < Kth) {
++superior;
} else if (Equal(x, Kth)) {
++equal;
}
}
__syncthreads();
auto all_superior = superior;
all_superior = BlockReduce(temp_storage.reduce).Sum(all_superior);
if (0 == tid) {
H[0] = all_superior;
}
__syncthreads();
all_superior = H[0];
BlockScan(temp_storage.scan).ExclusiveSum(superior, superior);
__syncthreads();
BlockScan(temp_storage.scan).ExclusiveSum(equal, equal);
__syncthreads();
auto equal_quota = K - all_superior - equal;
auto output_i = superior + LESS(K - all_superior, equal);
for (int64_t x_i = tid; x_i < dimension; x_i += blockDim.x) {
auto x = X[FROM(x_i)];
if (1 == largest && x > Kth || 0 == largest && x < Kth) {
auto to_i = TO(output_i);
V[to_i] = x;
I[to_i] = x_i;
++output_i;
} else if (Equal(x, Kth) && equal_quota > 0) {
auto to_i = TO(output_i);
V[to_i] = x;
I[to_i] = x_i;
++output_i;
--equal_quota;
}
}
__syncthreads();
if (1 == sorted) {
T keys[KPT];
int64_t vals[KPT];
for (int64_t k_i = tid, k_c = 0; k_c < KPT; k_i += blockDim.x, ++k_c) {
if (k_i < K) {
auto to_i = TO(k_i);
keys[k_c] = V[to_i];
vals[k_c] = I[to_i];
} else {
if (1 == largest) {
keys[k_c] = type_min;
} else {
keys[k_c] = type_max;
}
}
}
__syncthreads();
if (1 == largest) {
BlockRadixSort(temp_storage.sort).SortDescending(keys, vals);
} else {
BlockRadixSort(temp_storage.sort).Sort(keys, vals);
}
__syncthreads();
#pragma unroll
for (int64_t k_c = 0; k_c < KPT; ++k_c) {
auto k_i = tid * KPT + k_c;
if (k_i < K) {
auto to_i = TO(k_i);
V[to_i] = keys[k_c];
I[to_i] = vals[k_c];
}
}
}
}
template <typename T>
__global__ void FillInput(const T* input_x, T* output_v, int64_t* output_i, const TArray<int64_t> elem_nums, size_t size, int32_t axis, int64_t K, int64_t offset, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, dimension);
auto left = offset / (axis == size - 1 ? 1 : elem_nums[axis + 1]) * elem_nums[axis];
auto right = axis == size - 1 ? 0 : offset % elem_nums[axis + 1];
auto input_offset = left + id * (axis == size - 1 ? 1 : elem_nums[axis + 1]) + right;
output_v[id] = input_x[input_offset];
output_i[id] = id;
}
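// Scatters the first K sorted (value, index) pairs back into the strided output layout.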
template <typename T>
__global__ void FillOutput(const T* input_v, const int64_t* input_i, T* output_v, int64_t* output_i, const TArray<int64_t> elem_nums, size_t size, int32_t axis, int64_t K, int64_t offset, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, K);
auto left = offset / (axis == size - 1 ? 1 : elem_nums[axis + 1]) * elem_nums[axis] * K / dimension;
auto right = axis == size - 1 ? 0 : offset % elem_nums[axis + 1];
auto output_offset = left + id * (axis == size - 1 ? 1 : elem_nums[axis + 1]) + right;
output_v[output_offset] = input_v[id];
output_i[output_offset] = input_i[id];
}
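// Marks indices past the first K with `dimension` so a follow-up sort by index pushes
// them to the end; used to restore ascending-index order when sorted output is not
// requested.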
__global__ void ExcludeOutput(int64_t* output_i, int64_t K, int64_t dimension) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(id, dimension);
if (id >= K) {
output_i[id] = dimension;
}
}
template <typename T>
Status TopKImpl(const CudaKernel* kernel, const T* input_x, T* output_v, int64_t* output_i, const TArray<int64_t>& elem_nums, size_t size, int32_t axis, int64_t K, int64_t largest, int64_t sorted, int64_t N, int64_t dimension) {
typedef typename ToCudaType<T>::MappedType CudaT;
cudaStream_t stream = kernel->Stream();
const CudaT* input_x_ptr = reinterpret_cast<const CudaT*>(input_x);
CudaT* output_v_ptr = reinterpret_cast<CudaT*>(output_v);
auto aligned_K = ALIGN(K);
auto aligned_dimension = ALIGN(dimension);
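// Dispatch: (1) rows that fit into a single block go to BitonicTopK with the whole row
// held in shared memory; (2) small K (or unsorted output) uses RadixTopK, one block per
// row, with keys-per-thread chosen from {2, 4, 8, 16}; (3) otherwise each row is fully
// sorted with cub::DeviceRadixSort and its first K elements are copied out.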
if (aligned_dimension <= GridDim::maxThreadsPerBlock) {
BitonicTopK<CudaT><<<N, GridDim::maxThreadsPerBlock, aligned_dimension * sizeof(KV<CudaT>), stream>>>(input_x_ptr, output_v_ptr, output_i, elem_nums, size, axis, K, aligned_K, largest, sorted, dimension, aligned_dimension, NumericLimits<T>::Lowest(), NumericLimits<T>::Max());
} else if (K <= BT*16 || 0 == sorted) {
auto XPT = static_cast<int64_t>(ceil(static_cast<double>(dimension) / GridDim::maxThreadsPerBlock));
if (BT*2 >= K || 0 == sorted) {
RadixTopK<CudaT, BT, 2><<<N, BT, 256 * sizeof(uint32_t), stream>>>(input_x_ptr, output_v_ptr, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, NumericLimits<T>::Lowest(), NumericLimits<T>::Max());
} else if (BT*4>=K) {
RadixTopK<CudaT, BT, 4><<<N, BT, 256 * sizeof(uint32_t), stream>>>(input_x_ptr, output_v_ptr, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, NumericLimits<T>::Lowest(), NumericLimits<T>::Max());
} else if (BT*8>=K) {
RadixTopK<CudaT, BT, 8><<<N, BT, 256 * sizeof(uint32_t), stream>>>(input_x_ptr, output_v_ptr, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, NumericLimits<T>::Lowest(), NumericLimits<T>::Max());
} else {
RadixTopK<CudaT, BT, 16><<<N, BT, 256 * sizeof(uint32_t), stream>>>(input_x_ptr, output_v_ptr, output_i, elem_nums, size, axis, K, largest, sorted, dimension, XPT, NumericLimits<T>::Lowest(), NumericLimits<T>::Max());
}
} else {
auto input_key_buffer = kernel->GetScratchBuffer<CudaT>(dimension);
auto output_key_buffer = kernel->GetScratchBuffer<CudaT>(dimension);
auto input_value_buffer = kernel->GetScratchBuffer<int64_t>(dimension);
auto output_value_buffer = kernel->GetScratchBuffer<int64_t>(dimension);
auto* input_key = input_key_buffer.get();
auto* output_key = output_key_buffer.get();
auto* input_value = input_value_buffer.get();
auto* output_value = output_value_buffer.get();
size_t temp_bytes = 0;
CUDA_RETURN_IF_ERROR(cub::DeviceRadixSort::SortPairs(nullptr, temp_bytes, input_key, output_key, input_value, output_value, dimension, 0, sizeof(T)*8, stream));
auto temp_storage_buffer = kernel->GetScratchBuffer<char>(temp_bytes);
auto* temp_storage = temp_storage_buffer.get();
auto blocks_per_grid_D = (int)(ceil(static_cast<float>(dimension) / BT));
auto blocks_per_grid_K = (int)(ceil(static_cast<float>(K) / BT));
for (int64_t i = 0; i < N; i++) {
FillInput<CudaT><<<blocks_per_grid_D, BT, 0, stream>>>(input_x_ptr, input_key, input_value, elem_nums, size, axis, K, i, dimension);
CUDA_RETURN_IF_ERROR(1 == largest ? cub::DeviceRadixSort::SortPairsDescending(temp_storage, temp_bytes, input_key, output_key, input_value, output_value, dimension, 0, sizeof(T)*8, stream)
: cub::DeviceRadixSort::SortPairs(temp_storage, temp_bytes, input_key, output_key, input_value, output_value, dimension, 0, sizeof(T)*8, stream));
if (1 == sorted) {
FillOutput<CudaT><<<blocks_per_grid_K, BT, 0, stream>>>(output_key, output_value, output_v_ptr, output_i, elem_nums, size, axis, K, i, dimension);
} else { //reorder by ascending index
ExcludeOutput<<<blocks_per_grid_D, BT, 0, stream>>>(output_value, K, dimension);
CUDA_RETURN_IF_ERROR(cub::DeviceRadixSort::SortPairs(temp_storage, temp_bytes, output_value, input_value, output_key, input_key, dimension, 0, sizeof(T)*8, stream));
FillOutput<CudaT><<<blocks_per_grid_K, BT, 0, stream>>>(input_key, input_value, output_v_ptr, output_i, elem_nums, size, axis, K, i, dimension);
}
}
}
return Status::OK();
}
#define TOPKIMPLE(T) template Status TopKImpl<T>(const CudaKernel* kernel, \
const T* input_x, \
T* output_v, \
int64_t* output_i, \
const TArray<int64_t>& elem_nums, \
size_t size, \
int32_t axis, \
int64_t K, \
int64_t largest, \
int64_t sorted, \
int64_t N, \
int64_t dimension)
TOPKIMPLE(uint8_t);
TOPKIMPLE(uint16_t);
TOPKIMPLE(uint32_t);
TOPKIMPLE(uint64_t);
TOPKIMPLE(int8_t);
TOPKIMPLE(int16_t);
TOPKIMPLE(int32_t);
TOPKIMPLE(int64_t);
TOPKIMPLE(float);
TOPKIMPLE(MLFloat16);
TOPKIMPLE(double);
} // namespace cuda
} // namespace onnxruntime
|
e62c4ad418a72f06d719fc604d5132fd39068123.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "task_updategradients.cuh"
template<typename T>
__global__ void d_updateGradients( DeviceMemory<T>* mem ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if( idx >= mem->fframeW ) return;
int idxy = IDX2R( 0, idx, mem->fframeW );
for(int row = 0; row < mem->frameH; row++) {
if( mem->mask[idxy] ) {
int iX = static_cast<int>( mem->x[idxy] );
int iY = static_cast<int>( mem->y[idxy] );
int tidxy = IDX2R( iY, iX, mem->templateW );
T xFrac = mem->x[idxy] - static_cast<T>( iX );
T yFrac = mem->y[idxy] - static_cast<T>( iY );
mem->wxgrad[idxy] = ( 1 - yFrac ) * mem->xGradients[tidxy] + yFrac * mem->xGradients[tidxy + mem->templateW];
mem->wygrad[idxy] = ( 1 - xFrac ) * mem->yGradients[tidxy] + xFrac * mem->yGradients[tidxy + 1];
} else {
mem->wxgrad[idxy] = 0;
mem->wygrad[idxy] = 0;
}
idxy += mem->fframeW;
}
}
template<typename T>
void hd_updateGradients( DeviceMemory<T>& mem ) {
int numBlocks = ( mem.fframeW + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( d_updateGradients<T>), dim3(numBlocks), dim3(THREADS_PER_BLOCK) , 0, 0, mem.d_mem );
}
template void hd_updateGradients( DeviceMemory<float>& mem );
template void hd_updateGradients( DeviceMemory<double>& mem );
//template void hd_updateBlockGradients( DeviceMemory<float>& mem, int groupBase );
//template void hd_updateBlockGradients( DeviceMemory<double>& mem, int groupBase );
| e62c4ad418a72f06d719fc604d5132fd39068123.cu | #include "task_updategradients.cuh"
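// Each thread owns one frame column and walks its rows: at masked pixels the warped
// x-gradient is interpolated linearly along y and the warped y-gradient linearly along
// x at the (possibly fractional) warped coordinates; unmasked pixels get zero.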
template<typename T>
__global__ void d_updateGradients( DeviceMemory<T>* mem ) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if( idx >= mem->fframeW ) return;
int idxy = IDX2R( 0, idx, mem->fframeW );
for(int row = 0; row < mem->frameH; row++) {
if( mem->mask[idxy] ) {
int iX = static_cast<int>( mem->x[idxy] );
int iY = static_cast<int>( mem->y[idxy] );
int tidxy = IDX2R( iY, iX, mem->templateW );
T xFrac = mem->x[idxy] - static_cast<T>( iX );
T yFrac = mem->y[idxy] - static_cast<T>( iY );
mem->wxgrad[idxy] = ( 1 - yFrac ) * mem->xGradients[tidxy] + yFrac * mem->xGradients[tidxy + mem->templateW];
mem->wygrad[idxy] = ( 1 - xFrac ) * mem->yGradients[tidxy] + xFrac * mem->yGradients[tidxy + 1];
} else {
mem->wxgrad[idxy] = 0;
mem->wygrad[idxy] = 0;
}
idxy += mem->fframeW;
}
}
template<typename T>
void hd_updateGradients( DeviceMemory<T>& mem ) {
int numBlocks = ( mem.fframeW + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;
d_updateGradients<T><<< numBlocks, THREADS_PER_BLOCK >>>( mem.d_mem );
}
template void hd_updateGradients( DeviceMemory<float>& mem );
template void hd_updateGradients( DeviceMemory<double>& mem );
//template void hd_updateBlockGradients( DeviceMemory<float>& mem, int groupBase );
//template void hd_updateBlockGradients( DeviceMemory<double>& mem, int groupBase );
|
fc0dc12f9b9f5e12b0a033be9d87ec76941d9606.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common/common.h"
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <ctype.h>
#include <time.h>
#include <stdio.h>
#include<string.h>
#include <vector>
#include <algorithm>
#include <sstream>
#include<cuda.h>
#include<cuda_runtime.h>
#include<cstring>
using namespace std;
#define THREADS_PER_BLOCK 512
struct constraint
{
string name;
string op;
int value;
int filterid;
int rowid;
};
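// Trivially-copyable mirror of `constraint` for transfer to the GPU; assumes attribute
// names and operators fit in 4 characters plus the terminating NUL.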
struct newConstraint
{
char name[5];
char op[5];
int value;
int filterid;
int rowid;
};
struct filter
{
int size;
unsigned int count;
int filtid;
};
struct names
{
string name;
int rowid;
int count;
};
struct input
{
string name;
string op;
int value;
};
struct newInput
{
char name[5];
char op[5];
int value;
};
//Start of Cuda Kernel match
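// Grid layout: blockIdx.y selects one attribute row of the event; each thread in that
// row handles one constraint (bounded by sizec[row]), with constraints stored row-major
// using row stride `nlines`. Every satisfied constraint atomically bumps its filter's
// count; a '*' name means the event carries no value for this attribute and the whole
// row returns early.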
__global__ void match(newConstraint *constraints, int *sizec, filter *filters, newInput *inputs, int nlines)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y;
__shared__ char name[5];
__shared__ int inputValue;
__shared__ int rowid;
/*if(x==0&&y==0){
printf("\nsize:%d",sizec[0]);
printf("\nsize:%d",sizec[1]);
printf("\nsize:%d",sizec[2]);
}*/
if(threadIdx.x==0)
{
int i=0;
while(inputs[y].name[i]!='\0'){
name[i]=inputs[y].name[i];
i++;
}
name[i]='\0';
inputValue=inputs[y].value;
rowid=y;//
}
__syncthreads();
//printf(" %d",r);
if(x>=sizec[rowid])
return;
if(name[0]=='*')
return;
//printf("\n%d %d",blockIdx.y,threadIdx.x);
//data of constraints
int z=nlines*rowid+x;
int valsub=constraints[z].value;
//char constraintName[5]=constraints[nlines*rowid+x].name;
char op1[5];
int i=0;
while(constraints[z].op[i]!='\0'){
op1[i]=constraints[z].op[i];
i++;
}
op1[i]='\0';
int filterid=constraints[z].filterid;
//data of shared input
int valpub=inputValue;
/*if(threadIdx.x==0&&blockIdx.y==0)
printf("\n%d %s %d %s",inputValue,name,valpub,op1);
*/
int satisfied = 0;
if(op1[0]=='<' && op1[1]=='=')
{
if(valpub <= valsub)
satisfied =1;
}
else if(op1[0]=='<')
{
if(valpub < valsub)
satisfied =1;
}
else if(op1[0]=='>' && op1[1]=='=')
{
if(valpub >= valsub)
satisfied =1;
}
else if(op1[0]=='>')
{
if(valpub > valsub)
satisfied =1;
}
else if(op1[0]=='=')
{
if(valpub == valsub)
satisfied =1;
}
else if(op1[0]=='!' && op1[1]=='=')
{
if(valpub != valsub)
satisfied =1;
}
if(satisfied==0)
return;
atomicInc(&filters[filterid].count, filters[filterid].size+1);
//filters[filterid].count++;
//__syncthreads();
//filters[filterid].filtid = filterid+1;
//int q = 0;
//for( ; q < filters.size() ; q++)
//if(filters[filterid].count == filters[filterid].size)
}
//End of kernel
/*char CONSTRAINT_FILE[] = "subs_20000.txt";
char EVENTS_FILE [] = "pubs_1000.txt";*/
char CONSTRAINT_FILE[] = "b.txt";
//CONSTRAINT_FILE =new char["1st_1_lack.txt";
char EVENTS_FILE [] = "a.txt";
int contains(string line, vector<names> namesArray)
{
int length = namesArray.size();
for(int i=0;i<length;i++)
{
names name = namesArray[i];
if(name.name==line)
{
return i;
}
}
return -1;
}
void printStrings(vector<string> listt)
{
int length = listt.size();
for(int i=0;i<length;i++)
{
cout<<"\n"<<listt[i];
}
return;
}
//Functions to split filter by ';'
std::vector<std::string> &split(const std::string &s, char delim,std::vector<std::string> &elems) {
std::stringstream ss(s);
std::string item;
while (std::getline(ss, item, delim)) {
elems.push_back(item);
}
return elems;
}
std::vector<std::string> split(const std::string &s, char delim)
{
std::vector<std::string> elems;
split(s, delim, elems);
return elems;
}
vector<filter> getFilters(vector <string> c_list , vector<constraint> constraints)
{
vector<filter> filters;
int length = c_list.size();
filter tempFilter;
for(int i=0;i<length;i++)
{
vector <string> splitted = split(c_list[i],';');
tempFilter.size=splitted.size();
tempFilter.count=0;
tempFilter.filtid= i ;
filters.push_back(tempFilter);
}
return filters;
}
//End of split function
/*vector<string> extractNames(vector<string> c_list){
vector <string> names;
for(int i=0;i<c_list.size();i++){
string filter = c_list[i];
vector <string> constraints = split(filter,';');
for(int j=0;j<constraints.size();j++){
string constraint = constraints[j];
if(!contains(constraint,names))
names.push_back();
}
}
}*/
int getIntFromString(string value)
{
int length = value.size();
int val=0;
for(int i=0;i<length;i++)
{
int charval = value[i]-48;
val =val *10 +charval;
}
return val;
}
void addName(string name,vector<names> &namesArray)
{
if(contains(name,namesArray))
return;
names newName;
newName.name=name;
newName.count=1;
newName.rowid=namesArray.size();
}
vector<constraint> getConstraints(vector <string> c_list)
{
int length = c_list.size();
vector<constraint> constraints;
for(int i=0;i<length;i++)
{
constraint tempConstraint;
vector<string> splitted = split(c_list[i],';');
int splitLength = splitted.size();
for(int j=0;j<splitLength;j++)
{
string eachConstraint= splitted[j];
string name,op,value;
int filterid=i;
int constraintLength = eachConstraint.size();
for(int k=0;k<constraintLength;k++)
{
char temp = eachConstraint[k];
if((temp>=65&&temp<=90)||(temp>=97&&temp<=122))
name+=temp;
else if(temp>=48&&temp<=57)
value+=temp;
else
op+=temp;
}
int attrValue = getIntFromString(value);
tempConstraint.name=name;
tempConstraint.op=op;
tempConstraint.filterid=filterid;
tempConstraint.value=attrValue;
//tempConstraint.rowid=filterid; //modified
constraints.push_back(tempConstraint);
}
}
return constraints;
}
void getSizeC(vector<names> namesArray,int * const sizeC, int * maxVal)
{
int length = namesArray.size();
for(int i=0;i<length;i++)
{
sizeC[i]=namesArray[i].count;
if(sizeC[i]>*maxVal)
*maxVal=sizeC[i];
}
return;
}
void printFilters(vector<filter> filters)
{
int length = filters.size();
for(int i=0;i<length;i++)
{
printf("\nFilterSize:%d FilterCount:%d FiltRowid:%d",filters[i].size,filters[i].count,filters[i].filtid);
}
return;
}
void printConstraints(vector<constraint> constraints)
{
int length = constraints.size();
for(int i=0;i<length;i++)
{
cout<<"\n"<<i<<" : "<<"name "<<constraints[i].name<<" op "<<constraints[i].op<<" filterid "<<constraints[i].filterid;
cout<<" value "<<constraints[i].value;//<<" rowid "<<constraints[i].rowid;
}
return;
}
void printNames(vector<names> namesArray)
{
int length = namesArray.size();
for(int i=0;i<length;i++)
{
cout<<"\n"<<namesArray[i].name<<" "<<namesArray[i].rowid<<" "<<namesArray[i].count;
}
//cout<<"LLLL: "<<length<<endl;
}
vector<names> getNames(vector<constraint> &constraints)
{
vector <names> namesArray;
int length = constraints.size();
for(int i=0;i<length;i++)
{
string attrName = constraints[i].name;
int id=contains(attrName,namesArray);
if(id!=-1)
{
namesArray[id].count++;
constraints[i].rowid=id;
}
else
{
names tempName;
tempName.name=attrName;
tempName.count=1;
tempName.rowid=namesArray.size();
constraints[i].rowid=namesArray.size();
namesArray.push_back(tempName);
}
}
return namesArray;
}
void printInput(vector<input> inputEvents)
{
for(int i=0;i<inputEvents.size();i++)
{
cout<<"\n name "<<inputEvents[i].name<<" op "<<inputEvents[i].op<<" value "<<inputEvents[i].value;
}
}
void setRowIds(vector <constraint> &constraints,vector<names> names)
{
for(int i=0;i<constraints.size();i++)
{
int match=0;
for(int j=0;j<names.size();j++)
{
if(names[j].name==constraints[i].name)
{
constraints[i].rowid=j;
match=1;
break;
}
}
if(match==0)
constraints[i].rowid=101;
}
}
int main()
{
vector<string> c_list , e_list;
float resultTime = 0.0f;
//Events and Constraints reading
ifstream in_stream;
string line;
in_stream.open(EVENTS_FILE);
while(!in_stream.eof())
{
in_stream >> line;
e_list.push_back(line);
}
in_stream.close();
in_stream.open(CONSTRAINT_FILE);
while(!in_stream.eof())
{
in_stream >> line;
c_list.push_back(line);
}
in_stream.close();
c_list.erase(c_list.begin()+c_list.size()-1);
//cout<<*(c_list.begin())<<endl;
e_list.erase(e_list.begin()+e_list.size()-1);
//printStrings(c_list);
//printStrings(e_list);
int ii=0,cSize=c_list.size(),index=0;
//int results[cSize];
//for(int k=0;k<cSize;k++)results[k]=0;
int MAX_LINES=200000;
cout<<"Filters Size : "<<cSize <<endl;
cout<<"Events Size : "<<e_list.size()<<endl;
while(true)
{
if(ii>=cSize)
break;
vector <string> cc_list;
for(;(ii)<((index+1)*MAX_LINES)&&((ii)<cSize);ii++)
{
cc_list.push_back(c_list[ii]);
}
vector<constraint> constraints = getConstraints(cc_list);
vector<filter> filters = getFilters(cc_list,constraints);
vector <names> namesArray = getNames(constraints);
//cout<<"\n\n"<<"Filters";
//printFilters(filters);
//cout<<"\n\n"<<"Constraints";
//printConstraints(constraints);
/*cout<<"\n\n"<<"Names Array";
printNames(namesArray);//name , rowid , count 100;*/
/*cout<<"\n\n"<<"SizeC";
for(int i=0;i<namesArray.size();i++)
cout<<"\n"<<sizeC[i];*/
//Start of cuda code
//cout<<"GridSize"<<gridSize.y;
//cout<<"MaxC: "<<namesArray.size();
hipEvent_t start_event, stop_event;
float time=0.0f, timefinal=0.0f;
int ngpus;
hipGetDeviceCount(&ngpus);
cout<<"CUDA-capable devices : "<<ngpus<<endl;
/*int **sizeC = (int **)malloc(sizeof(int *)* ngpus) ;
int *maxConstraints = new int [ngpus];
for(int device = 0 ; device < ngpus ; device++)
{
sizeC[device] =(int*)malloc(sizeof(int)*namesArray.size());
getSizeC(namesArray,sizeC[device],&maxConstraints[device]);
}
int maxC = 2145 ;*/
int *sizeC , maxConstraints=0;
sizeC = new int[namesArray.size()];
getSizeC(namesArray,sizeC,&maxConstraints);
/*cout<<"\n\n"<<"SizeC";
for(int i=0;i<namesArray.size();i++)
{
cout<<namesArray.at(i);
cout<<":"<<sizeC[i]<<endl;
}*/
/*for(int i = 0 ; i<ngpus ; i++)
{
cout<<"maxCons : "<<maxConstraints[i]<<endl;
if(maxC < maxConstraints[i] )
maxC = maxConstraints[i];
}*/
//cout<<"maxC : "<<maxConstraints<<endl; 2145
dim3 dimBlock;
dimBlock.x=512;
dimBlock.y=1;
dim3 gridSize;
gridSize.x = (maxConstraints/dimBlock.x)+1;
gridSize.y = namesArray.size();
int separatedConstraintsSize = maxConstraints*namesArray.size();
vector <constraint> separatedConstraints;
separatedConstraints.reserve(separatedConstraintsSize);
int constraintsCounters[namesArray.size()];
for(int i=0;i<namesArray.size();i++)
constraintsCounters[i]=0;
constraint dummyConstraint;
dummyConstraint.name="*";
dummyConstraint.filterid=1;
dummyConstraint.op="=";
dummyConstraint.value=100;
dummyConstraint.rowid = 0;
for(int i=0;i<separatedConstraintsSize;i++)
separatedConstraints.push_back(dummyConstraint);
//logic to form namevector
for(int i=0;i<constraints.size();i++)
{
constraint tempConstarint=constraints[i];
separatedConstraints[tempConstarint.rowid*maxConstraints+constraintsCounters[tempConstarint.rowid]]=tempConstarint;
constraintsCounters[tempConstarint.rowid]++;
}
//Done with constraints
/*cout<<"\n\n"<<"Separated Constraints";
printConstraints(separatedConstraints);*/
//Copy Constraints to struct array
//newConstraint *allConstraints; //[separatedConstraintsSize];
newConstraint **allConstraints = (newConstraint **)malloc(sizeof(newConstraint *) * ngpus);
//allConstraints = new newConstraint[separatedConstraintsSize];
int p = 0;
for(int device = 0 ; device < ngpus ; device++)
allConstraints[device] = new newConstraint[(separatedConstraintsSize)/ngpus];
for(int device = 0 ; device < ngpus ; device++)
{
if(p < separatedConstraintsSize/ngpus)
for( ; p < separatedConstraintsSize/ngpus ; p++)
{
int size=separatedConstraints[p].name.length();
int j=0;
for(;j<size;j++){
allConstraints[device][p].name[j]=separatedConstraints[p].name[j];
}
allConstraints[device][p].name[j]='\0';
j=0;
int size1=separatedConstraints[p].op.length();
for(;j<size1;j++){
allConstraints[device][p].op[j]=separatedConstraints[p].op[j];
}
allConstraints[device][p].op[j]='\0';
allConstraints[device][p].value=separatedConstraints[p].value;
allConstraints[device][p].filterid=separatedConstraints[p].filterid;
allConstraints[device][p].rowid=separatedConstraints[p].rowid;
}
else
{
int i = 0;
//cout<<p<<endl;exit(1);
for( ; p < separatedConstraintsSize; p++ )
{
int size=separatedConstraints[p].name.length();
int j=0;
for(;j<size;j++){
allConstraints[device][i].name[j]=separatedConstraints[p].name[j];
}
//cout<<p<<" : "<<allConstraints[device][i].name<<"";
allConstraints[device][i].name[j]='\0';
j=0;
int size1=separatedConstraints[p].op.length();
for(;j<size1;j++){
allConstraints[device][i].op[j]=separatedConstraints[p].op[j];
}
allConstraints[device][i].op[j]='\0';
allConstraints[device][i].value=separatedConstraints[p].value;
allConstraints[device][i].filterid=separatedConstraints[p].filterid;
allConstraints[device][i].rowid=separatedConstraints[p].rowid;
i++;
}
}
}
/*for(int i = 0 ; i < separatedConstraintsSize/ngpus ; i++)
{
cout<<i<<">"<<allConstraints[1][i].name<<" "<<allConstraints[1][i].filterid<<endl;
}
exit(1);*/
/*int x =0;
cout<<"\n\n"<<"Names of separated cs ";
for(int device = 0 ; device < ngpus ; device++)
{
if(x < separatedConstraintsSize/ngpus-1)
{
for( ; x < separatedConstraintsSize/ngpus ; x++)
{
cout<<x<<" : "<<allConstraints[device][x].name<<" ";
}
cout<<endl<<endl;
}
else
//while(x>separatedConstraintsSize/ngpus)
{
for( ; x < separatedConstraintsSize ; x++)
cout<<x<<" : "<<allConstraints[device][x].name<<endl;
x++;
}
}
exit(1);*/
//End of constraint copy to array
//newConstraint **d_allConstraints;
newConstraint **d_allConstraints = (newConstraint **)malloc(sizeof(newConstraint *) * ngpus);
int **d_sizeC =(int**)malloc(sizeof(int *) * ngpus) ;
//Stream for asynchronous command execution
hipStream_t *stream = (hipStream_t *)malloc(sizeof(hipStream_t) * ngpus);//non-default stream is declared.
//copy filters to struct array
//filter *allFilters;
//allFilters = new filter[filters.size()*ngpus];
/*filter **resultFilt =(filter **)malloc(sizeof(filter *)*ngpus);
for(int ndev = 0 ; ndev < ngpus ; ndev++)
{
resultFilt[ndev] = new filter[filters.size()/ngpus];
}*/
//filter *resultFilt;
//resultFilt = new filter[sizeof(filter) * filters.size()];
/*cout<<"\n\n"<<"Filters Size";
for(int device = 0; device < ngpus ; device++ )
for(int i=0;i<filters.size();i++)
{
cout<<i<<":"<<allFilters[device][i].count<<" "<<allFilters[device][i].size<<" "<<allFilters[device][i].filtid<<endl;;
}
exit(1);*/
//End of copy filters to struct array
filter **d_allFilters = (filter **)malloc(sizeof(filter *) * ngpus);// * filters.size());
for(int device = 0 ; device < ngpus ;device++)
{
hipSetDevice(device);
hipMalloc((void **) &d_allConstraints[device], (separatedConstraintsSize*sizeof(struct newConstraint))/ngpus);
(hipMalloc((void **) &d_sizeC[device],namesArray.size()*sizeof(int)));
//CHECK(hipMalloc((void **)&resultFilt[device],filters.size()/ngpus));
//CHECK(cudaMemSet(resultFilt[device], 0 , sizeof(filter)));
(hipStreamCreate(&stream[device]));
// CHECK(hipHostMalloc((void **)&allFilters[device],filters.size()));
}
for(int ndevice = 0 ; ndevice < ngpus ; ndevice++)
{
hipSetDevice(ndevice);
(hipMemcpyAsync(d_allConstraints[ndevice],allConstraints[ndevice],separatedConstraintsSize/ngpus*sizeof(struct newConstraint), hipMemcpyHostToDevice, stream[ndevice]));
(hipMemcpyAsync(d_sizeC[ndevice],sizeC,namesArray.size()*sizeof(int),hipMemcpyHostToDevice,stream[ndevice]));
}
//Input part starts
input dummyInput;
dummyInput.name="*";
dummyInput.op="=";
dummyInput.value=100;
int *finalres;
finalres = new int[filters.size()](); //for saving results (value-initialized to zero; accumulated with += below)
//for(int i=0;i<e_list.size();i++)
for(int i = 0 ; i <3 ; i++)
{
vector <string> inputString;
inputString.push_back(e_list[i]);
vector <constraint> inputConstraints = getConstraints(inputString);//event separating
//printConstraints(inputConstraints);
//exit(1);
setRowIds(inputConstraints,namesArray);
vector <input> inputEvents;
for(int j=0;j<namesArray.size();j++)
{
inputEvents.push_back(dummyInput);
}
for(int j=0;j<inputConstraints.size();j++)
{
input tempInput;
tempInput.name=inputConstraints[j].name;
tempInput.op=inputConstraints[j].op;
tempInput.value=inputConstraints[j].value;
inputEvents[inputConstraints[j].rowid]=tempInput;
}
//printInput(inputEvents);
//exit(1);
newInput *allInputs ;//[inputEvents.size()] ;
allInputs = new newInput[inputEvents.size()];
//newInput **allInputs = (newInput **)malloc(sizeof(newInput *)*ngpus ) ; // * inputEvents.size());
// allInputs[ndevice] = new newInput[inputEvents.size()];
for(int jj=0;jj<inputEvents.size();jj++)
{
int size=inputEvents[jj].name.length();
int j=0;
for(;j<size;j++){
allInputs[jj].name[j]=inputEvents[jj].name[j];
}
allInputs[jj].name[j]='\0';
j=0;
int size1=inputEvents[jj].op.length();
for(;j<size1;j++){
allInputs[jj].op[j]=inputEvents[jj].op[j];
}
allInputs[jj].op[j]='\0';
allInputs[jj].value=inputEvents[jj].value;
}
/*cout<<"\n\n"<<"InputEvents";
static int xxx = 0;
//for(int d = 0 ;d < ngpus ; d++)
for(int i=0;i<inputEvents.size();i++)
{
cout<<i<<":"<<allInputs[i].name<<" "<<allInputs[i].op<<" "<<allInputs[i].value<<endl ;
xxx++;
}
cout<<xxx<<endl;*/
//exit(1);
//continue;
filter **allFilters = (filter **)malloc(sizeof(filter *) * ngpus);
for(int device = 0 ; device < ngpus ; device++)
{
allFilters[device] = new filter[filters.size()];
for(int i=0 ; i < filters.size() ; i++)
{
allFilters[device][i].size=filters[i].size;
allFilters[device][i].count=filters[i].count;
allFilters[device][i].filtid=0;
}
}
newInput **d_allInputs = (newInput **)malloc(sizeof(newInput *) * ngpus);
for(int ndevice = 0 ; ndevice < ngpus ;ndevice++)
{
CHECK(hipSetDevice(ndevice));
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp , ndevice);
cout<<"Device "<<devProp.name<< " has compute capability : "<< devProp.major<<"."<<devProp.minor<<endl;
(hipMalloc((void **)&d_allFilters[ndevice],(filters.size()*sizeof(struct filter))));
(hipMemcpy(d_allFilters[ndevice], allFilters[ndevice], filters.size()*sizeof(struct filter), hipMemcpyHostToDevice));
(hipMalloc((void **)&d_allInputs[ndevice], inputEvents.size()*sizeof(struct newInput)));
(hipMemcpyAsync(d_allInputs[ndevice], allInputs, inputEvents.size()*sizeof(struct newInput), hipMemcpyHostToDevice,stream[ndevice]));
hipEventCreate(&start_event) ;
hipEventCreate(&stop_event) ;
hipEventRecord(start_event, stream[ndevice]);
hipLaunchKernelGGL(( match), dim3(gridSize),dim3(dimBlock),0,stream[ndevice], d_allConstraints[ndevice], d_sizeC[ndevice], d_allFilters[ndevice], d_allInputs[ndevice], maxConstraints);
//match<<<(separatedConstraintsSize/ngpus)/THREADS_PER_BLOCK,THREADS_PER_BLOCK,0,stream[ndevice]>>>(d_allConstraints[ndevice], d_sizeC[ndevice], d_allFilters[ndevice], d_allInputs[ndevice], maxConstraints);
(hipDeviceSynchronize());
//(hipStreamQuery(stream[ndevice]));
//timefinal+=time;
hipEventRecord(stop_event, stream[ndevice]);
hipEventSynchronize( stop_event);
hipEventElapsedTime( &time, start_event, stop_event );
hipEventDestroy( start_event ); // cleanup
hipEventDestroy( stop_event );
timefinal+=time;
//printf("\ndone and it took: %f milliseconds\n", time);
//we want count of filter
(hipMemcpy(allFilters[ndevice], d_allFilters[ndevice], filters.size()*sizeof(struct filter), hipMemcpyDeviceToHost));
//CHECK(hipStreamSynchronize(stream(ndevice)));
(hipStreamQuery(stream[ndevice]));
//if(allFilters[ndevice][i].count == allFilters[ndevice][i].size)
//cout<<e_list.at(i)<<" : "<<allFilters[ndevice][i].count<<endl;
//cout<<e_list.at(i)<<" : "<<allFilters[ndevice][i].count<<endl;
cout<<"Time Required : "<<time<<endl;
}
//for(int device = 0 ; device < ngpus ; device++)
/*{
for(int p = 0; p <filters.size() ; p++)
if(allFilters[0][p].count > 0 || allFilters[1][p].count > 0 )
cout<<p<<":"<<allFilters[0][p].count<<" "<<allFilters[1][p].count<<"\n";
cout<<endl;
}*/
cout<<endl<<e_list.at(i)<<endl;
// exit(1);
/*int count= 0 ;
//for(int ndevice = 0 ; ndevice < ngpus; ndevice++)
for(int x = 0 ; x < filters.size() ; x++)
{
int counter = 0 ;
counter = allFilters[0][x].count + allFilters[1][x].count ;
if(counter == allFilters[0][x].size)
{
cout<<x+1 <<" : "<<e_list.at(i)<<" count : "<<counter<<endl;
count++;
}
}
cout<<"count : "<<count<<endl;*/
// int counter = 0 ;
// counter = allFilters[0][i].count + allFilters[1][i].count;
// allFilters[0][i].count =counter;
// allFilters[1][i].count =counter;
/*for(int dev = 0 ; dev < ngpus ;dev++)
{
for(int y = 0 ; y < filters.size() ; y++)
if(allFilters[dev][y].count == allFilters[dev][y].size)
cout<<e_list.at(i)<<" : "<<allFilters[dev][y].count<<endl;
cout<<endl;
}*/
for(int ndevice = 0 ;ndevice < ngpus ; ndevice++)
{
hipSetDevice(ndevice);
hipStreamSynchronize(stream[ndevice]);
}
filter *allFilt;
allFilt = new filter[filters.size()];
int p;
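// NOTE: this merge assumes exactly two devices; allFilters[0] and allFilters[1] are
// summed unconditionally.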
for(p = 0; p < filters.size() ; p++)
{
allFilt[p].count = allFilters[0][p].count + allFilters[1][p].count;
}
//cout<<p<<": "<<allFilt[6011].count<<endl;
/*for(int p = 0 ; p < 6013; p++)
cout<<p<<":"<<allFilters[0][p].count<<" "<<allFilters[1][p].count<<" "<<allFilt[p].count<<endl;
cout<<endl<<endl;*/
//exit(1);
int *results;
results = new int[cSize](); // zero-initialize; incremented with += below
//for(int ndevice = 0 ; ndevice < ngpus ; ndevice++)
{
for(int q = 0; q < filters.size();q++)
{
//cout<<allFilters[ndevice][i].count<<endl;
//continue;
int res=allFilt[q].count/allFilters[0][q].size;
finalres[q]+=res;
results[q]+=res;
allFilt[q].count=0;
//printf("%d \n ",finalres[i]);
}
}
int count1 = 0;
for(int i=0;i<cSize;i++)
{
if(results[i] >0)
{
printf("\nResult-> %d %d ",i,results[i]);
count1++;
}
}
for(int ndevice = 0 ;ndevice < ngpus ; ndevice++)
{
hipSetDevice(ndevice);
hipFree(d_allInputs[ndevice]);
hipFree(d_allFilters[ndevice]);
}
delete[] allInputs ;
for(int dev =0 ; dev < ngpus ; dev++)
delete[] allFilters[dev];
delete[] allFilters;
delete[] allFilt;
delete[] results;
cout<<"count is :"<<count1<<endl;
//exit(1);
/* for(int dev = 0 ; dev < ngpus ; dev++)
{
for(int x = 0 ; x < filters.size() ; x++)
{
if(allFilters[dev][x].size == allFilters[dev][x].count)
{
cout<<e_list.at(i);
cout<<" : "<<allFilters[dev][x].filtid<<": "<<allFilters[dev][x].count<<endl;
}
}
cout<<endl<<endl;
}*/
}
//End of loop for input
index++;
//printf("\ndone and it took: %f milliseconds\n", timefinal);
resultTime+=timefinal;
cout<<"\nresultTime : "<<resultTime<<endl;
//int count = 0;
/*for(int i=0;i<cSize;i++)
{
if(results[i] >0)
{
printf("\nResult-> %d %d ",i,results[i]);
count++;
}
}*/
//cout<<"\n Count = "<<count<<endl;
for(int device = 0 ; device < ngpus ; device++)
{
hipSetDevice(device);
hipFree(d_allConstraints[device]);
hipFree(d_sizeC[device]);
}
exit(1);
}
//printf("\ndone and it took: %f milliseconds\n",resultTime);
/*int count = 0;
for(int i=0;i<cSize;i++)
{
if(results[i] >0)
{
printf("\nResult-> %d %d ",i,results[i]);
count++;
}
}
cout<<"\n Count = "<<count<<endl;
cout<<" done and it took:"<< resultTime<<" milliseconds \n";
*/
return 0;
}
| fc0dc12f9b9f5e12b0a033be9d87ec76941d9606.cu | #include "common/common.h"
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <ctype.h>
#include <time.h>
#include <stdio.h>
#include<string.h>
#include <vector>
#include <algorithm>
#include <sstream>
#include<cuda.h>
#include<cuda_runtime.h>
#include<cstring>
using namespace std;
#define THREADS_PER_BLOCK 512
struct constraint
{
string name;
string op;
int value;
int filterid;
int rowid;
};
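// Trivially-copyable mirror of `constraint` for transfer to the GPU; assumes attribute
// names and operators fit in 4 characters plus the terminating NUL.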
struct newConstraint
{
char name[5];
char op[5];
int value;
int filterid;
int rowid;
};
struct filter
{
int size;
unsigned int count;
int filtid;
};
struct names
{
string name;
int rowid;
int count;
};
struct input
{
string name;
string op;
int value;
};
struct newInput
{
char name[5];
char op[5];
int value;
};
//Start of Cuda Kernel match
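// Grid layout: blockIdx.y selects one attribute row of the event; each thread in that
// row handles one constraint (bounded by sizec[row]), with constraints stored row-major
// using row stride `nlines`. Every satisfied constraint atomically bumps its filter's
// count; a '*' name means the event carries no value for this attribute and the whole
// row returns early.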
__global__ void match(newConstraint *constraints, int *sizec, filter *filters, newInput *inputs, int nlines)
{
int x = blockIdx.x*blockDim.x+threadIdx.x;
int y = blockIdx.y;
__shared__ char name[5];
__shared__ int inputValue;
__shared__ int rowid;
/*if(x==0&&y==0){
printf("\nsize:%d",sizec[0]);
printf("\nsize:%d",sizec[1]);
printf("\nsize:%d",sizec[2]);
}*/
if(threadIdx.x==0)
{
int i=0;
while(inputs[y].name[i]!='\0'){
name[i]=inputs[y].name[i];
i++;
}
name[i]='\0';
inputValue=inputs[y].value;
rowid=y;//
}
__syncthreads();
//printf(" %d",r);
if(x>=sizec[rowid])
return;
if(name[0]=='*')
return;
//printf("\n%d %d",blockIdx.y,threadIdx.x);
//data of constraints
int z=nlines*rowid+x;
int valsub=constraints[z].value;
//char constraintName[5]=constraints[nlines*rowid+x].name;
char op1[5];
int i=0;
while(constraints[z].op[i]!='\0'){
op1[i]=constraints[z].op[i];
i++;
}
op1[i]='\0';
int filterid=constraints[z].filterid;
//data of shared input
int valpub=inputValue;
/*if(threadIdx.x==0&&blockIdx.y==0)
printf("\n%d %s %d %s",inputValue,name,valpub,op1);
*/
int satisfied = 0;
if(op1[0]=='<' && op1[1]=='=')
{
if(valpub <= valsub)
satisfied =1;
}
else if(op1[0]=='<')
{
if(valpub < valsub)
satisfied =1;
}
else if(op1[0]=='>' && op1[1]=='=')
{
if(valpub >= valsub)
satisfied =1;
}
else if(op1[0]=='>')
{
if(valpub > valsub)
satisfied =1;
}
else if(op1[0]=='=')
{
if(valpub == valsub)
satisfied =1;
}
else if(op1[0]=='!' && op1[1]=='=')
{
if(valpub != valsub)
satisfied =1;
}
if(satisfied==0)
return;
atomicInc(&filters[filterid].count, filters[filterid].size+1);
//filters[filterid].count++;
//__syncthreads();
//filters[filterid].filtid = filterid+1;
//int q = 0;
//for( ; q < filters.size() ; q++)
//if(filters[filterid].count == filters[filterid].size)
}
//End of kernel
/*char CONSTRAINT_FILE[] = "subs_20000.txt";
char EVENTS_FILE [] = "pubs_1000.txt";*/
char CONSTRAINT_FILE[] = "b.txt";
//CONSTRAINT_FILE =new char["1st_1_lack.txt";
char EVENTS_FILE [] = "a.txt";
int contains(string line, vector<names> namesArray)
{
int length = namesArray.size();
for(int i=0;i<length;i++)
{
names name = namesArray[i];
if(name.name==line)
{
return i;
}
}
return -1;
}
void printStrings(vector<string> listt)
{
int length = listt.size();
for(int i=0;i<length;i++)
{
cout<<"\n"<<listt[i];
}
return;
}
//Functions to split filter by ';'
std::vector<std::string> &split(const std::string &s, char delim,std::vector<std::string> &elems) {
std::stringstream ss(s);
std::string item;
while (std::getline(ss, item, delim)) {
elems.push_back(item);
}
return elems;
}
std::vector<std::string> split(const std::string &s, char delim)
{
std::vector<std::string> elems;
split(s, delim, elems);
return elems;
}
vector<filter> getFilters(vector <string> c_list , vector<constraint> constraints)
{
vector<filter> filters;
int length = c_list.size();
filter tempFilter;
for(int i=0;i<length;i++)
{
vector <string> splitted = split(c_list[i],';');
tempFilter.size=splitted.size();
tempFilter.count=0;
tempFilter.filtid= i ;
filters.push_back(tempFilter);
}
return filters;
}
//End of split function
/*vector<string> extractNames(vector<string> c_list){
vector <string> names;
for(int i=0;i<c_list.size();i++){
string filter = c_list[i];
vector <string> constraints = split(filter,';');
for(int j=0;j<constraints.size();j++){
string constraint = constraints[j];
if(!contains(constraint,names))
names.push_back();
}
}
}*/
int getIntFromString(string value)
{
int length = value.size();
int val=0;
for(int i=0;i<length;i++)
{
int charval = value[i]-48;
val =val *10 +charval;
}
return val;
}
void addName(string name,vector<names> &namesArray)
{
if(contains(name,namesArray))
return;
names newName;
newName.name=name;
newName.count=1;
newName.rowid=namesArray.size();
}
vector<constraint> getConstraints(vector <string> c_list)
{
int length = c_list.size();
vector<constraint> constraints;
for(int i=0;i<length;i++)
{
constraint tempConstraint;
vector<string> splitted = split(c_list[i],';');
int splitLength = splitted.size();
for(int j=0;j<splitLength;j++)
{
string eachConstraint= splitted[j];
string name,op,value;
int filterid=i;
int constraintLength = eachConstraint.size();
for(int k=0;k<constraintLength;k++)
{
char temp = eachConstraint[k];
if((temp>=65&&temp<=90)||(temp>=97&&temp<=122))
name+=temp;
else if(temp>=48&&temp<=57)
value+=temp;
else
op+=temp;
}
int attrValue = getIntFromString(value);
tempConstraint.name=name;
tempConstraint.op=op;
tempConstraint.filterid=filterid;
tempConstraint.value=attrValue;
//tempConstraint.rowid=filterid; //modified
constraints.push_back(tempConstraint);
}
}
return constraints;
}
void getSizeC(vector<names> namesArray,int * const sizeC, int * maxVal)
{
int length = namesArray.size();
for(int i=0;i<length;i++)
{
sizeC[i]=namesArray[i].count;
if(sizeC[i]>*maxVal)
*maxVal=sizeC[i];
}
return;
}
void printFilters(vector<filter> filters)
{
int length = filters.size();
for(int i=0;i<length;i++)
{
printf("\nFilterSize:%d FilterCount:%d FiltRowid:%d",filters[i].size,filters[i].count,filters[i].filtid);
}
return;
}
void printConstraints(vector<constraint> constraints)
{
int length = constraints.size();
for(int i=0;i<length;i++)
{
cout<<"\n"<<i<<" : "<<"name "<<constraints[i].name<<" op "<<constraints[i].op<<" filterid "<<constraints[i].filterid;
cout<<" value "<<constraints[i].value;//<<" rowid "<<constraints[i].rowid;
}
return;
}
void printNames(vector<names> namesArray)
{
int length = namesArray.size();
for(int i=0;i<length;i++)
{
cout<<"\n"<<namesArray[i].name<<" "<<namesArray[i].rowid<<" "<<namesArray[i].count;
}
//cout<<"LLLL: "<<length<<endl;
}
vector<names> getNames(vector<constraint> &constraints)
{
vector <names> namesArray;
int length = constraints.size();
for(int i=0;i<length;i++)
{
string attrName = constraints[i].name;
int id=contains(attrName,namesArray);
if(id!=-1)
{
namesArray[id].count++;
constraints[i].rowid=id;
}
else
{
names tempName;
tempName.name=attrName;
tempName.count=1;
tempName.rowid=namesArray.size();
constraints[i].rowid=namesArray.size();
namesArray.push_back(tempName);
}
}
return namesArray;
}
void printInput(vector<input> inputEvents)
{
for(int i=0;i<inputEvents.size();i++)
{
cout<<"\n name "<<inputEvents[i].name<<" op "<<inputEvents[i].op<<" value "<<inputEvents[i].value;
}
}
void setRowIds(vector <constraint> &constraints,vector<names> names)
{
for(int i=0;i<constraints.size();i++)
{
int match=0;
for(int j=0;j<names.size();j++)
{
if(names[j].name==constraints[i].name)
{
constraints[i].rowid=j;
match=1;
break;
}
}
if(match==0)
constraints[i].rowid=101;
}
}
int main()
{
vector<string> c_list , e_list;
float resultTime = 0.0f;
//Events and Constraints reading
ifstream in_stream;
string line;
in_stream.open(EVENTS_FILE);
while(!in_stream.eof())
{
in_stream >> line;
e_list.push_back(line);
}
in_stream.close();
in_stream.open(CONSTRAINT_FILE);
while(!in_stream.eof())
{
in_stream >> line;
c_list.push_back(line);
}
in_stream.close();
c_list.erase(c_list.begin()+c_list.size()-1);
//cout<<*(c_list.begin())<<endl;
e_list.erase(e_list.begin()+e_list.size()-1);
//printStrings(c_list);
//printStrings(e_list);
int ii=0,cSize=c_list.size(),index=0;
//int results[cSize];
//for(int k=0;k<cSize;k++)results[k]=0;
int MAX_LINES=200000;
cout<<"Filters Size : "<<cSize <<endl;
cout<<"Events Size : "<<e_list.size()<<endl;
while(true)
{
if(ii>=cSize)
break;
vector <string> cc_list;
for(;(ii)<((index+1)*MAX_LINES)&&((ii)<cSize);ii++)
{
cc_list.push_back(c_list[ii]);
}
vector<constraint> constraints = getConstraints(cc_list);
vector<filter> filters = getFilters(cc_list,constraints);
vector <names> namesArray = getNames(constraints);
//cout<<"\n\n"<<"Filters";
//printFilters(filters);
//cout<<"\n\n"<<"Constraints";
//printConstraints(constraints);
/*cout<<"\n\n"<<"Names Array";
printNames(namesArray);//name , rowid , count 100;*/
/*cout<<"\n\n"<<"SizeC";
for(int i=0;i<namesArray.size();i++)
cout<<"\n"<<sizeC[i];*/
//Start of cuda code
//cout<<"GridSize"<<gridSize.y;
//cout<<"MaxC: "<<namesArray.size();
cudaEvent_t start_event, stop_event;
float time=0.0f, timefinal=0.0f;
int ngpus;
cudaGetDeviceCount(&ngpus);
cout<<"CUDA-capable devices : "<<ngpus<<endl;
/*int **sizeC = (int **)malloc(sizeof(int *)* ngpus) ;
int *maxConstraints = new int [ngpus];
for(int device = 0 ; device < ngpus ; device++)
{
sizeC[device] =(int*)malloc(sizeof(int)*namesArray.size());
getSizeC(namesArray,sizeC[device],&maxConstraints[device]);
}
int maxC = 2145 ;*/
int *sizeC , maxConstraints=0;
sizeC = new int[namesArray.size()];
getSizeC(namesArray,sizeC,&maxConstraints);
/*cout<<"\n\n"<<"SizeC";
for(int i=0;i<namesArray.size();i++)
{
cout<<namesArray.at(i);
cout<<":"<<sizeC[i]<<endl;
}*/
/*for(int i = 0 ; i<ngpus ; i++)
{
cout<<"maxCons : "<<maxConstraints[i]<<endl;
if(maxC < maxConstraints[i] )
maxC = maxConstraints[i];
}*/
//cout<<"maxC : "<<maxConstraints<<endl; 2145
dim3 dimBlock;
dimBlock.x=512;
dimBlock.y=1;
dim3 gridSize;
gridSize.x = (maxConstraints/dimBlock.x)+1;
gridSize.y = namesArray.size();
int separatedConstraintsSize = maxConstraints*namesArray.size();
vector <constraint> separatedConstraints;
separatedConstraints.reserve(separatedConstraintsSize);
int constraintsCounters[namesArray.size()];
for(int i=0;i<namesArray.size();i++)
constraintsCounters[i]=0;
constraint dummyConstraint;
dummyConstraint.name="*";
dummyConstraint.filterid=1;
dummyConstraint.op="=";
dummyConstraint.value=100;
dummyConstraint.rowid = 0;
for(int i=0;i<separatedConstraintsSize;i++)
separatedConstraints.push_back(dummyConstraint);
//logic to form namevector
for(int i=0;i<constraints.size();i++)
{
constraint tempConstarint=constraints[i];
separatedConstraints[tempConstarint.rowid*maxConstraints+constraintsCounters[tempConstarint.rowid]]=tempConstarint;
constraintsCounters[tempConstarint.rowid]++;
}
//Done with constraints
/*cout<<"\n\n"<<"Separated Constraints";
printConstraints(separatedConstraints);*/
//Copy Constraints to struct array
//newConstraint *allConstraints; //[separatedConstraintsSize];
newConstraint **allConstraints = (newConstraint **)malloc(sizeof(newConstraint *) * ngpus);
//allConstraints = new newConstraint[separatedConstraintsSize];
int p = 0;
for(int device = 0 ; device < ngpus ; device++)
allConstraints[device] = new newConstraint[(separatedConstraintsSize)/ngpus];
for(int device = 0 ; device < ngpus ; device++)
{
if(p < separatedConstraintsSize/ngpus)
for( ; p < separatedConstraintsSize/ngpus ; p++)
{
int size=separatedConstraints[p].name.length();
int j=0;
for(;j<size;j++){
allConstraints[device][p].name[j]=separatedConstraints[p].name[j];
}
allConstraints[device][p].name[j]='\0';
j=0;
int size1=separatedConstraints[p].op.length();
for(;j<size1;j++){
allConstraints[device][p].op[j]=separatedConstraints[p].op[j];
}
allConstraints[device][p].op[j]='\0';
allConstraints[device][p].value=separatedConstraints[p].value;
allConstraints[device][p].filterid=separatedConstraints[p].filterid;
allConstraints[device][p].rowid=separatedConstraints[p].rowid;
}
else
{
int i = 0;
//cout<<p<<endl;exit(1);
for( ; p < separatedConstraintsSize; p++ )
{
int size=separatedConstraints[p].name.length();
int j=0;
for(;j<size;j++){
allConstraints[device][i].name[j]=separatedConstraints[p].name[j];
}
//cout<<p<<" : "<<allConstraints[device][i].name<<"";
allConstraints[device][i].name[j]='\0';
j=0;
int size1=separatedConstraints[p].op.length();
for(;j<size1;j++){
allConstraints[device][i].op[j]=separatedConstraints[p].op[j];
}
allConstraints[device][i].op[j]='\0';
allConstraints[device][i].value=separatedConstraints[p].value;
allConstraints[device][i].filterid=separatedConstraints[p].filterid;
allConstraints[device][i].rowid=separatedConstraints[p].rowid;
i++;
}
}
}
/*for(int i = 0 ; i < separatedConstraintsSize/ngpus ; i++)
{
cout<<i<<">"<<allConstraints[1][i].name<<" "<<allConstraints[1][i].filterid<<endl;
}
exit(1);*/
/*int x =0;
cout<<"\n\n"<<"Names of separated cs ";
for(int device = 0 ; device < ngpus ; device++)
{
if(x < separatedConstraintsSize/ngpus-1)
{
for( ; x < separatedConstraintsSize/ngpus ; x++)
{
cout<<x<<" : "<<allConstraints[device][x].name<<" ";
}
cout<<endl<<endl;
}
else
//while(x>separatedConstraintsSize/ngpus)
{
for( ; x < separatedConstraintsSize ; x++)
cout<<x<<" : "<<allConstraints[device][x].name<<endl;
x++;
}
}
exit(1);*/
//End of constraint copy to array
//newConstraint **d_allConstraints;
newConstraint **d_allConstraints = (newConstraint **)malloc(sizeof(newConstraint *) * ngpus);
int **d_sizeC =(int**)malloc(sizeof(int *) * ngpus) ;
//Stream for asynchronous command execution
cudaStream_t *stream = (cudaStream_t *)malloc(sizeof(cudaStream_t) * ngpus);//non-default stream is declared.
//copy filters to struct array
//filter *allFilters;
//allFilters = new filter[filters.size()*ngpus];
/*filter **resultFilt =(filter **)malloc(sizeof(filter *)*ngpus);
for(int ndev = 0 ; ndev < ngpus ; ndev++)
{
resultFilt[ndev] = new filter[filters.size()/ngpus];
}*/
//filter *resultFilt;
//resultFilt = new filter[sizeof(filter) * filters.size()];
/*cout<<"\n\n"<<"Filters Size";
for(int device = 0; device < ngpus ; device++ )
for(int i=0;i<filters.size();i++)
{
cout<<i<<":"<<allFilters[device][i].count<<" "<<allFilters[device][i].size<<" "<<allFilters[device][i].filtid<<endl;;
}
exit(1);*/
//End of copy filters to struct array
filter **d_allFilters = (filter **)malloc(sizeof(filter *) * ngpus);// * filters.size());
for(int device = 0 ; device < ngpus ;device++)
{
cudaSetDevice(device);
cudaMalloc((void **) &d_allConstraints[device], (separatedConstraintsSize*sizeof(struct newConstraint))/ngpus);
(cudaMalloc((void **) &d_sizeC[device],namesArray.size()*sizeof(int)));
//CHECK(cudaMalloc((void **)&resultFilt[device],filters.size()/ngpus));
//CHECK(cudaMemSet(resultFilt[device], 0 , sizeof(filter)));
(cudaStreamCreate(&stream[device]));
// CHECK(cudaMallocHost((void **)&allFilters[device],filters.size()));
}
for(int ndevice = 0 ; ndevice < ngpus ; ndevice++)
{
cudaSetDevice(ndevice);
(cudaMemcpyAsync(d_allConstraints[ndevice],allConstraints[ndevice],separatedConstraintsSize/ngpus*sizeof(struct newConstraint), cudaMemcpyHostToDevice, stream[ndevice]));
(cudaMemcpyAsync(d_sizeC[ndevice],sizeC,namesArray.size()*sizeof(int),cudaMemcpyHostToDevice,stream[ndevice]));
}
//Input part starts
input dummyInput;
dummyInput.name="*";
dummyInput.op="=";
dummyInput.value=100;
int *finalres;
finalres = new int[filters.size()](); //for saving results (value-initialized to zero; accumulated with += below)
//for(int i=0;i<e_list.size();i++)
for(int i = 0 ; i <3 ; i++)
{
vector <string> inputString;
inputString.push_back(e_list[i]);
vector <constraint> inputConstraints = getConstraints(inputString);//event separating
//printConstraints(inputConstraints);
//exit(1);
setRowIds(inputConstraints,namesArray);
vector <input> inputEvents;
for(int j=0;j<namesArray.size();j++)
{
inputEvents.push_back(dummyInput);
}
for(int j=0;j<inputConstraints.size();j++)
{
input tempInput;
tempInput.name=inputConstraints[j].name;
tempInput.op=inputConstraints[j].op;
tempInput.value=inputConstraints[j].value;
inputEvents[inputConstraints[j].rowid]=tempInput;
}
//printInput(inputEvents);
//exit(1);
newInput *allInputs ;//[inputEvents.size()] ;
allInputs = new newInput[inputEvents.size()];
//newInput **allInputs = (newInput **)malloc(sizeof(newInput *)*ngpus ) ; // * inputEvents.size());
// allInputs[ndevice] = new newInput[inputEvents.size()];
for(int jj=0;jj<inputEvents.size();jj++)
{
int size=inputEvents[jj].name.length();
int j=0;
for(;j<size;j++){
allInputs[jj].name[j]=inputEvents[jj].name[j];
}
allInputs[jj].name[j]='\0';
j=0;
int size1=inputEvents[jj].op.length();
for(;j<size1;j++){
allInputs[jj].op[j]=inputEvents[jj].op[j];
}
allInputs[jj].op[j]='\0';
allInputs[jj].value=inputEvents[jj].value;
}
/*cout<<"\n\n"<<"InputEvents";
static int xxx = 0;
//for(int d = 0 ;d < ngpus ; d++)
for(int i=0;i<inputEvents.size();i++)
{
cout<<i<<":"<<allInputs[i].name<<" "<<allInputs[i].op<<" "<<allInputs[i].value<<endl ;
xxx++;
}
cout<<xxx<<endl;*/
//exit(1);
//continue;
filter **allFilters = (filter **)malloc(sizeof(filter *) * ngpus);
for(int device = 0 ; device < ngpus ; device++)
{
allFilters[device] = new filter[filters.size()];
for(int i=0 ; i < filters.size() ; i++)
{
allFilters[device][i].size=filters[i].size;
allFilters[device][i].count=filters[i].count;
allFilters[device][i].filtid=0;
}
}
newInput **d_allInputs = (newInput **)malloc(sizeof(newInput *) * ngpus);
for(int ndevice = 0 ; ndevice < ngpus ;ndevice++)
{
CHECK(cudaSetDevice(ndevice));
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp , ndevice);
cout<<"Device "<<devProp.name<< " has compute capability : "<< devProp.major<<"."<<devProp.minor<<endl;
(cudaMalloc((void **)&d_allFilters[ndevice],(filters.size()*sizeof(struct filter))));
(cudaMemcpy(d_allFilters[ndevice], allFilters[ndevice], filters.size()*sizeof(struct filter), cudaMemcpyHostToDevice));
(cudaMalloc((void **)&d_allInputs[ndevice], inputEvents.size()*sizeof(struct newInput)));
(cudaMemcpyAsync(d_allInputs[ndevice], allInputs, inputEvents.size()*sizeof(struct newInput), cudaMemcpyHostToDevice,stream[ndevice]));
cudaEventCreate(&start_event) ;
cudaEventCreate(&stop_event) ;
cudaEventRecord(start_event, stream[ndevice]);
match<<<gridSize,dimBlock,0,stream[ndevice]>>>(d_allConstraints[ndevice], d_sizeC[ndevice], d_allFilters[ndevice], d_allInputs[ndevice], maxConstraints);
//match<<<(separatedConstraintsSize/ngpus)/THREADS_PER_BLOCK,THREADS_PER_BLOCK,0,stream[ndevice]>>>(d_allConstraints[ndevice], d_sizeC[ndevice], d_allFilters[ndevice], d_allInputs[ndevice], maxConstraints);
(cudaThreadSynchronize());
//(cudaStreamQuery(stream[ndevice]));
//timefinal+=time;
cudaEventRecord(stop_event, stream[ndevice]);
cudaEventSynchronize( stop_event);
cudaEventElapsedTime( &time, start_event, stop_event );
cudaEventDestroy( start_event ); // cleanup
cudaEventDestroy( stop_event );
timefinal+=time;
//printf("\ndone and it took: %f milliseconds\n", time);
//we want count of filter
(cudaMemcpy(allFilters[ndevice], d_allFilters[ndevice], filters.size()*sizeof(struct filter), cudaMemcpyDeviceToHost));
//CHECK(cudaStreamSynchronize(stream(ndevice)));
(cudaStreamQuery(stream[ndevice]));
//if(allFilters[ndevice][i].count == allFilters[ndevice][i].size)
//cout<<e_list.at(i)<<" : "<<allFilters[ndevice][i].count<<endl;
//cout<<e_list.at(i)<<" : "<<allFilters[ndevice][i].count<<endl;
cout<<"Time Required : "<<time<<endl;
}
//for(int device = 0 ; device < ngpus ; device++)
/*{
for(int p = 0; p <filters.size() ; p++)
if(allFilters[0][p].count > 0 || allFilters[1][p].count > 0 )
cout<<p<<":"<<allFilters[0][p].count<<" "<<allFilters[1][p].count<<"\n";
cout<<endl;
}*/
cout<<endl<<e_list.at(i)<<endl;
// exit(1);
/*int count= 0 ;
//for(int ndevice = 0 ; ndevice < ngpus; ndevice++)
for(int x = 0 ; x < filters.size() ; x++)
{
int counter = 0 ;
counter = allFilters[0][x].count + allFilters[1][x].count ;
if(counter == allFilters[0][x].size)
{
cout<<x+1 <<" : "<<e_list.at(i)<<" count : "<<counter<<endl;
count++;
}
}
cout<<"count : "<<count<<endl;*/
// int counter = 0 ;
// counter = allFilters[0][i].count + allFilters[1][i].count;
// allFilters[0][i].count =counter;
// allFilters[1][i].count =counter;
/*for(int dev = 0 ; dev < ngpus ;dev++)
{
for(int y = 0 ; y < filters.size() ; y++)
if(allFilters[dev][y].count == allFilters[dev][y].size)
cout<<e_list.at(i)<<" : "<<allFilters[dev][y].count<<endl;
cout<<endl;
}*/
for(int ndevice = 0 ;ndevice < ngpus ; ndevice++)
{
cudaSetDevice(ndevice);
cudaStreamSynchronize(stream[ndevice]);
}
filter *allFilt;
allFilt = new filter[filters.size()];
int p;
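// NOTE: this merge assumes exactly two devices; allFilters[0] and allFilters[1] are
// summed unconditionally.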
for(p = 0; p < filters.size() ; p++)
{
allFilt[p].count = allFilters[0][p].count + allFilters[1][p].count;
}
//cout<<p<<": "<<allFilt[6011].count<<endl;
/*for(int p = 0 ; p < 6013; p++)
cout<<p<<":"<<allFilters[0][p].count<<" "<<allFilters[1][p].count<<" "<<allFilt[p].count<<endl;
cout<<endl<<endl;*/
//exit(1);
int *results;
results = new int[cSize](); // zero-initialize; incremented with += below
//for(int ndevice = 0 ; ndevice < ngpus ; ndevice++)
{
for(int q = 0; q < filters.size();q++)
{
//cout<<allFilters[ndevice][i].count<<endl;
//continue;
int res=allFilt[q].count/allFilters[0][q].size;
finalres[q]+=res;
results[q]+=res;
allFilt[q].count=0;
//printf("%d \n ",finalres[i]);
}
}
int count1 = 0;
for(int i=0;i<cSize;i++)
{
if(results[i] >0)
{
printf("\nResult-> %d %d ",i,results[i]);
count1++;
}
}
for(int ndevice = 0 ;ndevice < ngpus ; ndevice++)
{
cudaSetDevice(ndevice);
cudaFree(d_allInputs[ndevice]);
cudaFree(d_allFilters[ndevice]);
}
delete[] allInputs ;
for(int dev =0 ; dev < ngpus ; dev++)
delete[] allFilters[dev];
delete[] allFilters;
delete[] allFilt;
delete[] results;
cout<<"count is :"<<count1<<endl;
//exit(1);
/* for(int dev = 0 ; dev < ngpus ; dev++)
{
for(int x = 0 ; x < filters.size() ; x++)
{
if(allFilters[dev][x].size == allFilters[dev][x].count)
{
cout<<e_list.at(i);
cout<<" : "<<allFilters[dev][x].filtid<<": "<<allFilters[dev][x].count<<endl;
}
}
cout<<endl<<endl;
}*/
}
//End of loop for input
index++;
//printf("\ndone and it took: %f milliseconds\n", timefinal);
resultTime+=timefinal;
cout<<"\nresultTime : "<<resultTime<<endl;
//int count = 0;
/*for(int i=0;i<cSize;i++)
{
if(results[i] >0)
{
printf("\nResult-> %d %d ",i,results[i]);
count++;
}
}*/
//cout<<"\n Count = "<<count<<endl;
for(int device = 0 ; device < ngpus ; device++)
{
cudaSetDevice(device);
cudaFree(d_allConstraints[device]);
cudaFree(d_sizeC[device]);
}
exit(1);
}
//printf("\ndone and it took: %f milliseconds\n",resultTime);
/*int count = 0;
for(int i=0;i<cSize;i++)
{
if(results[i] >0)
{
printf("\nResult-> %d %d ",i,results[i]);
count++;
}
}
cout<<"\n Count = "<<count<<endl;
cout<<" done and it took:"<< resultTime<<" milliseconds \n";
*/
return 0;
}
|
518334f99a852847cd064506bbda47c8c1786b6e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* stream2Async.cu
*
* Microbenchmark to illustrate a bandwidth-limited workload.
*
* It separately measures the host->device transfer time, kernel
* processing time, and device->host transfer time. Due to low
* arithmetic density in the saxpyGPU() kernel, the bulk of time
* is spent transferring data.
*
* Build with: nvcc -I ../chLib stream2Async.cu
*
* Copyright (c) 2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <chError.h>
#include <chCommandLine.h>
#include <chTimer.h>
#include <stdio.h>
#include <stdlib.h>
#include "saxpyCPU.h"
#include "saxpyGPU.cuh"
hipError_t
MeasureTimes(
float *msTotal,
float *msWallClock,
float *msHtoD,
float *msKernel,
float *msDtoH,
size_t N,
float alpha,
int nBlocks,
int nThreads )
{
hipError_t status;
chTimerTimestamp chStart, chStop;
float *dptrOut = 0, *hptrOut = 0;
float *dptrY = 0, *hptrY = 0;
float *dptrX = 0, *hptrX = 0;
hipEvent_t evStart = 0;
hipEvent_t evHtoD = 0;
hipEvent_t evKernel = 0;
hipEvent_t evDtoH = 0;
cuda(HostAlloc( &hptrOut, N*sizeof(float), 0 ) );
memset( hptrOut, 0, N*sizeof(float) );
cuda(HostAlloc( &hptrY, N*sizeof(float), 0 ) );
cuda(HostAlloc( &hptrX, N*sizeof(float), 0 ) );
cuda(Malloc( &dptrOut, N*sizeof(float) ) );
cuda(Memset( dptrOut, 0, N*sizeof(float) ) );
cuda(Malloc( &dptrY, N*sizeof(float) ) );
cuda(Memset( dptrY, 0, N*sizeof(float) ) );
cuda(Malloc( &dptrX, N*sizeof(float) ) );
cuda(Memset( dptrX, 0, N*sizeof(float) ) );
cuda(EventCreate( &evStart ) );
cuda(EventCreate( &evHtoD ) );
cuda(EventCreate( &evKernel ) );
cuda(EventCreate( &evDtoH ) );
for ( size_t i = 0; i < N; i++ ) {
hptrX[i] = (float) rand() / RAND_MAX;
hptrY[i] = (float) rand() / RAND_MAX;
}
//
// begin timing
//
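// Events bracket each phase: evStart->evHtoD covers the two host-to-device copies,
// evHtoD->evKernel the kernel, and evKernel->evDtoH the device-to-host copy; the
// elapsed times are read back once the device has been synchronized.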
chTimerGetTime( &chStart );
cuda(EventRecord( evStart, 0 ) );
cuda(MemcpyAsync( dptrX, hptrX, N*sizeof(float), hipMemcpyHostToDevice, NULL ) );
cuda(MemcpyAsync( dptrY, hptrY, N*sizeof(float), hipMemcpyHostToDevice, NULL ) );
cuda(EventRecord( evHtoD, 0 ) );
hipLaunchKernelGGL(( saxpyGPU), dim3(nBlocks), dim3(nThreads), 0, 0, dptrOut, dptrX, dptrY, N, alpha );
cuda(EventRecord( evKernel, 0 ) );
cuda(MemcpyAsync( hptrOut, dptrOut, N*sizeof(float), hipMemcpyDeviceToHost, NULL ) );
cuda(EventRecord( evDtoH, 0 ) );
cuda(DeviceSynchronize() );
chTimerGetTime( &chStop );
*msWallClock = 1000.0f*chTimerElapsedTime( &chStart, &chStop );
//
// end timing
//
for ( size_t i = 0; i < N; i++ ) {
if ( fabsf( hptrOut[i] - (alpha*hptrX[i]+hptrY[i]) ) > 1e-5f ) {
status = hipErrorUnknown;
goto Error;
}
}
cuda(EventElapsedTime( msHtoD, evStart, evHtoD ) );
cuda(EventElapsedTime( msKernel, evHtoD, evKernel ) );
cuda(EventElapsedTime( msDtoH, evKernel, evDtoH ) );
cuda(EventElapsedTime( msTotal, evStart, evDtoH ) );
Error:
hipEventDestroy( evDtoH );
hipEventDestroy( evKernel );
hipEventDestroy( evHtoD );
hipEventDestroy( evStart );
hipFree( dptrOut );
hipFree( dptrX );
hipFree( dptrY );
hipHostFree( hptrOut );
hipHostFree( hptrX );
hipHostFree( hptrY );
return status;
}
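// Converts a byte count and a time in milliseconds into a rate in MB/s
// (decimal megabytes, 1 MB = 1e6 bytes), matching the printf labels below.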
double
Bandwidth( float ms, double NumBytes )
{
return NumBytes / (1000.0*ms);
}
int
main( int argc, char *argv[] )
{
hipError_t status;
int N_Mfloats = 128;
size_t N;
int nBlocks = 1500;
int nThreads = 256;
float alpha = 2.0f;
chCommandLineGet( &nBlocks, "nBlocks", argc, argv );
chCommandLineGet( &nThreads, "nThreads", argc, argv );
chCommandLineGet( &N_Mfloats, "N", argc, argv );
printf( "Measuring times with %dM floats", N_Mfloats );
if ( N_Mfloats==128 ) {
printf( " (use --N to specify number of Mfloats)");
}
printf( "\n" );
N = 1048576*N_Mfloats;
cuda(SetDeviceFlags( hipDeviceMapHost ) );
{
float msTotal, msWallClock, msHtoD, msKernel, msDtoH;
CUDART_CHECK( MeasureTimes( &msTotal, &msWallClock, &msHtoD, &msKernel, &msDtoH, N, alpha, nBlocks, nThreads ) );
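        // Byte counts behind the bandwidth figures: the H2D step copies two
        // N-float arrays (x and y), the kernel streams three (reads x and y,
        // writes out), and the D2H step copies one.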
printf( "Memcpy( host->device ): %.2f ms (%.2f MB/s)\n", msHtoD, Bandwidth( msHtoD, 2*N*sizeof(float) ) );
printf( "Kernel processing : %.2f ms (%.2f MB/s)\n", msKernel, Bandwidth( msKernel, 3*N*sizeof(float) ) );
printf( "Memcpy (device->host ): %.2f ms (%.2f MB/s)\n\n", msDtoH, Bandwidth( msDtoH, N*sizeof(float) ) );
printf( "Total time (wall clock): %.2f ms (%.2f MB/s)\n", msWallClock, Bandwidth( msWallClock, 3*N*sizeof(float) ) );
}
Error:
if ( status == hipErrorMemoryAllocation ) {
printf( "Memory allocation failed\n" );
}
else if ( hipSuccess != status ) {
printf( "Failed\n" );
}
return hipSuccess != status;
}
| 518334f99a852847cd064506bbda47c8c1786b6e.cu | /*
*
* stream2Async.cu
*
* Microbenchmark to illustrate a bandwidth-limited workload.
*
* It separately measures the host->device transfer time, kernel
* processing time, and device->host transfer time. Due to low
* arithmetic density in the saxpyGPU() kernel, the bulk of time
* is spent transferring data.
*
* Build with: nvcc -I ../chLib stream2Async.cu
*
* Copyright (c) 2012, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <chError.h>
#include <chCommandLine.h>
#include <chTimer.h>
#include <stdio.h>
#include <stdlib.h>
#include "saxpyCPU.h"
#include "saxpyGPU.cuh"
cudaError_t
MeasureTimes(
float *msTotal,
float *msWallClock,
float *msHtoD,
float *msKernel,
float *msDtoH,
size_t N,
float alpha,
int nBlocks,
int nThreads )
{
cudaError_t status;
chTimerTimestamp chStart, chStop;
float *dptrOut = 0, *hptrOut = 0;
float *dptrY = 0, *hptrY = 0;
float *dptrX = 0, *hptrX = 0;
cudaEvent_t evStart = 0;
cudaEvent_t evHtoD = 0;
cudaEvent_t evKernel = 0;
cudaEvent_t evDtoH = 0;
cuda(HostAlloc( &hptrOut, N*sizeof(float), 0 ) );
memset( hptrOut, 0, N*sizeof(float) );
cuda(HostAlloc( &hptrY, N*sizeof(float), 0 ) );
cuda(HostAlloc( &hptrX, N*sizeof(float), 0 ) );
cuda(Malloc( &dptrOut, N*sizeof(float) ) );
cuda(Memset( dptrOut, 0, N*sizeof(float) ) );
cuda(Malloc( &dptrY, N*sizeof(float) ) );
cuda(Memset( dptrY, 0, N*sizeof(float) ) );
cuda(Malloc( &dptrX, N*sizeof(float) ) );
    cuda(Memset( dptrX, 0, N*sizeof(float) ) );
cuda(EventCreate( &evStart ) );
cuda(EventCreate( &evHtoD ) );
cuda(EventCreate( &evKernel ) );
cuda(EventCreate( &evDtoH ) );
for ( size_t i = 0; i < N; i++ ) {
hptrX[i] = (float) rand() / RAND_MAX;
hptrY[i] = (float) rand() / RAND_MAX;
}
//
// begin timing
//
chTimerGetTime( &chStart );
cuda(EventRecord( evStart, 0 ) );
cuda(MemcpyAsync( dptrX, hptrX, N*sizeof(float), cudaMemcpyHostToDevice, NULL ) );
cuda(MemcpyAsync( dptrY, hptrY, N*sizeof(float), cudaMemcpyHostToDevice, NULL ) );
cuda(EventRecord( evHtoD, 0 ) );
saxpyGPU<<<nBlocks, nThreads>>>( dptrOut, dptrX, dptrY, N, alpha );
cuda(EventRecord( evKernel, 0 ) );
cuda(MemcpyAsync( hptrOut, dptrOut, N*sizeof(float), cudaMemcpyDeviceToHost, NULL ) );
cuda(EventRecord( evDtoH, 0 ) );
cuda(DeviceSynchronize() );
chTimerGetTime( &chStop );
*msWallClock = 1000.0f*chTimerElapsedTime( &chStart, &chStop );
//
// end timing
//
for ( size_t i = 0; i < N; i++ ) {
if ( fabsf( hptrOut[i] - (alpha*hptrX[i]+hptrY[i]) ) > 1e-5f ) {
status = cudaErrorUnknown;
goto Error;
}
}
cuda(EventElapsedTime( msHtoD, evStart, evHtoD ) );
cuda(EventElapsedTime( msKernel, evHtoD, evKernel ) );
cuda(EventElapsedTime( msDtoH, evKernel, evDtoH ) );
cuda(EventElapsedTime( msTotal, evStart, evDtoH ) );
Error:
cudaEventDestroy( evDtoH );
cudaEventDestroy( evKernel );
cudaEventDestroy( evHtoD );
cudaEventDestroy( evStart );
cudaFree( dptrOut );
cudaFree( dptrX );
cudaFree( dptrY );
cudaFreeHost( hptrOut );
cudaFreeHost( hptrX );
cudaFreeHost( hptrY );
return status;
}
double
Bandwidth( float ms, double NumBytes )
{
return NumBytes / (1000.0*ms);
}
int
main( int argc, char *argv[] )
{
cudaError_t status;
int N_Mfloats = 128;
size_t N;
int nBlocks = 1500;
int nThreads = 256;
float alpha = 2.0f;
chCommandLineGet( &nBlocks, "nBlocks", argc, argv );
chCommandLineGet( &nThreads, "nThreads", argc, argv );
chCommandLineGet( &N_Mfloats, "N", argc, argv );
printf( "Measuring times with %dM floats", N_Mfloats );
if ( N_Mfloats==128 ) {
printf( " (use --N to specify number of Mfloats)");
}
printf( "\n" );
N = 1048576*N_Mfloats;
cuda(SetDeviceFlags( cudaDeviceMapHost ) );
{
float msTotal, msWallClock, msHtoD, msKernel, msDtoH;
CUDART_CHECK( MeasureTimes( &msTotal, &msWallClock, &msHtoD, &msKernel, &msDtoH, N, alpha, nBlocks, nThreads ) );
printf( "Memcpy( host->device ): %.2f ms (%.2f MB/s)\n", msHtoD, Bandwidth( msHtoD, 2*N*sizeof(float) ) );
printf( "Kernel processing : %.2f ms (%.2f MB/s)\n", msKernel, Bandwidth( msKernel, 3*N*sizeof(float) ) );
printf( "Memcpy (device->host ): %.2f ms (%.2f MB/s)\n\n", msDtoH, Bandwidth( msDtoH, N*sizeof(float) ) );
printf( "Total time (wall clock): %.2f ms (%.2f MB/s)\n", msWallClock, Bandwidth( msWallClock, 3*N*sizeof(float) ) );
}
Error:
if ( status == cudaErrorMemoryAllocation ) {
printf( "Memory allocation failed\n" );
}
else if ( cudaSuccess != status ) {
printf( "Failed\n" );
}
return cudaSuccess != status;
}
|
e210706ce622c2acb10ff13cdf0c24e49c1fbdb3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
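/* Worked illustration of one pass (looking at a single bit), using the
   example above:
     input bit values : [0 0 1 1 0 0 1]
     histogram        : four 0s, three 1s
     exclusive scan   : 0s start at position 0, 1s start at position 4
     relative offsets : [0 1 0 1 2 3 2]
     final position   = scan[bit] + relative offset -> [0 1 4 5 2 3 6]
*/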
//=========================================================
// Device functions
//=========================================================
__global__
void histogram_kernel(unsigned int pass,
unsigned int * d_bins,
unsigned int* const d_input,
const int size){
int id = threadIdx.x + blockDim.x * blockIdx.x;
if(id >= size){
return;
}
}
__global__
void scan_kernel(unsigned int pass,
unsigned int const * d_inputVals,
unsigned int * d_output,
const int size,
unsigned int base,
unsigned int threadSize){
int id = threadIdx.x + blockDim.x * blockIdx.x;
if(id >= size){
return;
}
}
__global__
void move_kernel(unsigned int pass,
unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* d_outputVals,
unsigned int* d_outputPos,
unsigned int* d_outputMove,
unsigned int* const d_scanned,
unsigned int one_pos,
const size_t numElems){
int id = threadIdx.x + blockDim.x * blockIdx.x;
    if(id >= numElems){
return;
}
}
//=========================================================
// Host functions
//=========================================================
int get_max_size(int problemSize, int blockSize){
return (int)ceil((float)problemSize / (float)blockSize) + 1;
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
//TODO
//PUT YOUR SORT HERE
dim3 blockSize(1024);
dim3 gridSize(get_max_size(numElems, blockSize.x));
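    // Intended pipeline (left as an exercise here): for each bit of the keys,
    // launch histogram_kernel, scan_kernel and move_kernel with this grid/block
    // configuration, ping-ponging values and positions between the input and
    // output buffers so the final sorted data ends up in d_outputVals/d_outputPos.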
}
| e210706ce622c2acb10ff13cdf0c24e49c1fbdb3.cu | //Udacity HW 4
//Radix Sorting
#include "utils.h"
#include <thrust/host_vector.h>
/* Red Eye Removal
===============
For this assignment we are implementing red eye removal. This is
accomplished by first creating a score for every pixel that tells us how
likely it is to be a red eye pixel. We have already done this for you - you
are receiving the scores and need to sort them in ascending order so that we
know which pixels to alter to remove the red eye.
Note: ascending order == smallest to largest
Each score is associated with a position, when you sort the scores, you must
also move the positions accordingly.
Implementing Parallel Radix Sort with CUDA
==========================================
The basic idea is to construct a histogram on each pass of how many of each
"digit" there are. Then we scan this histogram so that we know where to put
the output of each digit. For example, the first 1 must come after all the
0s so we have to know how many 0s there are to be able to start moving 1s
into the correct position.
1) Histogram of the number of occurrences of each digit
2) Exclusive Prefix Sum of Histogram
3) Determine relative offset of each digit
For example [0 0 1 1 0 0 1]
-> [0 1 0 1 2 3 2]
4) Combine the results of steps 2 & 3 to determine the final
output location for each element and move it there
LSB Radix sort is an out-of-place sort and you will need to ping-pong values
between the input and output buffers we have provided. Make sure the final
sorted results end up in the output buffer! Hint: You may need to do a copy
at the end.
*/
//=========================================================
// Device functions
//=========================================================
__global__
void histogram_kernel(unsigned int pass,
unsigned int * d_bins,
unsigned int* const d_input,
const int size){
int id = threadIdx.x + blockDim.x * blockIdx.x;
if(id >= size){
return;
}
}
__global__
void scan_kernel(unsigned int pass,
unsigned int const * d_inputVals,
unsigned int * d_output,
const int size,
unsigned int base,
unsigned int threadSize){
int id = threadIdx.x + blockDim.x * blockIdx.x;
if(id >= size){
return;
}
}
__global__
void move_kernel(unsigned int pass,
unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* d_outputVals,
unsigned int* d_outputPos,
unsigned int* d_outputMove,
unsigned int* const d_scanned,
unsigned int one_pos,
const size_t numElems){
int id = threadIdx.x + blockDim.x * blockIdx.x;
    if(id >= numElems){
return;
}
}
//=========================================================
// Host functions
//=========================================================
int get_max_size(int problemSize, int blockSize){
return (int)ceil((float)problemSize / (float)blockSize) + 1;
}
void your_sort(unsigned int* const d_inputVals,
unsigned int* const d_inputPos,
unsigned int* const d_outputVals,
unsigned int* const d_outputPos,
const size_t numElems)
{
//TODO
//PUT YOUR SORT HERE
dim3 blockSize(1024);
dim3 gridSize(get_max_size(numElems, blockSize.x));
}
|
71dbcafb0612dd0cece66da5e11e9b0e8e737aa4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Based on https://github.com/daijifeng001/caffe-rfcn/blob/r-fcn/src/caffe/layers/psroi_pooling_layer.cu
//
// ------------------------------------------------------------------
// R-FCN
// Copyright (c) 2016 Microsoft
// Licensed under The MIT License [see r-fcn/LICENSE for details]
// Written by Yi Li
// ------------------------------------------------------------------
//
// COPYRIGHT
//
// All contributions by the University of California:
// Copyright (c) 2014, 2015, The Regents of the University of California
// (Regents)
// All rights reserved.
//
// All other contributions:
// Copyright (c) 2014, 2015, the respective contributors
// All rights reserved.
//
// Caffe uses a shared copyright model: each contributor holds copyright over
// their contributions to Caffe. The project versioning records all such
// contribution and copyright details. If a contributor wants to further mark
// their specific copyright on a particular contribution, they should indicate
// their copyright solely in the commit message of the change when it is
// committed.
//
// LICENSE
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// CONTRIBUTION AGREEMENT
//
// By contributing to the BVLC/caffe repository through pull-request, comment,
// or otherwise, the contributor releases their content to the
// license and copyright terms herein.
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "modules/detectron/ps_roi_pool_op.h"
namespace caffe2 {
namespace {
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__
float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <typename T>
__global__ void PSRoIPoolForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* bottom_rois,
const int output_dim,
const int group_size,
T* top_data,
int* mapping_channel) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
T roi_start_w = static_cast<T>(
roundf(offset_bottom_rois[1])) * spatial_scale;
T roi_start_h = static_cast<T>(
roundf(offset_bottom_rois[2])) * spatial_scale;
T roi_end_w = static_cast<T>(
roundf(offset_bottom_rois[3]) + 1.) * spatial_scale;
T roi_end_h = static_cast<T>(
roundf(offset_bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
T roi_width = c10::hip::compat::max(roi_end_w - roi_start_w, static_cast<T>(0.1)); // avoid 0
T roi_height = c10::hip::compat::max(roi_end_h - roi_start_h, static_cast<T>(0.1));
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// Add roi offsets and clip to input boundaries
int hstart = floor(
static_cast<T>(ph) * bin_size_h + roi_start_h);
int wstart = floor(
static_cast<T>(pw)* bin_size_w + roi_start_w);
int hend = ceil(
static_cast<T>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(
static_cast<T>(pw + 1) * bin_size_w + roi_start_w);
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0),width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
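    // Position-sensitive lookup: each (output channel, bin row, bin column)
    // triple reads from its own dedicated score map of the input.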
int gw = pw;
int gh = ph;
int c = (ctop * group_size + gh) * group_size + gw;
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
T out_sum = 0;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h*width + w;
out_sum += offset_bottom_data[bottom_index];
}
}
T bin_area = (hend - hstart) * (wend - wstart);
top_data[index] = is_empty ? 0. : out_sum / bin_area;
mapping_channel[index] = c;
}
}
template <typename T>
__global__ void PSRoIPoolBackward(
const int nthreads,
const T* top_diff,
const int* mapping_channel,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int output_dim,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
T roi_start_w = static_cast<T>(
roundf(offset_bottom_rois[1])) * spatial_scale;
T roi_start_h = static_cast<T>(
roundf(offset_bottom_rois[2])) * spatial_scale;
T roi_end_w = static_cast<T>(
roundf(offset_bottom_rois[3]) + 1.) * spatial_scale;
T roi_end_h = static_cast<T>(
roundf(offset_bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
T roi_width = c10::hip::compat::max(roi_end_w - roi_start_w, static_cast<T>(0.1)); //avoid 0
T roi_height = c10::hip::compat::max(roi_end_h - roi_start_h, static_cast<T>(0.1));
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int hstart = floor(
static_cast<T>(ph)* bin_size_h + roi_start_h);
int wstart = floor(
static_cast<T>(pw)* bin_size_w + roi_start_w);
int hend = ceil(
static_cast<T>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(
static_cast<T>(pw + 1) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c = mapping_channel[index];
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
T bin_area = (hend - hstart) * (wend - wstart);
T diff_val = is_empty ? 0. : top_diff[index] / bin_area;
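    // Spread the bin's gradient uniformly over the pixels it averaged; atomics
    // are needed because bins from different ROIs can overlap in the input.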
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h * width + w;
gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
}
}
}
}
} // namespace
template<>
bool PSRoIPoolOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto* Y = Output(0, {R.dim32(0), output_dim_, pooled_height_, pooled_width_}, at::dtype<float>()); // PSRoI pooled data
auto* A = Output(1, Y->sizes(), at::dtype<int>()); // mapping_channel
int output_size = Y->numel();
hipLaunchKernelGGL(( PSRoIPoolForward<float>), dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
output_size, X.data<float>(), spatial_scale_, X.dim32(1), X.dim32(2),
X.dim32(3), pooled_height_, pooled_width_, R.data<float>(), output_dim_,
group_size_, Y->mutable_data<float>(), A->mutable_data<int>());
return true;
}
template<>
bool PSRoIPoolGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& A = Input(2); // mapping channels
auto& dY = Input(3); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(0, X.sizes(), at::dtype<float>()); // Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);
hipLaunchKernelGGL(( PSRoIPoolBackward<float>), dim3(CAFFE_GET_BLOCKS(dY.size())),
dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
dY.size(), dY.data<float>(), A.data<int>(), R.dim32(0), spatial_scale_,
X.dim32(1), X.dim32(2), X.dim32(3), pooled_height_, pooled_width_,
output_dim_, dX->mutable_data<float>(), R.data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(PSRoIPool,
PSRoIPoolOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(PSRoIPoolGradient,
PSRoIPoolGradientOp<float, CUDAContext>);
} // namespace caffe2
| 71dbcafb0612dd0cece66da5e11e9b0e8e737aa4.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Based on https://github.com/daijifeng001/caffe-rfcn/blob/r-fcn/src/caffe/layers/psroi_pooling_layer.cu
//
// ------------------------------------------------------------------
// R-FCN
// Copyright (c) 2016 Microsoft
// Licensed under The MIT License [see r-fcn/LICENSE for details]
// Written by Yi Li
// ------------------------------------------------------------------
//
// COPYRIGHT
//
// All contributions by the University of California:
// Copyright (c) 2014, 2015, The Regents of the University of California
// (Regents)
// All rights reserved.
//
// All other contributions:
// Copyright (c) 2014, 2015, the respective contributors
// All rights reserved.
//
// Caffe uses a shared copyright model: each contributor holds copyright over
// their contributions to Caffe. The project versioning records all such
// contribution and copyright details. If a contributor wants to further mark
// their specific copyright on a particular contribution, they should indicate
// their copyright solely in the commit message of the change when it is
// committed.
//
// LICENSE
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// CONTRIBUTION AGREEMENT
//
// By contributing to the BVLC/caffe repository through pull-request, comment,
// or otherwise, the contributor releases their content to the
// license and copyright terms herein.
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "modules/detectron/ps_roi_pool_op.h"
namespace caffe2 {
namespace {
template <typename T>
inline __device__ T gpu_atomic_add(const T val, T* address);
template <>
inline __device__
float gpu_atomic_add(const float val, float* address) {
return atomicAdd(address, val);
}
template <typename T>
__global__ void PSRoIPoolForward(
const int nthreads,
const T* bottom_data,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const T* bottom_rois,
const int output_dim,
const int group_size,
T* top_data,
int* mapping_channel) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int ctop = (index / pooled_width / pooled_height) % output_dim;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
T roi_start_w = static_cast<T>(
roundf(offset_bottom_rois[1])) * spatial_scale;
T roi_start_h = static_cast<T>(
roundf(offset_bottom_rois[2])) * spatial_scale;
T roi_end_w = static_cast<T>(
roundf(offset_bottom_rois[3]) + 1.) * spatial_scale;
T roi_end_h = static_cast<T>(
roundf(offset_bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
T roi_width = c10::cuda::compat::max(roi_end_w - roi_start_w, static_cast<T>(0.1)); // avoid 0
T roi_height = c10::cuda::compat::max(roi_end_h - roi_start_h, static_cast<T>(0.1));
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// Add roi offsets and clip to input boundaries
int hstart = floor(
static_cast<T>(ph) * bin_size_h + roi_start_h);
int wstart = floor(
static_cast<T>(pw)* bin_size_w + roi_start_w);
int hend = ceil(
static_cast<T>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(
static_cast<T>(pw + 1) * bin_size_w + roi_start_w);
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0),width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
int gw = pw;
int gh = ph;
int c = (ctop * group_size + gh) * group_size + gw;
const T* offset_bottom_data =
bottom_data + (roi_batch_ind * channels + c) * height * width;
T out_sum = 0;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h*width + w;
out_sum += offset_bottom_data[bottom_index];
}
}
T bin_area = (hend - hstart) * (wend - wstart);
top_data[index] = is_empty ? 0. : out_sum / bin_area;
mapping_channel[index] = c;
}
}
template <typename T>
__global__ void PSRoIPoolBackward(
const int nthreads,
const T* top_diff,
const int* mapping_channel,
const int num_rois,
const T spatial_scale,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int output_dim,
T* bottom_diff,
const T* bottom_rois) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// The output is in order (n, ctop, ph, pw)
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_dim;
// [start, end) interval for spatial sampling
const T* offset_bottom_rois = bottom_rois + n * 5;
int roi_batch_ind = offset_bottom_rois[0];
T roi_start_w = static_cast<T>(
roundf(offset_bottom_rois[1])) * spatial_scale;
T roi_start_h = static_cast<T>(
roundf(offset_bottom_rois[2])) * spatial_scale;
T roi_end_w = static_cast<T>(
roundf(offset_bottom_rois[3]) + 1.) * spatial_scale;
T roi_end_h = static_cast<T>(
roundf(offset_bottom_rois[4]) + 1.) * spatial_scale;
// Force too small ROIs to be 1x1
T roi_width = c10::cuda::compat::max(roi_end_w - roi_start_w, static_cast<T>(0.1)); //avoid 0
T roi_height = c10::cuda::compat::max(roi_end_h - roi_start_h, static_cast<T>(0.1));
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int hstart = floor(
static_cast<T>(ph)* bin_size_h + roi_start_h);
int wstart = floor(
static_cast<T>(pw)* bin_size_w + roi_start_w);
int hend = ceil(
static_cast<T>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(
static_cast<T>(pw + 1) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
hend = min(max(hend, 0), height);
wstart = min(max(wstart, 0), width);
wend = min(max(wend, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Compute c at bottom
int c = mapping_channel[index];
T* offset_bottom_diff =
bottom_diff + (roi_batch_ind * channels + c) * height * width;
T bin_area = (hend - hstart) * (wend - wstart);
T diff_val = is_empty ? 0. : top_diff[index] / bin_area;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h * width + w;
gpu_atomic_add(diff_val, offset_bottom_diff + bottom_index);
}
}
}
}
} // namespace
template<>
bool PSRoIPoolOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto* Y = Output(0, {R.dim32(0), output_dim_, pooled_height_, pooled_width_}, at::dtype<float>()); // PSRoI pooled data
auto* A = Output(1, Y->sizes(), at::dtype<int>()); // mapping_channel
int output_size = Y->numel();
PSRoIPoolForward<float><<<CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
output_size, X.data<float>(), spatial_scale_, X.dim32(1), X.dim32(2),
X.dim32(3), pooled_height_, pooled_width_, R.data<float>(), output_dim_,
group_size_, Y->mutable_data<float>(), A->mutable_data<int>());
return true;
}
template<>
bool PSRoIPoolGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Input data to pool
auto& R = Input(1); // RoIs
auto& A = Input(2); // mapping channels
auto& dY = Input(3); // Gradient of net w.r.t. output of "forward" op
// (aka "gradOutput")
auto* dX = Output(0, X.sizes(), at::dtype<float>()); // Gradient of net w.r.t. input to "forward" op
// (aka "gradInput")
// Must zero-out dX before accumulating gradients
math::Set<float, CUDAContext>(
dX->size(), 0.f, dX->mutable_data<float>(), &context_);
PSRoIPoolBackward<float><<<CAFFE_GET_BLOCKS(dY.size()),
CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
dY.size(), dY.data<float>(), A.data<int>(), R.dim32(0), spatial_scale_,
X.dim32(1), X.dim32(2), X.dim32(3), pooled_height_, pooled_width_,
output_dim_, dX->mutable_data<float>(), R.data<float>());
return true;
}
REGISTER_CUDA_OPERATOR(PSRoIPool,
PSRoIPoolOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(PSRoIPoolGradient,
PSRoIPoolGradientOp<float, CUDAContext>);
} // namespace caffe2
|
a1f683e76eed6f59a5ed93f83ec088956dd232b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "AddThrustFunctor.hh"
__device__ fptype device_AddPdfs (fptype* evt, fptype* p, unsigned int* indices) {
int numParameters = indices[0];
fptype ret = 0;
fptype totalWeight = 0;
for (int i = 1; i < numParameters-3; i += 3) {
totalWeight += p[indices[i+2]];
fptype curr = (*(reinterpret_cast<device_function_ptr>(device_function_table[indices[i]])))(evt, p, paramIndices + indices[i+1]);
fptype weight = p[indices[i+2]];
ret += weight * curr * normalisationFactors[indices[i+1]];
//if ((gpuDebug & 1) && (0 == threadIdx.x) && (0 == blockIdx.x))
//if ((1 > (int) floor(0.5 + evt[8])) && (gpuDebug & 1) && (paramIndices + debugParamIndex == indices))
//printf("Add comp %i: %f * %f * %f = %f (%f)\n", i, weight, curr, normalisationFactors[indices[i+1]], weight*curr*normalisationFactors[indices[i+1]], ret);
}
// numParameters does not count itself. So the array structure for two functions is
// nP | F P w | F P
// in which nP = 5. Therefore the parameter index for the last function pointer is nP, and the function index is nP-1.
fptype last = (*(reinterpret_cast<device_function_ptr>(device_function_table[indices[numParameters-1]])))(evt, p, paramIndices + indices[numParameters]);
ret += (1 - totalWeight) * last * normalisationFactors[indices[numParameters]];
//if ((gpuDebug & 1) && (0 == threadIdx.x) && (0 == blockIdx.x))
//if ((1 > (int) floor(0.5 + evt[8])) && (gpuDebug & 1) && (paramIndices + debugParamIndex == indices))
//printf("Add final: %f * %f * %f = %f (%f)\n", (1 - totalWeight), last, normalisationFactors[indices[numParameters]], (1 - totalWeight) *last* normalisationFactors[indices[numParameters]], ret);
return ret;
}
__device__ fptype device_AddPdfsExt (fptype* evt, fptype* p, unsigned int* indices) {
// numParameters does not count itself. So the array structure for two functions is
// nP | F P w | F P w
// in which nP = 6.
int numParameters = indices[0];
fptype ret = 0;
fptype totalWeight = 0;
for (int i = 1; i < numParameters; i += 3) {
fptype curr = (*(reinterpret_cast<device_function_ptr>(device_function_table[indices[i]])))(evt, p, paramIndices + indices[i+1]);
fptype weight = p[indices[i+2]];
ret += weight * curr * normalisationFactors[indices[i+1]];
totalWeight += weight;
//if ((gpuDebug & 1) && (threadIdx.x == 0) && (0 == blockIdx.x))
//if ((1 > (int) floor(0.5 + evt[8])) && (gpuDebug & 1) && (paramIndices + debugParamIndex == indices))
//printf("AddExt: %i %E %f %f %f %f %f %f\n", i, curr, weight, ret, totalWeight, normalisationFactors[indices[i+1]], evt[0], evt[8]);
}
ret /= totalWeight;
//if ((1 > (int) floor(0.5 + evt[8])) && (gpuDebug & 1) && (paramIndices + debugParamIndex == indices))
//if ((gpuDebug & 1) && (threadIdx.x == 0) && (0 == blockIdx.x))
//printf("AddExt result: %f\n", ret);
return ret;
}
__device__ device_function_ptr ptr_to_AddPdfs = device_AddPdfs;
__device__ device_function_ptr ptr_to_AddPdfsExt = device_AddPdfsExt;
AddThrustFunctor::AddThrustFunctor (std::string n, std::vector<Variable*> weights, std::vector<FunctorBase*> comps)
: ThrustPdfFunctor(0, n)
, extended(true)
{
assert((weights.size() == comps.size()) || (weights.size() + 1 == comps.size()));
// Indices stores (function index)(function parameter index)(weight index) triplet for each component.
// Last component has no weight index unless function is extended.
for (std::vector<FunctorBase*>::iterator p = comps.begin(); p != comps.end(); ++p) {
components.push_back(*p);
assert(components.back());
}
getObservables(observables);
std::vector<unsigned int> pindices;
for (unsigned int w = 0; w < weights.size(); ++w) {
assert(components[w]);
pindices.push_back(components[w]->getFunctionIndex());
pindices.push_back(components[w]->getParameterIndex());
pindices.push_back(registerParameter(weights[w]));
}
assert(components.back());
if (weights.size() < components.size()) {
pindices.push_back(components.back()->getFunctionIndex());
pindices.push_back(components.back()->getParameterIndex());
extended = false;
}
if (extended) hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_AddPdfsExt, sizeof(void*));
else hipMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_AddPdfs, sizeof(void*));
initialise(pindices);
}
__host__ fptype AddThrustFunctor::normalise () const {
//if (cpuDebug & 1) std::cout << "Normalising AddThrustFunctor " << getName() << std::endl;
fptype ret = 0;
fptype totalWeight = 0;
for (unsigned int i = 0; i < components.size()-1; ++i) {
fptype weight = host_params[host_indices[parameters + 3*(i+1)]];
totalWeight += weight;
fptype curr = components[i]->normalise();
//if (cpuDebug & 1) std::cout << getName() << " normalised comp " << i << " (" << components[i]->getName() << ") to get " << curr << " " << weight << " " << (1.0 / curr) << "\n";
ret += curr*weight;
}
fptype last = components.back()->normalise();
if (extended) {
fptype lastWeight = host_params[host_indices[parameters + 3*components.size()]];
totalWeight += lastWeight;
ret += last * lastWeight;
ret /= totalWeight;
//if (cpuDebug & 1) std::cout << getName() << " normalised comp " << components.back()->getName() << " to get " << last << " " << lastWeight << " " << (1.0 / last) << " " << totalWeight << " " << ret << "\n";
}
else {
ret += (1 - totalWeight) * last;
}
host_normalisation[parameters] = 1.0;
if (getSpecialMask() & FunctorBase::ForceCommonNorm) {
// Want to normalise this as
// (f1 A + (1-f1) B) / int (f1 A + (1-f1) B)
// instead of default
// (f1 A / int A) + ((1-f1) B / int B).
for (unsigned int i = 0; i < components.size(); ++i) {
host_normalisation[components[i]->getParameterIndex()] = (1.0 / ret);
}
}
return ret;
}
__host__ double AddThrustFunctor::sumOfNll (int numVars) const {
static thrust::plus<double> cudaPlus;
thrust::constant_iterator<int> eventSize(numVars);
thrust::constant_iterator<fptype*> arrayAddress(cudaDataArray);
double dummy = 0;
thrust::counting_iterator<int> eventIndex(0);
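  // Sum the per-event negative log-likelihoods over all events on the device.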
double ret = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(eventIndex, arrayAddress, eventSize)),
thrust::make_zip_iterator(thrust::make_tuple(eventIndex + numEntries, arrayAddress, eventSize)),
*logger, dummy, cudaPlus);
if (extended) {
fptype expEvents = 0;
//std::cout << "Weights:";
for (unsigned int i = 0; i < components.size(); ++i) {
expEvents += host_params[host_indices[parameters + 3*(i+1)]];
//std::cout << " " << host_params[host_indices[parameters + 3*(i+1)]];
}
// Log-likelihood of numEvents with expectation of exp is (-exp + numEvents*ln(exp) - ln(numEvents!)).
// The last is constant, so we drop it; and then multiply by minus one to get the negative log-likelihood.
ret += (expEvents - numEvents*log(expEvents));
//std::cout << " " << expEvents << " " << numEvents << " " << (expEvents - numEvents*log(expEvents)) << std::endl;
}
return ret;
}
| a1f683e76eed6f59a5ed93f83ec088956dd232b0.cu | #include "AddThrustFunctor.hh"
__device__ fptype device_AddPdfs (fptype* evt, fptype* p, unsigned int* indices) {
int numParameters = indices[0];
fptype ret = 0;
fptype totalWeight = 0;
for (int i = 1; i < numParameters-3; i += 3) {
totalWeight += p[indices[i+2]];
fptype curr = (*(reinterpret_cast<device_function_ptr>(device_function_table[indices[i]])))(evt, p, paramIndices + indices[i+1]);
fptype weight = p[indices[i+2]];
ret += weight * curr * normalisationFactors[indices[i+1]];
//if ((gpuDebug & 1) && (0 == threadIdx.x) && (0 == blockIdx.x))
//if ((1 > (int) floor(0.5 + evt[8])) && (gpuDebug & 1) && (paramIndices + debugParamIndex == indices))
//printf("Add comp %i: %f * %f * %f = %f (%f)\n", i, weight, curr, normalisationFactors[indices[i+1]], weight*curr*normalisationFactors[indices[i+1]], ret);
}
// numParameters does not count itself. So the array structure for two functions is
// nP | F P w | F P
// in which nP = 5. Therefore the parameter index for the last function pointer is nP, and the function index is nP-1.
fptype last = (*(reinterpret_cast<device_function_ptr>(device_function_table[indices[numParameters-1]])))(evt, p, paramIndices + indices[numParameters]);
ret += (1 - totalWeight) * last * normalisationFactors[indices[numParameters]];
//if ((gpuDebug & 1) && (0 == threadIdx.x) && (0 == blockIdx.x))
//if ((1 > (int) floor(0.5 + evt[8])) && (gpuDebug & 1) && (paramIndices + debugParamIndex == indices))
//printf("Add final: %f * %f * %f = %f (%f)\n", (1 - totalWeight), last, normalisationFactors[indices[numParameters]], (1 - totalWeight) *last* normalisationFactors[indices[numParameters]], ret);
return ret;
}
__device__ fptype device_AddPdfsExt (fptype* evt, fptype* p, unsigned int* indices) {
// numParameters does not count itself. So the array structure for two functions is
// nP | F P w | F P w
// in which nP = 6.
int numParameters = indices[0];
fptype ret = 0;
fptype totalWeight = 0;
for (int i = 1; i < numParameters; i += 3) {
fptype curr = (*(reinterpret_cast<device_function_ptr>(device_function_table[indices[i]])))(evt, p, paramIndices + indices[i+1]);
fptype weight = p[indices[i+2]];
ret += weight * curr * normalisationFactors[indices[i+1]];
totalWeight += weight;
//if ((gpuDebug & 1) && (threadIdx.x == 0) && (0 == blockIdx.x))
//if ((1 > (int) floor(0.5 + evt[8])) && (gpuDebug & 1) && (paramIndices + debugParamIndex == indices))
//printf("AddExt: %i %E %f %f %f %f %f %f\n", i, curr, weight, ret, totalWeight, normalisationFactors[indices[i+1]], evt[0], evt[8]);
}
ret /= totalWeight;
//if ((1 > (int) floor(0.5 + evt[8])) && (gpuDebug & 1) && (paramIndices + debugParamIndex == indices))
//if ((gpuDebug & 1) && (threadIdx.x == 0) && (0 == blockIdx.x))
//printf("AddExt result: %f\n", ret);
return ret;
}
__device__ device_function_ptr ptr_to_AddPdfs = device_AddPdfs;
__device__ device_function_ptr ptr_to_AddPdfsExt = device_AddPdfsExt;
AddThrustFunctor::AddThrustFunctor (std::string n, std::vector<Variable*> weights, std::vector<FunctorBase*> comps)
: ThrustPdfFunctor(0, n)
, extended(true)
{
assert((weights.size() == comps.size()) || (weights.size() + 1 == comps.size()));
// Indices stores (function index)(function parameter index)(weight index) triplet for each component.
// Last component has no weight index unless function is extended.
for (std::vector<FunctorBase*>::iterator p = comps.begin(); p != comps.end(); ++p) {
components.push_back(*p);
assert(components.back());
}
getObservables(observables);
std::vector<unsigned int> pindices;
for (unsigned int w = 0; w < weights.size(); ++w) {
assert(components[w]);
pindices.push_back(components[w]->getFunctionIndex());
pindices.push_back(components[w]->getParameterIndex());
pindices.push_back(registerParameter(weights[w]));
}
assert(components.back());
if (weights.size() < components.size()) {
pindices.push_back(components.back()->getFunctionIndex());
pindices.push_back(components.back()->getParameterIndex());
extended = false;
}
if (extended) cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_AddPdfsExt, sizeof(void*));
else cudaMemcpyFromSymbol((void**) &host_fcn_ptr, ptr_to_AddPdfs, sizeof(void*));
initialise(pindices);
}
__host__ fptype AddThrustFunctor::normalise () const {
//if (cpuDebug & 1) std::cout << "Normalising AddThrustFunctor " << getName() << std::endl;
fptype ret = 0;
fptype totalWeight = 0;
for (unsigned int i = 0; i < components.size()-1; ++i) {
fptype weight = host_params[host_indices[parameters + 3*(i+1)]];
totalWeight += weight;
fptype curr = components[i]->normalise();
//if (cpuDebug & 1) std::cout << getName() << " normalised comp " << i << " (" << components[i]->getName() << ") to get " << curr << " " << weight << " " << (1.0 / curr) << "\n";
ret += curr*weight;
}
fptype last = components.back()->normalise();
if (extended) {
fptype lastWeight = host_params[host_indices[parameters + 3*components.size()]];
totalWeight += lastWeight;
ret += last * lastWeight;
ret /= totalWeight;
//if (cpuDebug & 1) std::cout << getName() << " normalised comp " << components.back()->getName() << " to get " << last << " " << lastWeight << " " << (1.0 / last) << " " << totalWeight << " " << ret << "\n";
}
else {
ret += (1 - totalWeight) * last;
}
host_normalisation[parameters] = 1.0;
if (getSpecialMask() & FunctorBase::ForceCommonNorm) {
// Want to normalise this as
// (f1 A + (1-f1) B) / int (f1 A + (1-f1) B)
// instead of default
// (f1 A / int A) + ((1-f1) B / int B).
for (unsigned int i = 0; i < components.size(); ++i) {
host_normalisation[components[i]->getParameterIndex()] = (1.0 / ret);
}
}
return ret;
}
__host__ double AddThrustFunctor::sumOfNll (int numVars) const {
static thrust::plus<double> cudaPlus;
thrust::constant_iterator<int> eventSize(numVars);
thrust::constant_iterator<fptype*> arrayAddress(cudaDataArray);
double dummy = 0;
thrust::counting_iterator<int> eventIndex(0);
double ret = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(eventIndex, arrayAddress, eventSize)),
thrust::make_zip_iterator(thrust::make_tuple(eventIndex + numEntries, arrayAddress, eventSize)),
*logger, dummy, cudaPlus);
if (extended) {
fptype expEvents = 0;
//std::cout << "Weights:";
for (unsigned int i = 0; i < components.size(); ++i) {
expEvents += host_params[host_indices[parameters + 3*(i+1)]];
//std::cout << " " << host_params[host_indices[parameters + 3*(i+1)]];
}
// Log-likelihood of numEvents with expectation of exp is (-exp + numEvents*ln(exp) - ln(numEvents!)).
// The last is constant, so we drop it; and then multiply by minus one to get the negative log-likelihood.
ret += (expEvents - numEvents*log(expEvents));
//std::cout << " " << expEvents << " " << numEvents << " " << (expEvents - numEvents*log(expEvents)) << std::endl;
}
return ret;
}
|
45cbf6771e92249c2187d15bd1adc31efa892756.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef SATURATED_FLOW_ITERATION_STEP_CU
#define SATURATED_FLOW_ITERATION_STEP_CU
#include "../common/memory_management.cuh"
namespace kernels
{
__global__ void standard_step(struct CA ca, double *headsWrite)
{
unsigned idx_x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned idx_y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned idx_g = idx_y * COLS + idx_x;
if (idx_x < COLS && idx_y < ROWS)
{
double Q{}, diff_head, tmp_t, ht1, ht2;
#ifdef LOOP
for (int i = 0; i < KERNEL_LOOP_SIZE; i++)
{
if (i == KERNEL_LOOP_SIZE - 1)
{
if (Q) { Q = 0; }
}
#endif
if (idx_x >= 1)
{
diff_head = ca.heads[idx_g - 1] - ca.heads[idx_g];
tmp_t = ca.K[idx_g] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_y >= 1)
{
diff_head = ca.heads[(idx_y - 1) * COLS + idx_x] - ca.heads[idx_g];
tmp_t = ca.K[idx_g] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_x + 1 < COLS)
{
diff_head = ca.heads[idx_g + 1] - ca.heads[idx_g];
tmp_t = ca.K[idx_g] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_y + 1 < ROWS)
{
diff_head = ca.heads[(idx_y + 1) * COLS + idx_x] - ca.heads[idx_g];
tmp_t = ca.K[idx_g] * THICKNESS;
Q += diff_head * tmp_t;
}
#ifdef LOOP
}
#endif
Q -= ca.sources[idx_g];
ht1 = Q * DELTA_T;
ht2 = AREA * ca.Sy[idx_g];
headsWrite[idx_g] = ca.heads[idx_g] + ht1 / ht2;
if (headsWrite[idx_g] < 0)
{ headsWrite[idx_g] = 0; }
}
}
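    // Variant that caches the block's own cells (heads and K) in shared memory;
    // cells on the block boundary still read their outside neighbours directly
    // from global memory.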
__global__ void hybrid_step(struct CA ca, double *headsWrite)
{
__shared__ double s_heads[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double s_K[BLOCK_SIZE][BLOCK_SIZE];
unsigned idx_x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned idx_y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned idx_g = idx_y * COLS + idx_x;
if (idx_x < COLS && idx_y < ROWS)
{
double Q{}, diff_head, tmp_t, ht1, ht2;
s_heads[threadIdx.y][threadIdx.x] = ca.heads[idx_g];
s_K[threadIdx.y][threadIdx.x] = ca.K[idx_g];
__syncthreads();
#ifdef LOOP
for (int i = 0; i < KERNEL_LOOP_SIZE; i++)
{
if (i == KERNEL_LOOP_SIZE - 1)
{
if (Q) { Q = 0; }
}
#endif
if (idx_x >= 1)
{ // left neighbor
if (threadIdx.x >= 1)
diff_head = s_heads[threadIdx.y][threadIdx.x - 1] - s_heads[threadIdx.y][threadIdx.x];
else
diff_head = ca.heads[idx_g - 1] - s_heads[threadIdx.y][threadIdx.x];
tmp_t = s_K[threadIdx.y][threadIdx.x] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_y >= 1)
{ // upper neighbor
if (threadIdx.y >= 1)
diff_head = s_heads[threadIdx.y - 1][threadIdx.x] - s_heads[threadIdx.y][threadIdx.x];
else
diff_head = ca.heads[(idx_y - 1) * COLS + idx_x] - s_heads[threadIdx.y][threadIdx.x];
tmp_t = s_K[threadIdx.y][threadIdx.x] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_x + 1 < COLS)
{ // right neighbor
if (threadIdx.x < BLOCK_SIZE - 1)
diff_head = s_heads[threadIdx.y][threadIdx.x + 1] - s_heads[threadIdx.y][threadIdx.x];
else
diff_head = ca.heads[idx_g + 1] - s_heads[threadIdx.y][threadIdx.x];
tmp_t = s_K[threadIdx.y][threadIdx.x] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_y + 1 < ROWS)
{ // bottom neighbor
if (threadIdx.y < BLOCK_SIZE - 1)
diff_head = s_heads[threadIdx.y + 1][threadIdx.x] - s_heads[threadIdx.y][threadIdx.x];
else
diff_head = ca.heads[(idx_y + 1) * COLS + idx_x] - s_heads[threadIdx.y][threadIdx.x];
tmp_t = s_K[threadIdx.y][threadIdx.x] * THICKNESS;
Q += diff_head * tmp_t;
}
#ifdef LOOP
}
#endif
Q -= ca.sources[idx_g];
ht1 = Q * DELTA_T;
ht2 = AREA * ca.Sy[idx_g];
headsWrite[idx_g] = s_heads[threadIdx.y][threadIdx.x] + ht1 / ht2;
if (headsWrite[idx_g] < 0)
{ headsWrite[idx_g] = 0; }
}
}
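    // Variant that stages a (BLOCK_SIZE+2) x (BLOCK_SIZE+2) tile of heads,
    // including a one-cell halo, so all four neighbour reads are served from
    // shared memory.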
__global__ void shared_step(struct CA ca, double *headsWrite)
{
__shared__ double s_heads[BLOCK_SIZE + 2][BLOCK_SIZE + 2];
__shared__ double s_K[BLOCK_SIZE + 2][BLOCK_SIZE + 2];
unsigned idx_x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned idx_y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned idx_g = idx_y * COLS + idx_x;
if (idx_x < COLS && idx_y < ROWS)
{
double Q{}, diff_head, tmp_t, ht1, ht2;
unsigned x = threadIdx.x + 1;
unsigned y = threadIdx.y + 1;
s_heads[y][x] = ca.heads[idx_g];
s_K[y][x] = ca.K[idx_g];
if (threadIdx.x == 0 && blockIdx.x != 0) // left
s_heads[y][x - 1] = ca.heads[idx_g - 1];
if (threadIdx.x == BLOCK_SIZE - 1 && blockIdx.x != gridDim.x - 1) // right
s_heads[y][x + 1] = ca.heads[idx_g + 1];
if (threadIdx.y == 0 && blockIdx.y != 0) // upper
s_heads[y - 1][x] = ca.heads[idx_g - COLS];
if (threadIdx.y == BLOCK_SIZE - 1 && blockIdx.y != gridDim.y - 1) // bottom
s_heads[y + 1][x] = ca.heads[idx_g + COLS];
__syncthreads();
#ifdef LOOP
for (int i = 0; i < KERNEL_LOOP_SIZE; i++)
{
if (i == KERNEL_LOOP_SIZE - 1)
{
if (Q) { Q = 0; }
}
#endif
if (idx_x >= 1)
{ // left neighbor
diff_head = s_heads[y][x - 1] - s_heads[y][x];
tmp_t = s_K[y][x] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_y >= 1)
{ // upper neighbor
diff_head = s_heads[y - 1][x] - s_heads[y][x];
tmp_t = s_K[y][x] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_x + 1 < COLS)
{ // right neighbor
diff_head = s_heads[y][x + 1] - s_heads[y][x];
tmp_t = s_K[y][x] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_y + 1 < ROWS)
{ // bottom neighbor
diff_head = s_heads[y + 1][x] - s_heads[y][x];
tmp_t = s_K[y][x] * THICKNESS;
Q += diff_head * tmp_t;
}
#ifdef LOOP
}
#endif
Q -= ca.sources[idx_g];
ht1 = Q * DELTA_T;
ht2 = AREA * ca.Sy[idx_g];
headsWrite[idx_g] = s_heads[y][x] + ht1 / ht2;
if (headsWrite[idx_g] < 0)
{ headsWrite[idx_g] = 0; }
}
}
}
#endif //SATURATED_FLOW_ITERATION_STEP_CU
| 45cbf6771e92249c2187d15bd1adc31efa892756.cu | #ifndef SATURATED_FLOW_ITERATION_STEP_CU
#define SATURATED_FLOW_ITERATION_STEP_CU
#include "../common/memory_management.cuh"
namespace kernels
{
__global__ void standard_step(struct CA ca, double *headsWrite)
{
unsigned idx_x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned idx_y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned idx_g = idx_y * COLS + idx_x;
if (idx_x < COLS && idx_y < ROWS)
{
double Q{}, diff_head, tmp_t, ht1, ht2;
#ifdef LOOP
for (int i = 0; i < KERNEL_LOOP_SIZE; i++)
{
if (i == KERNEL_LOOP_SIZE - 1)
{
if (Q) { Q = 0; }
}
#endif
if (idx_x >= 1)
{
diff_head = ca.heads[idx_g - 1] - ca.heads[idx_g];
tmp_t = ca.K[idx_g] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_y >= 1)
{
diff_head = ca.heads[(idx_y - 1) * COLS + idx_x] - ca.heads[idx_g];
tmp_t = ca.K[idx_g] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_x + 1 < COLS)
{
diff_head = ca.heads[idx_g + 1] - ca.heads[idx_g];
tmp_t = ca.K[idx_g] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_y + 1 < ROWS)
{
diff_head = ca.heads[(idx_y + 1) * COLS + idx_x] - ca.heads[idx_g];
tmp_t = ca.K[idx_g] * THICKNESS;
Q += diff_head * tmp_t;
}
#ifdef LOOP
}
#endif
Q -= ca.sources[idx_g];
ht1 = Q * DELTA_T;
ht2 = AREA * ca.Sy[idx_g];
headsWrite[idx_g] = ca.heads[idx_g] + ht1 / ht2;
if (headsWrite[idx_g] < 0)
{ headsWrite[idx_g] = 0; }
}
}
__global__ void hybrid_step(struct CA ca, double *headsWrite)
{
__shared__ double s_heads[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double s_K[BLOCK_SIZE][BLOCK_SIZE];
unsigned idx_x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned idx_y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned idx_g = idx_y * COLS + idx_x;
if (idx_x < COLS && idx_y < ROWS)
{
double Q{}, diff_head, tmp_t, ht1, ht2;
s_heads[threadIdx.y][threadIdx.x] = ca.heads[idx_g];
s_K[threadIdx.y][threadIdx.x] = ca.K[idx_g];
__syncthreads();
#ifdef LOOP
for (int i = 0; i < KERNEL_LOOP_SIZE; i++)
{
if (i == KERNEL_LOOP_SIZE - 1)
{
if (Q) { Q = 0; }
}
#endif
if (idx_x >= 1)
{ // left neighbor
if (threadIdx.x >= 1)
diff_head = s_heads[threadIdx.y][threadIdx.x - 1] - s_heads[threadIdx.y][threadIdx.x];
else
diff_head = ca.heads[idx_g - 1] - s_heads[threadIdx.y][threadIdx.x];
tmp_t = s_K[threadIdx.y][threadIdx.x] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_y >= 1)
{ // upper neighbor
if (threadIdx.y >= 1)
diff_head = s_heads[threadIdx.y - 1][threadIdx.x] - s_heads[threadIdx.y][threadIdx.x];
else
diff_head = ca.heads[(idx_y - 1) * COLS + idx_x] - s_heads[threadIdx.y][threadIdx.x];
tmp_t = s_K[threadIdx.y][threadIdx.x] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_x + 1 < COLS)
{ // right neighbor
if (threadIdx.x < BLOCK_SIZE - 1)
diff_head = s_heads[threadIdx.y][threadIdx.x + 1] - s_heads[threadIdx.y][threadIdx.x];
else
diff_head = ca.heads[idx_g + 1] - s_heads[threadIdx.y][threadIdx.x];
tmp_t = s_K[threadIdx.y][threadIdx.x] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_y + 1 < ROWS)
{ // bottom neighbor
if (threadIdx.y < BLOCK_SIZE - 1)
diff_head = s_heads[threadIdx.y + 1][threadIdx.x] - s_heads[threadIdx.y][threadIdx.x];
else
diff_head = ca.heads[(idx_y + 1) * COLS + idx_x] - s_heads[threadIdx.y][threadIdx.x];
tmp_t = s_K[threadIdx.y][threadIdx.x] * THICKNESS;
Q += diff_head * tmp_t;
}
#ifdef LOOP
}
#endif
Q -= ca.sources[idx_g];
ht1 = Q * DELTA_T;
ht2 = AREA * ca.Sy[idx_g];
headsWrite[idx_g] = s_heads[threadIdx.y][threadIdx.x] + ht1 / ht2;
if (headsWrite[idx_g] < 0)
{ headsWrite[idx_g] = 0; }
}
}
__global__ void shared_step(struct CA ca, double *headsWrite)
{
__shared__ double s_heads[BLOCK_SIZE + 2][BLOCK_SIZE + 2];
__shared__ double s_K[BLOCK_SIZE + 2][BLOCK_SIZE + 2];
unsigned idx_x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned idx_y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned idx_g = idx_y * COLS + idx_x;
if (idx_x < COLS && idx_y < ROWS)
{
double Q{}, diff_head, tmp_t, ht1, ht2;
unsigned x = threadIdx.x + 1;
unsigned y = threadIdx.y + 1;
s_heads[y][x] = ca.heads[idx_g];
s_K[y][x] = ca.K[idx_g];
if (threadIdx.x == 0 && blockIdx.x != 0) // left
s_heads[y][x - 1] = ca.heads[idx_g - 1];
if (threadIdx.x == BLOCK_SIZE - 1 && blockIdx.x != gridDim.x - 1) // right
s_heads[y][x + 1] = ca.heads[idx_g + 1];
if (threadIdx.y == 0 && blockIdx.y != 0) // upper
s_heads[y - 1][x] = ca.heads[idx_g - COLS];
if (threadIdx.y == BLOCK_SIZE - 1 && blockIdx.y != gridDim.y - 1) // bottom
s_heads[y + 1][x] = ca.heads[idx_g + COLS];
__syncthreads();
#ifdef LOOP
for (int i = 0; i < KERNEL_LOOP_SIZE; i++)
{
if (i == KERNEL_LOOP_SIZE - 1)
{
if (Q) { Q = 0; }
}
#endif
if (idx_x >= 1)
{ // left neighbor
diff_head = s_heads[y][x - 1] - s_heads[y][x];
tmp_t = s_K[y][x] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_y >= 1)
{ // upper neighbor
diff_head = s_heads[y - 1][x] - s_heads[y][x];
tmp_t = s_K[y][x] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_x + 1 < COLS)
{ // right neighbor
diff_head = s_heads[y][x + 1] - s_heads[y][x];
tmp_t = s_K[y][x] * THICKNESS;
Q += diff_head * tmp_t;
}
if (idx_y + 1 < ROWS)
{ // bottom neighbor
diff_head = s_heads[y + 1][x] - s_heads[y][x];
tmp_t = s_K[y][x] * THICKNESS;
Q += diff_head * tmp_t;
}
#ifdef LOOP
}
#endif
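// explicit head update: dh = (Q - source) * DELTA_T / (AREA * Sy); negative heads are clamped to zero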
Q -= ca.sources[idx_g];
ht1 = Q * DELTA_T;
ht2 = AREA * ca.Sy[idx_g];
headsWrite[idx_g] = s_heads[y][x] + ht1 / ht2;
if (headsWrite[idx_g] < 0)
{ headsWrite[idx_g] = 0; }
}
}
}
#endif //SATURATED_FLOW_ITERATION_STEP_CU
|
6c51ff6495a1a0a2a1714ad986998be3f87bd46a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1999-2017 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xdl/core/ops/take_op.h"
#include "xdl/core/framework/op_registry.h"
#include "xdl/core/lib/common_defines.h"
#include "xdl/core/framework/gpu/gpu_device.h"
namespace xdl {
namespace {
template <typename T, typename I>
__global__ void TakeOpKernel(const T* pin,
const I* pind,
size_t col,
size_t num,
T* pout) {
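// one thread per flattened output element: row i of the output gathers row pind[i] of the input, j is the column within that row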
const size_t k = blockIdx.x * blockDim.x + threadIdx.x;
if (k >= num) return;
const size_t i = k / col, j = k % col;
pout[k] = pin[pind[i] * col + j];
}
} // namespace
template <typename T, typename I>
class TakeGpuOp : public GpuOpKernel {
public:
Status Init(OpKernelConstruction* ctx) override {
return Status::Ok();
}
Status LaunchKernel(OpKernelContext* ctx, CudaStream* stream) override;
};
template <typename T, typename I>
Status TakeGpuOp<T, I>::LaunchKernel(OpKernelContext* ctx, CudaStream* stream) {
Tensor feature, indicator, output;
XDL_CHECK_STATUS(ctx->GetInput(0, &feature));
XDL_CHECK_STATUS(ctx->GetInput(1, &indicator));
XDL_CHECK_COND(1 == indicator.Shape().Size(),
Status::ArgumentError("indicator must be rank 1 tensor"));
auto fea_dims = feature.Shape().Dims();
std::vector<size_t> dims(fea_dims.begin(), fea_dims.end());
dims[0] = indicator.Shape().NumElements();
TensorShape out_shape(dims);
XDL_CHECK_STATUS(ctx->AllocateOutput(0, out_shape, &output));
size_t row = dims[0];
size_t col = feature.Shape().NumElements() / feature.Shape()[0];
size_t num = row * col;
T* pin = feature.Raw<T>(), *pout = output.Raw<T>();
I* pind = indicator.Raw<I>();
hipStream_t st = stream->GetInternal();
if (num == 0) {
CUDA_CHECK(hipMemsetAsync(pout, 0, sizeof(T) * out_shape.NumElements(), st));
return Status::Ok();
}
size_t blocks = CUDA_GET_BLOCKS(num);
hipLaunchKernelGGL(( TakeOpKernel<T, I>),
dim3(blocks),
dim3(CUDA_GET_THREADS(num, blocks)),
0,
st, pin, pind, col, num, pout);
return Status::Ok();
}
#define REGISTER_GPU_KERNEL(T, I) \
XDL_REGISTER_KERNEL(TakeOp, TakeGpuOp<T, I>) \
.Device("GPU") \
.AttrDataType<T>("dtype") \
.AttrDataType<I>("itype")
REGISTER_GPU_KERNEL(float, int32_t);
REGISTER_GPU_KERNEL(float, int64_t);
REGISTER_GPU_KERNEL(double, int32_t);
REGISTER_GPU_KERNEL(double, int64_t);
#undef REGISTER_GPU_KERNEL
} // namespace xdl
| 6c51ff6495a1a0a2a1714ad986998be3f87bd46a.cu | /*
* Copyright 1999-2017 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xdl/core/ops/take_op.h"
#include "xdl/core/framework/op_registry.h"
#include "xdl/core/lib/common_defines.h"
#include "xdl/core/framework/gpu/gpu_device.h"
namespace xdl {
namespace {
template <typename T, typename I>
__global__ void TakeOpKernel(const T* pin,
const I* pind,
size_t col,
size_t num,
T* pout) {
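// one thread per flattened output element: row i of the output gathers row pind[i] of the input, j is the column within that row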
const size_t k = blockIdx.x * blockDim.x + threadIdx.x;
if (k >= num) return;
const size_t i = k / col, j = k % col;
pout[k] = pin[pind[i] * col + j];
}
} // namespace
template <typename T, typename I>
class TakeGpuOp : public GpuOpKernel {
public:
Status Init(OpKernelConstruction* ctx) override {
return Status::Ok();
}
Status LaunchKernel(OpKernelContext* ctx, CudaStream* stream) override;
};
template <typename T, typename I>
Status TakeGpuOp<T, I>::LaunchKernel(OpKernelContext* ctx, CudaStream* stream) {
Tensor feature, indicator, output;
XDL_CHECK_STATUS(ctx->GetInput(0, &feature));
XDL_CHECK_STATUS(ctx->GetInput(1, &indicator));
XDL_CHECK_COND(1 == indicator.Shape().Size(),
Status::ArgumentError("indicator must be rank 1 tensor"));
auto fea_dims = feature.Shape().Dims();
std::vector<size_t> dims(fea_dims.begin(), fea_dims.end());
dims[0] = indicator.Shape().NumElements();
TensorShape out_shape(dims);
XDL_CHECK_STATUS(ctx->AllocateOutput(0, out_shape, &output));
size_t row = dims[0];
size_t col = feature.Shape().NumElements() / feature.Shape()[0];
size_t num = row * col;
T* pin = feature.Raw<T>(), *pout = output.Raw<T>();
I* pind = indicator.Raw<I>();
cudaStream_t st = stream->GetInternal();
if (num == 0) {
CUDA_CHECK(cudaMemsetAsync(pout, 0, sizeof(T) * out_shape.NumElements(), st));
return Status::Ok();
}
size_t blocks = CUDA_GET_BLOCKS(num);
TakeOpKernel<T, I><<<
blocks,
CUDA_GET_THREADS(num, blocks),
0,
st>>>(pin, pind, col, num, pout);
return Status::Ok();
}
#define REGISTER_GPU_KERNEL(T, I) \
XDL_REGISTER_KERNEL(TakeOp, TakeGpuOp<T, I>) \
.Device("GPU") \
.AttrDataType<T>("dtype") \
.AttrDataType<I>("itype")
REGISTER_GPU_KERNEL(float, int32_t);
REGISTER_GPU_KERNEL(float, int64_t);
REGISTER_GPU_KERNEL(double, int32_t);
REGISTER_GPU_KERNEL(double, int64_t);
#undef REGISTER_GPU_KERNEL
} // namespace xdl
|
f7d0d72543443369150f7cc89483f9255d6cf9bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
// NVIDIA/apex is licensed under the
// BSD 3 - Clause "New" or "Revised" License
//
/* Modifications Copyright (c) Microsoft. */
#include "core/providers/cuda/cu_inc/common.cuh"
#include "layer_norm_impl.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
using namespace onnxruntime::cuda;
template <typename U, bool simplified>
__device__ void cuWelfordOnlineSum(
const U curr,
U& mu,
U& sigma2,
U& count) {
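// Welford's online update: fold one sample into the running mean (mu) and the running sum of
// squared deviations (sigma2); the simplified variant accumulates the raw sum of squares instead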
count = count + U(1);
U delta = curr - mu;
U lmean = mu + delta / count;
mu = lmean;
if (simplified) {
sigma2 = sigma2 + curr * curr;
} else {
U delta2 = curr - lmean;
sigma2 = sigma2 + delta * delta2;
}
}
template <typename U, bool simplified>
__device__ void cuChanOnlineSum(
const U muB,
const U sigma2B,
const U countB,
U& mu,
U& sigma2,
U& count) {
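// Chan et al. parallel merge: fold another partial accumulator (muB, sigma2B, countB) into (mu, sigma2, count)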
U delta = muB - mu;
U nA = count;
U nB = countB;
count = count + countB;
U nX = count;
if (nX > U(0)) {
nA = nA / nX;
nB = nB / nX;
mu = nA * mu + nB * muB;
if (simplified) {
sigma2 = sigma2 + sigma2B;
} else {
sigma2 = sigma2 + sigma2B + delta * delta * nA * nB * nX;
}
} else {
mu = U(0);
sigma2 = U(0);
}
}
template <typename T, typename U, bool simplified>
__device__ void cuWelfordMuSigma2(
const T* __restrict__ vals,
const int n1,
const int n2,
const int i1,
U& mu,
U& sigma2,
U* buf) {
// Assumptions:
// 1) blockDim.x == GPU_WARP_SIZE
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
U count = U(0);
mu = U(0);
sigma2 = U(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const T* lvals = vals + i1 * n2;
int l = 4 * thrx;
for (; l + 3 < n2; l += 4 * numx) {
for (int k = 0; k < 4; ++k) {
U curr = static_cast<U>(lvals[l + k]);
cuWelfordOnlineSum<U, simplified>(curr, mu, sigma2, count);
}
}
for (; l < n2; ++l) {
U curr = static_cast<U>(lvals[l]);
cuWelfordOnlineSum<U, simplified>(curr, mu, sigma2, count);
}
// intra-warp reductions
#pragma unroll
for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) {
U muB = WARP_SHFL_DOWN(mu, stride);
U countB = WARP_SHFL_DOWN(count, stride);
U sigma2B = WARP_SHFL_DOWN(sigma2, stride);
cuChanOnlineSum<U, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
U* ubuf = (U*)buf;
U* ibuf = (U*)(ubuf + blockDim.y);
for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2 * wrt_y] = mu;
ubuf[2 * wrt_y + 1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
U muB = ubuf[2 * threadIdx.y];
U sigma2B = ubuf[2 * threadIdx.y + 1];
U countB = ibuf[threadIdx.y];
cuChanOnlineSum<U, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
__syncthreads();
}
// threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1] / U(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0);
sigma2 = WARP_SHFL(sigma2 / U(n2), 0);
}
}
}
template <bool simplified>
__device__ void cuWelfordMuSigma2(
const half* __restrict__ vals,
const int n1,
const int n2,
const int i1,
float& mu,
float& sigma2,
float* buf) {
// Assumptions:
// 1) blockDim.x == GPU_WARP_SIZE
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
float count = 0.0f;
mu = float(0);
sigma2 = float(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const half* lvals = vals + i1 * n2;
int l = 8 * thrx;
if ((((size_t)lvals) & 3) != 0) {
// 16 bit alignment
// first thread consumes first point
if (thrx == 0) {
float curr = static_cast<float>(lvals[0]);
cuWelfordOnlineSum<float, simplified>(curr, mu, sigma2, count);
}
++l;
}
// at this point, lvals[l] are 32 bit aligned for all threads.
for (; l + 7 < n2; l += 8 * numx) {
for (int k = 0; k < 8; k += 2) {
float2 curr = __half22float2(*((__half2*)(lvals + l + k)));
cuWelfordOnlineSum<float, simplified>(curr.x, mu, sigma2, count);
cuWelfordOnlineSum<float, simplified>(curr.y, mu, sigma2, count);
}
}
for (; l < n2; ++l) {
float curr = static_cast<float>(lvals[l]);
cuWelfordOnlineSum<float, simplified>(curr, mu, sigma2, count);
}
// intra-warp reductions
#pragma unroll
for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) {
float muB = WARP_SHFL_DOWN(mu, stride);
float countB = WARP_SHFL_DOWN(count, stride);
float sigma2B = WARP_SHFL_DOWN(sigma2, stride);
cuChanOnlineSum<float, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
float* ubuf = (float*)buf;
float* ibuf = (float*)(ubuf + blockDim.y);
for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2 * wrt_y] = mu;
ubuf[2 * wrt_y + 1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
float muB = ubuf[2 * threadIdx.y];
float sigma2B = ubuf[2 * threadIdx.y + 1];
float countB = ibuf[threadIdx.y];
cuChanOnlineSum<float, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
__syncthreads();
}
// threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1] / float(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0);
sigma2 = WARP_SHFL(sigma2 / float(n2), 0);
}
}
}
template <typename U>
__device__ U rsqrt(U v) {
return U(1) / sqrt(v);
}
template <>
__device__ float rsqrt(float v) {
return rsqrtf(v);
}
template <>
__device__ double rsqrt(double v) {
return rsqrt(v);
}
namespace {
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
// template <typename T>
// struct SharedMemory
// {
// // Ensure that we won't compile any un-specialized types
// __device__ T *getPointer()
// {
// extern __device__ void error(void);
// error();
// return NULL;
// }
// };
// https://github.com/NVIDIA/apex/issues/246
template <typename T>
struct SharedMemory;
template <>
struct SharedMemory<float> {
__device__ float* getPointer() {
extern __shared__ float s_float[];
return s_float;
}
};
template <>
struct SharedMemory<double> {
__device__ double* getPointer() {
extern __shared__ double s_double[];
return s_double;
}
};
} // namespace
template <typename T, typename U, typename V, bool simplified>
__global__ void cuApplyLayerNorm(
V* __restrict__ output_vals,
U* __restrict__ mean,
U* __restrict__ inv_std_dev,
const T* __restrict__ vals,
const int n1,
const int n2,
const U epsilon,
const V* __restrict__ gamma,
const V* __restrict__ beta) {
// Assumptions:
// 1) blockDim.x == GPU_WARP_SIZE
// 2) Tensors are contiguous
//
for (int i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) {
SharedMemory<U> shared;
U* buf = shared.getPointer();
U mu, sigma2;
cuWelfordMuSigma2<T, U, simplified>(vals, n1, n2, i1, mu, sigma2, buf);
const T* lvals = vals + i1 * n2;
V* ovals = output_vals + i1 * n2;
U c_inv_std_dev = rsqrt(sigma2 + epsilon);
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
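// normalize the row: y = gamma * (x - mu) * inv_std_dev + beta; the simplified variant skips the mean subtraction and beta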
for (int i = thrx; i < n2; i += numx) {
U curr = static_cast<U>(lvals[i]);
V gamma_i = (gamma != NULL) ? gamma[i] : (V)1;
V beta_i = (beta != NULL) ? beta[i] : (V)0;
if (simplified) {
ovals[i] = gamma_i * static_cast<V>(c_inv_std_dev * curr);
} else {
ovals[i] = gamma_i * static_cast<V>(c_inv_std_dev * (curr - mu)) + beta_i;
}
}
if (threadIdx.x == 0 && threadIdx.y == 0) {
if (mean != nullptr) mean[i1] = mu;
if (inv_std_dev != nullptr) inv_std_dev[i1] = c_inv_std_dev;
}
}
}
template <typename T, typename U, typename V, bool simplified>
void HostApplyLayerNorm(
const hipDeviceProp_t& prop,
hipStream_t stream,
V* output,
U* mean,
U* inv_std_dev,
const T* input,
int n1,
int n2,
double epsilon,
const V* gamma,
const V* beta) {
const int maxGridY = prop.maxGridSize[1];
const int warp_size = prop.warpSize;
ORT_ENFORCE(warp_size == GPU_WARP_SIZE_HOST);
dim3 threads(warp_size, 4, 1);
#ifdef __HIP_PLATFORM_HCC__
// Optimization for ROCm MI100
threads.y = 1;
#endif
const dim3 blocks(1, std::min<unsigned int>(n1, maxGridY), 1);
int nshared =
threads.y > 1 ? threads.y * sizeof(U) + (threads.y / 2) * sizeof(U) : 0;
hipLaunchKernelGGL(( cuApplyLayerNorm<T, U, V, simplified>), dim3(blocks), dim3(threads), nshared, stream,
output,
mean,
inv_std_dev,
input,
n1, n2,
U(epsilon),
gamma, beta);
}
#define LAYERNORM_LINEAR_IMPL(T, U, V, simplified) \
template void HostApplyLayerNorm<T, U, V, simplified>(const hipDeviceProp_t& prop, hipStream_t stream, V* output, \
U* mean, U* inv_std_dev, const T* input, int n1, int n2, \
double epsilon, const V* gamma, const V* beta);
LAYERNORM_LINEAR_IMPL(float, float, float, true)
LAYERNORM_LINEAR_IMPL(half, float, half, true)
LAYERNORM_LINEAR_IMPL(double, double, double, true)
LAYERNORM_LINEAR_IMPL(float, float, half, true)
LAYERNORM_LINEAR_IMPL(float, float, float, false)
LAYERNORM_LINEAR_IMPL(half, float, half, false)
LAYERNORM_LINEAR_IMPL(double, double, double, false)
LAYERNORM_LINEAR_IMPL(float, float, half, false)
LAYERNORM_LINEAR_IMPL(BFloat16, float, BFloat16, true)
LAYERNORM_LINEAR_IMPL(BFloat16, float, BFloat16, false)
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| f7d0d72543443369150f7cc89483f9255d6cf9bf.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//
// Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
// NVIDIA/apex is licensed under the
// BSD 3 - Clause "New" or "Revised" License
//
/* Modifications Copyright (c) Microsoft. */
#include "core/providers/cuda/cu_inc/common.cuh"
#include "layer_norm_impl.h"
namespace onnxruntime {
namespace contrib {
namespace cuda {
using namespace onnxruntime::cuda;
template <typename U, bool simplified>
__device__ void cuWelfordOnlineSum(
const U curr,
U& mu,
U& sigma2,
U& count) {
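// Welford's online update: fold one sample into the running mean (mu) and the running sum of
// squared deviations (sigma2); the simplified variant accumulates the raw sum of squares instead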
count = count + U(1);
U delta = curr - mu;
U lmean = mu + delta / count;
mu = lmean;
if (simplified) {
sigma2 = sigma2 + curr * curr;
} else {
U delta2 = curr - lmean;
sigma2 = sigma2 + delta * delta2;
}
}
template <typename U, bool simplified>
__device__ void cuChanOnlineSum(
const U muB,
const U sigma2B,
const U countB,
U& mu,
U& sigma2,
U& count) {
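// Chan et al. parallel merge: fold another partial accumulator (muB, sigma2B, countB) into (mu, sigma2, count)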
U delta = muB - mu;
U nA = count;
U nB = countB;
count = count + countB;
U nX = count;
if (nX > U(0)) {
nA = nA / nX;
nB = nB / nX;
mu = nA * mu + nB * muB;
if (simplified) {
sigma2 = sigma2 + sigma2B;
} else {
sigma2 = sigma2 + sigma2B + delta * delta * nA * nB * nX;
}
} else {
mu = U(0);
sigma2 = U(0);
}
}
template <typename T, typename U, bool simplified>
__device__ void cuWelfordMuSigma2(
const T* __restrict__ vals,
const int n1,
const int n2,
const int i1,
U& mu,
U& sigma2,
U* buf) {
// Assumptions:
// 1) blockDim.x == GPU_WARP_SIZE
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
U count = U(0);
mu = U(0);
sigma2 = U(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const T* lvals = vals + i1 * n2;
int l = 4 * thrx;
for (; l + 3 < n2; l += 4 * numx) {
for (int k = 0; k < 4; ++k) {
U curr = static_cast<U>(lvals[l + k]);
cuWelfordOnlineSum<U, simplified>(curr, mu, sigma2, count);
}
}
for (; l < n2; ++l) {
U curr = static_cast<U>(lvals[l]);
cuWelfordOnlineSum<U, simplified>(curr, mu, sigma2, count);
}
// intra-warp reductions
#pragma unroll
for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) {
U muB = WARP_SHFL_DOWN(mu, stride);
U countB = WARP_SHFL_DOWN(count, stride);
U sigma2B = WARP_SHFL_DOWN(sigma2, stride);
cuChanOnlineSum<U, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
U* ubuf = (U*)buf;
U* ibuf = (U*)(ubuf + blockDim.y);
for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2 * wrt_y] = mu;
ubuf[2 * wrt_y + 1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
U muB = ubuf[2 * threadIdx.y];
U sigma2B = ubuf[2 * threadIdx.y + 1];
U countB = ibuf[threadIdx.y];
cuChanOnlineSum<U, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
__syncthreads();
}
// threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1] / U(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0);
sigma2 = WARP_SHFL(sigma2 / U(n2), 0);
}
}
}
template <bool simplified>
__device__ void cuWelfordMuSigma2(
const half* __restrict__ vals,
const int n1,
const int n2,
const int i1,
float& mu,
float& sigma2,
float* buf) {
// Assumptions:
// 1) blockDim.x == GPU_WARP_SIZE
// 2) Tensor is contiguous
// 3) 2*blockDim.y*sizeof(U)+blockDim.y*sizeof(int) shared memory available.
//
// compute variance and mean over n2
float count = 0.0f;
mu = float(0);
sigma2 = float(0);
if (i1 < n1) {
// one warp normalizes one n1 index,
// synchronization is implicit
// initialize with standard Welford algorithm
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
const half* lvals = vals + i1 * n2;
int l = 8 * thrx;
if ((((size_t)lvals) & 3) != 0) {
// 16 bit alignment
// first thread consumes first point
if (thrx == 0) {
float curr = static_cast<float>(lvals[0]);
cuWelfordOnlineSum<float, simplified>(curr, mu, sigma2, count);
}
++l;
}
// at this point, lvals[l] are 32 bit aligned for all threads.
for (; l + 7 < n2; l += 8 * numx) {
for (int k = 0; k < 8; k += 2) {
float2 curr = __half22float2(*((__half2*)(lvals + l + k)));
cuWelfordOnlineSum<float, simplified>(curr.x, mu, sigma2, count);
cuWelfordOnlineSum<float, simplified>(curr.y, mu, sigma2, count);
}
}
for (; l < n2; ++l) {
float curr = static_cast<float>(lvals[l]);
cuWelfordOnlineSum<float, simplified>(curr, mu, sigma2, count);
}
// intra-warp reductions
#pragma unroll
for (int stride = GPU_WARP_SIZE / 2; stride > 0; stride /= 2) {
float muB = WARP_SHFL_DOWN(mu, stride);
float countB = WARP_SHFL_DOWN(count, stride);
float sigma2B = WARP_SHFL_DOWN(sigma2, stride);
cuChanOnlineSum<float, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
// threadIdx.x == 0 has correct values for each warp
// inter-warp reductions
if (blockDim.y > 1) {
float* ubuf = (float*)buf;
float* ibuf = (float*)(ubuf + blockDim.y);
for (int offset = blockDim.y / 2; offset > 0; offset /= 2) {
// upper half of warps write to shared
if (threadIdx.x == 0 && threadIdx.y >= offset && threadIdx.y < 2 * offset) {
const int wrt_y = threadIdx.y - offset;
ubuf[2 * wrt_y] = mu;
ubuf[2 * wrt_y + 1] = sigma2;
ibuf[wrt_y] = count;
}
__syncthreads();
// lower half merges
if (threadIdx.x == 0 && threadIdx.y < offset) {
float muB = ubuf[2 * threadIdx.y];
float sigma2B = ubuf[2 * threadIdx.y + 1];
float countB = ibuf[threadIdx.y];
cuChanOnlineSum<float, simplified>(muB, sigma2B, countB, mu, sigma2, count);
}
__syncthreads();
}
// threadIdx.x = 0 && threadIdx.y == 0 only thread that has correct values
if (threadIdx.x == 0 && threadIdx.y == 0) {
ubuf[0] = mu;
ubuf[1] = sigma2;
}
__syncthreads();
mu = ubuf[0];
sigma2 = ubuf[1] / float(n2);
// don't care about final value of count, we know count == n2
} else {
mu = WARP_SHFL(mu, 0);
sigma2 = WARP_SHFL(sigma2 / float(n2), 0);
}
}
}
template <typename U>
__device__ U rsqrt(U v) {
return U(1) / sqrt(v);
}
template <>
__device__ float rsqrt(float v) {
return rsqrtf(v);
}
template <>
__device__ double rsqrt(double v) {
return rsqrt(v);
}
namespace {
// This is the un-specialized struct. Note that we prevent instantiation of this
// struct by putting an undefined symbol in the function body so it won't compile.
// template <typename T>
// struct SharedMemory
// {
// // Ensure that we won't compile any un-specialized types
// __device__ T *getPointer()
// {
// extern __device__ void error(void);
// error();
// return NULL;
// }
// };
// https://github.com/NVIDIA/apex/issues/246
template <typename T>
struct SharedMemory;
template <>
struct SharedMemory<float> {
__device__ float* getPointer() {
extern __shared__ float s_float[];
return s_float;
}
};
template <>
struct SharedMemory<double> {
__device__ double* getPointer() {
extern __shared__ double s_double[];
return s_double;
}
};
} // namespace
template <typename T, typename U, typename V, bool simplified>
__global__ void cuApplyLayerNorm(
V* __restrict__ output_vals,
U* __restrict__ mean,
U* __restrict__ inv_std_dev,
const T* __restrict__ vals,
const int n1,
const int n2,
const U epsilon,
const V* __restrict__ gamma,
const V* __restrict__ beta) {
// Assumptions:
// 1) blockDim.x == GPU_WARP_SIZE
// 2) Tensors are contiguous
//
for (int i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) {
SharedMemory<U> shared;
U* buf = shared.getPointer();
U mu, sigma2;
cuWelfordMuSigma2<T, U, simplified>(vals, n1, n2, i1, mu, sigma2, buf);
const T* lvals = vals + i1 * n2;
V* ovals = output_vals + i1 * n2;
U c_inv_std_dev = rsqrt(sigma2 + epsilon);
const int numx = blockDim.x * blockDim.y;
const int thrx = threadIdx.x + threadIdx.y * blockDim.x;
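// normalize the row: y = gamma * (x - mu) * inv_std_dev + beta; the simplified variant skips the mean subtraction and beta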
for (int i = thrx; i < n2; i += numx) {
U curr = static_cast<U>(lvals[i]);
V gamma_i = (gamma != NULL) ? gamma[i] : (V)1;
V beta_i = (beta != NULL) ? beta[i] : (V)0;
if (simplified) {
ovals[i] = gamma_i * static_cast<V>(c_inv_std_dev * curr);
} else {
ovals[i] = gamma_i * static_cast<V>(c_inv_std_dev * (curr - mu)) + beta_i;
}
}
if (threadIdx.x == 0 && threadIdx.y == 0) {
if (mean != nullptr) mean[i1] = mu;
if (inv_std_dev != nullptr) inv_std_dev[i1] = c_inv_std_dev;
}
}
}
template <typename T, typename U, typename V, bool simplified>
void HostApplyLayerNorm(
const cudaDeviceProp& prop,
cudaStream_t stream,
V* output,
U* mean,
U* inv_std_dev,
const T* input,
int n1,
int n2,
double epsilon,
const V* gamma,
const V* beta) {
const int maxGridY = prop.maxGridSize[1];
const int warp_size = prop.warpSize;
ORT_ENFORCE(warp_size == GPU_WARP_SIZE_HOST);
dim3 threads(warp_size, 4, 1);
#ifdef __HIP_PLATFORM_HCC__
// Optimization for ROCm MI100
threads.y = 1;
#endif
const dim3 blocks(1, std::min<unsigned int>(n1, maxGridY), 1);
int nshared =
threads.y > 1 ? threads.y * sizeof(U) + (threads.y / 2) * sizeof(U) : 0;
cuApplyLayerNorm<T, U, V, simplified><<<blocks, threads, nshared, stream>>>(
output,
mean,
inv_std_dev,
input,
n1, n2,
U(epsilon),
gamma, beta);
}
#define LAYERNORM_LINEAR_IMPL(T, U, V, simplified) \
template void HostApplyLayerNorm<T, U, V, simplified>(const cudaDeviceProp& prop, cudaStream_t stream, V* output, \
U* mean, U* inv_std_dev, const T* input, int n1, int n2, \
double epsilon, const V* gamma, const V* beta);
LAYERNORM_LINEAR_IMPL(float, float, float, true)
LAYERNORM_LINEAR_IMPL(half, float, half, true)
LAYERNORM_LINEAR_IMPL(double, double, double, true)
LAYERNORM_LINEAR_IMPL(float, float, half, true)
LAYERNORM_LINEAR_IMPL(float, float, float, false)
LAYERNORM_LINEAR_IMPL(half, float, half, false)
LAYERNORM_LINEAR_IMPL(double, double, double, false)
LAYERNORM_LINEAR_IMPL(float, float, half, false)
LAYERNORM_LINEAR_IMPL(BFloat16, float, BFloat16, true)
LAYERNORM_LINEAR_IMPL(BFloat16, float, BFloat16, false)
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
b1fe61ecfd946b8f9cb0e2f21b1b32768f666f3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_sort.cuh"
#include <algorithm>
cuda_sort::cuda_sort()
{
name = "cuda Bitonic sort";
}
__global__
void bitonic_sort_step(int * dev_values, int j, int k)
{
// sorting params
unsigned int i, ixj;
i = threadIdx.x + blockDim.x * blockIdx.x;
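// partner index for this compare-exchange step: flipping bit j of i pairs each thread with the element it may swap with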
ixj = i^j;
// threads with the lowest ids sort the array
if ((ixj) > i) {
if ((i&k) == 0) {
// sort ascending
if (dev_values[i] > dev_values[ixj]) {
// swap i with ixj
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i&k) != 0) {
// sort descending
if (dev_values[i] < dev_values[ixj]) {
// swap ixj with i
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
void cuda_sort::run(int * unsortedInts, int length)
{
THREADS = 512;
NUM_VALS = length;
BLOCKS = NUM_VALS / THREADS;
int *dev_values;
size_t size = NUM_VALS * sizeof(int);
hipMalloc((void**)&dev_values, size);
hipMemcpy(dev_values, unsortedInts, size, hipMemcpyHostToDevice);
dim3 blocks(BLOCKS, 1); // num blocks
dim3 threads(THREADS, 1); // num threads
int j, k;
// Major step
for (k = 2; k <= NUM_VALS; k <<= 1) {
// Minor step
for (j = k >> 1; j > 0; j = j >> 1) {
hipLaunchKernelGGL(( bitonic_sort_step) , dim3(blocks), dim3(threads) , 0, 0, dev_values, j, k);
}
}
int * res;
hipMemcpy(unsortedInts, dev_values, size, hipMemcpyDeviceToHost);
hipFree(dev_values);
}
void cuda_sort::bitonic_sort(int * values)
{
std::vector<int> test(values, values + NUM_VALS);
int *dev_values;
size_t size = NUM_VALS * sizeof(int);
hipMalloc((void**)&dev_values, size);
hipMemcpy(dev_values, values, size, hipMemcpyHostToDevice);
dim3 blocks(BLOCKS, 1); // num blocks
dim3 threads(THREADS, 1); // num threads
int j, k;
// Major step
for (k = 2; k <= NUM_VALS; k <<= 1) {
// Minor step
for (j = k >> 1; j > 0; j = j >> 1) {
hipLaunchKernelGGL(( bitonic_sort_step) , dim3(blocks), dim3(threads), 0, 0, dev_values, j, k);
}
}
hipMemcpy(values, dev_values, size, hipMemcpyDeviceToHost);
hipFree(dev_values);
} | b1fe61ecfd946b8f9cb0e2f21b1b32768f666f3c.cu | #include "cuda_sort.cuh"
#include <algorithm>
cuda_sort::cuda_sort()
{
name = "cuda Bitonic sort";
}
__global__
void bitonic_sort_step(int * dev_values, int j, int k)
{
// sorting params
unsigned int i, ixj;
i = threadIdx.x + blockDim.x * blockIdx.x;
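// partner index for this compare-exchange step: flipping bit j of i pairs each thread with the element it may swap with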
ixj = i^j;
// threads with the lowest ids sort the array
if ((ixj) > i) {
if ((i&k) == 0) {
// sort ascending
if (dev_values[i] > dev_values[ixj]) {
// swap i with ixj
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i&k) != 0) {
// sort descending
if (dev_values[i] < dev_values[ixj]) {
// swap ixj with i
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
void cuda_sort::run(int * unsortedInts, int length)
{
THREADS = 512;
NUM_VALS = length;
BLOCKS = NUM_VALS / THREADS;
int *dev_values;
size_t size = NUM_VALS * sizeof(int);
cudaMalloc((void**)&dev_values, size);
cudaMemcpy(dev_values, unsortedInts, size, cudaMemcpyHostToDevice);
dim3 blocks(BLOCKS, 1); // num blocks
dim3 threads(THREADS, 1); // num threads
int j, k;
// Major step
for (k = 2; k <= NUM_VALS; k <<= 1) {
// Minor step
for (j = k >> 1; j > 0; j = j >> 1) {
bitonic_sort_step <<<blocks, threads >>>(dev_values, j, k);
}
}
int * res;
cudaMemcpy(unsortedInts, dev_values, size, cudaMemcpyDeviceToHost);
cudaFree(dev_values);
}
void cuda_sort::bitonic_sort(int * values)
{
std::vector<int> test(values, values + NUM_VALS);
int *dev_values;
size_t size = NUM_VALS * sizeof(int);
cudaMalloc((void**)&dev_values, size);
cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
dim3 blocks(BLOCKS, 1); // num blocks
dim3 threads(THREADS, 1); // num threads
int j, k;
// Major step
for (k = 2; k <= NUM_VALS; k <<= 1) {
// Minor step
for (j = k >> 1; j > 0; j = j >> 1) {
bitonic_sort_step <<<blocks, threads>>>(dev_values, j, k);
}
}
cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
cudaFree(dev_values);
} |
9471c01b83b0bf5e520def947e16f846dc021ac3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common/Constants.h"
#include "core/geometry/NodeGraphBuilderNaive.h"
#include <device_launch_parameters.h>
namespace surfelwarp { namespace device {
__device__ __forceinline__ float distance_square(const float4& p1, const float4& p2) {
return (p1.x - p2.x)*(p1.x - p2.x) + (p1.y - p2.y)*(p1.y - p2.y) + (p1.z - p2.z)*(p1.z - p2.z);
}
enum {
//The default nn size of nodes
d_node_nn_size = 8
};
/* The node graph stores the neighbours of each control node; every node keeps its 8
nearest neighbours, stored as consecutive elements in the node graph. The nodes only
number in the thousands, so brute-force iteration over all nodes is not very expensive
*/
__global__ void buildNaiveNodeGraphKernel(
const DeviceArrayView<float4> node_coords,
ushort2* device_node_graph
) {
const int node_num = node_coords.Size();
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= node_num) return;
float dist_vec[d_node_nn_size];
int idx_vec[d_node_nn_size];
//First init these values
for (int k = 0; k < d_node_nn_size; k++) {
idx_vec[k] = -1;
dist_vec[k] = 1e5;
}
//Perform brute-force search of these nodes
const float4 p_idx = node_coords[idx];
int max_index = 0;
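//max_index tracks which of the 8 kept candidates is currently the farthest, i.e. the slot to replace when a closer node is found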
for (int k = 0; k < node_num; k++) {
const float4 coord = node_coords[k];
const float new_dist = distance_square(p_idx, coord);
if (new_dist > 1e-6 && new_dist < dist_vec[max_index]) {
dist_vec[max_index] = new_dist;
idx_vec[max_index] = k;
//Re-select the slot that currently holds the maximum distance
max_index = 0;
float max_dist = 0;
for (int j = 0; j < d_node_nn_size; j++) {
if (dist_vec[j] > max_dist) {
max_index = j;
max_dist = dist_vec[j];
}
}
}
}
//Record the selected neighbour indices (this node, neighbour k) in the node graph
for (int k = 0; k < d_node_nn_size; k++) {
const int offset = idx * d_node_nn_size + k;
device_node_graph[offset] = make_ushort2(idx, idx_vec[k]);
}
}
} // namespace device
} // namespace surfelwarp
void surfelwarp::NodeGraphBuilderNaive::buildNodeGraph(
const DeviceArrayView<float4>& reference_nodes,
DeviceArraySlice<ushort2> node_graph,
hipStream_t stream
) {
dim3 blk(64);
dim3 grid(divUp(reference_nodes.Size(), blk.x));
hipLaunchKernelGGL(( device::buildNaiveNodeGraphKernel), dim3(grid), dim3(blk), 0, stream,
reference_nodes,
node_graph.RawPtr()
);
}
void surfelwarp::NodeGraphBuilderNaive::BuildNodeGraph(
const DeviceArrayView<float4>& reference_nodes,
DeviceBufferArray<ushort2>& node_graph,
hipStream_t stream
) {
node_graph.ResizeArrayOrException(reference_nodes.Size() * Constants::kNumNodeGraphNeigbours);
buildNodeGraph(reference_nodes, node_graph.ArraySlice(), stream);
} | 9471c01b83b0bf5e520def947e16f846dc021ac3.cu | #include "common/Constants.h"
#include "core/geometry/NodeGraphBuilderNaive.h"
#include <device_launch_parameters.h>
namespace surfelwarp { namespace device {
__device__ __forceinline__ float distance_square(const float4& p1, const float4& p2) {
return (p1.x - p2.x)*(p1.x - p2.x) + (p1.y - p2.y)*(p1.y - p2.y) + (p1.z - p2.z)*(p1.z - p2.z);
}
enum {
//The default nn size of nodes
d_node_nn_size = 8
};
/* The node graph stores the neighbours of each control node; every node keeps its 8
nearest neighbours, stored as consecutive elements in the node graph. The nodes only
number in the thousands, so brute-force iteration over all nodes is not very expensive
*/
__global__ void buildNaiveNodeGraphKernel(
const DeviceArrayView<float4> node_coords,
ushort2* device_node_graph
) {
const int node_num = node_coords.Size();
const int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= node_num) return;
float dist_vec[d_node_nn_size];
int idx_vec[d_node_nn_size];
//First init these values
for (int k = 0; k < d_node_nn_size; k++) {
idx_vec[k] = -1;
dist_vec[k] = 1e5;
}
//Perform brute-force search of these nodes
const float4 p_idx = node_coords[idx];
int max_index = 0;
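//max_index tracks which of the 8 kept candidates is currently the farthest, i.e. the slot to replace when a closer node is found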
for (int k = 0; k < node_num; k++) {
const float4 coord = node_coords[k];
const float new_dist = distance_square(p_idx, coord);
if (new_dist > 1e-6 && new_dist < dist_vec[max_index]) {
dist_vec[max_index] = new_dist;
idx_vec[max_index] = k;
//Re-select the slot that currently holds the maximum distance
max_index = 0;
float max_dist = 0;
for (int j = 0; j < d_node_nn_size; j++) {
if (dist_vec[j] > max_dist) {
max_index = j;
max_dist = dist_vec[j];
}
}
}
}
//Record the selected neighbour indices (this node, neighbour k) in the node graph
for (int k = 0; k < d_node_nn_size; k++) {
const int offset = idx * d_node_nn_size + k;
device_node_graph[offset] = make_ushort2(idx, idx_vec[k]);
}
}
} // namespace device
} // namespace surfelwarp
void surfelwarp::NodeGraphBuilderNaive::buildNodeGraph(
const DeviceArrayView<float4>& reference_nodes,
DeviceArraySlice<ushort2> node_graph,
cudaStream_t stream
) {
dim3 blk(64);
dim3 grid(divUp(reference_nodes.Size(), blk.x));
device::buildNaiveNodeGraphKernel<<<grid, blk, 0, stream>>>(
reference_nodes,
node_graph.RawPtr()
);
}
void surfelwarp::NodeGraphBuilderNaive::BuildNodeGraph(
const DeviceArrayView<float4>& reference_nodes,
DeviceBufferArray<ushort2>& node_graph,
cudaStream_t stream
) {
node_graph.ResizeArrayOrException(reference_nodes.Size() * Constants::kNumNodeGraphNeigbours);
buildNodeGraph(reference_nodes, node_graph.ArraySlice(), stream);
} |
eee3772364d309a0f5e086471cb2d36e13395378.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| eee3772364d309a0f5e086471cb2d36e13395378.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<128, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
550f31b111673a625b28490a1e25fa0c4c20ccc8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void update(int* U, int* F, int* d, int* del, size_t gSize) {
int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x;
if (globalThreadId < gSize) {
F[globalThreadId] = 0;
if(U[globalThreadId] && d[globalThreadId] < del[0]) {
U[globalThreadId] = 0;
F[globalThreadId] = 1;
}
}
} | 550f31b111673a625b28490a1e25fa0c4c20ccc8.cu | #include "includes.h"
__global__ void update(int* U, int* F, int* d, int* del, size_t gSize) {
int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x;
if (globalThreadId < gSize) {
F[globalThreadId] = 0;
if(U[globalThreadId] && d[globalThreadId] < del[0]) {
U[globalThreadId] = 0;
F[globalThreadId] = 1;
}
}
} |
5049377c15b7d5f2d72cd633d47ba66042f08979.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cuda_help.h"
#include "equal_reduce.h"
#include "proj.h"
using namespace Legion;
namespace legate {
namespace numpy {
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_eq_reduce_1d(DeferredValue<bool> result, const AccessorRO<T, 1> in1, const AccessorRO<T, 1> in2, const Point<1> origin,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
const coord_t x = origin[0] + offset;
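// out-of-range threads contribute 1 (equal) so padding does not break the all-equal reduction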
int value = (offset >= max) ? 1 : (in1[x] == in2[x]) ? 1 : 0;
reduce_bool(result, value);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_eq_reduce_2d(DeferredValue<bool> result, const AccessorRO<T, 2> in1, const AccessorRO<T, 2> in2, const Point<2> origin,
const Point<1> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
int value = (offset >= max) ? 1 : (in1[x][y] == in2[x][y]) ? 1 : 0;
reduce_bool(result, value);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_eq_reduce_3d(DeferredValue<bool> result, const AccessorRO<T, 3> in1, const AccessorRO<T, 3> in2, const Point<3> origin,
const Point<2> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
int value = (offset >= max) ? 1 : (in1[x][y][z] == in2[x][y][z]) ? 1 : 0;
reduce_bool(result, value);
}
template<typename T>
/*static*/ DeferredValue<bool> EqualReducTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime) {
DeferredValue<bool> result(true /*initial value*/);
LegateDeserializer derez(task->args, task->arglen);
const int dim = derez.unpack_dimension();
switch (dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 1> in1 = derez.unpack_accessor_RO<T, 1>(regions[0], rect);
const AccessorRO<T, 1> in2 = derez.unpack_accessor_RO<T, 1>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(( legate_eq_reduce_1d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, result, in1, in2, rect.lo, volume);
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 2> in1 = derez.unpack_accessor_RO<T, 2>(regions[0], rect);
const AccessorRO<T, 2> in2 = derez.unpack_accessor_RO<T, 2>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
hipLaunchKernelGGL(( legate_eq_reduce_2d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, result, in1, in2, rect.lo, Point<1>(pitch), volume);
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 3> in1 = derez.unpack_accessor_RO<T, 3>(regions[0], rect);
const AccessorRO<T, 3> in2 = derez.unpack_accessor_RO<T, 3>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
hipLaunchKernelGGL(( legate_eq_reduce_3d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, result, in1, in2, rect.lo, Point<2>(pitch), volume);
break;
}
default:
assert(false);
}
return result;
}
INSTANTIATE_DEFERRED_VALUE_TASK_VARIANT(EqualReducTask, bool, gpu_variant)
} // namespace numpy
} // namespace legate
| 5049377c15b7d5f2d72cd633d47ba66042f08979.cu | /* Copyright 2021 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#include "cuda_help.h"
#include "equal_reduce.h"
#include "proj.h"
using namespace Legion;
namespace legate {
namespace numpy {
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_eq_reduce_1d(DeferredValue<bool> result, const AccessorRO<T, 1> in1, const AccessorRO<T, 1> in2, const Point<1> origin,
const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
const coord_t x = origin[0] + offset;
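// out-of-range threads contribute 1 (equal) so padding does not break the all-equal reduction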
int value = (offset >= max) ? 1 : (in1[x] == in2[x]) ? 1 : 0;
reduce_bool(result, value);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_eq_reduce_2d(DeferredValue<bool> result, const AccessorRO<T, 2> in1, const AccessorRO<T, 2> in2, const Point<2> origin,
const Point<1> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + offset % pitch[0];
int value = (offset >= max) ? 1 : (in1[x][y] == in2[x][y]) ? 1 : 0;
reduce_bool(result, value);
}
template<typename T>
__global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM)
legate_eq_reduce_3d(DeferredValue<bool> result, const AccessorRO<T, 3> in1, const AccessorRO<T, 3> in2, const Point<3> origin,
const Point<2> pitch, const size_t max) {
const size_t offset = blockIdx.x * blockDim.x + threadIdx.x;
const coord_t x = origin[0] + offset / pitch[0];
const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1];
const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1];
int value = (offset >= max) ? 1 : (in1[x][y][z] == in2[x][y][z]) ? 1 : 0;
reduce_bool(result, value);
}
template<typename T>
/*static*/ DeferredValue<bool> EqualReducTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions,
Context ctx, Runtime* runtime) {
DeferredValue<bool> result(true /*initial value*/);
LegateDeserializer derez(task->args, task->arglen);
const int dim = derez.unpack_dimension();
switch (dim) {
case 1: {
const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 1> in1 = derez.unpack_accessor_RO<T, 1>(regions[0], rect);
const AccessorRO<T, 1> in2 = derez.unpack_accessor_RO<T, 1>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
legate_eq_reduce_1d<T><<<blocks, THREADS_PER_BLOCK>>>(result, in1, in2, rect.lo, volume);
break;
}
case 2: {
const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 2> in1 = derez.unpack_accessor_RO<T, 2>(regions[0], rect);
const AccessorRO<T, 2> in2 = derez.unpack_accessor_RO<T, 2>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t pitch = rect.hi[1] - rect.lo[1] + 1;
legate_eq_reduce_2d<T><<<blocks, THREADS_PER_BLOCK>>>(result, in1, in2, rect.lo, Point<1>(pitch), volume);
break;
}
case 3: {
const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez);
if (rect.empty()) break;
const AccessorRO<T, 3> in1 = derez.unpack_accessor_RO<T, 3>(regions[0], rect);
const AccessorRO<T, 3> in2 = derez.unpack_accessor_RO<T, 3>(regions[1], rect);
const size_t volume = rect.volume();
const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
const coord_t diffy = rect.hi[1] - rect.lo[1] + 1;
const coord_t diffz = rect.hi[2] - rect.lo[2] + 1;
const coord_t pitch[2] = {diffy * diffz, diffz};
legate_eq_reduce_3d<T><<<blocks, THREADS_PER_BLOCK>>>(result, in1, in2, rect.lo, Point<2>(pitch), volume);
break;
}
default:
assert(false);
}
return result;
}
INSTANTIATE_DEFERRED_VALUE_TASK_VARIANT(EqualReducTask, bool, gpu_variant)
} // namespace numpy
} // namespace legate
|
8bb0918f407d25fcdd8562a94245d97df4202396.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#include "assert.h"
#include "math.h"
#include <iostream>
using namespace std;
#define N 100000
#define BLOCK_SIZE 1024
#define MAX_ERR 1e-6
__global__ void add(int *a, int *b, int *c, int count)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < count)
{
c[idx] = a[idx] + b[idx];
}
}
int main()
{
int *ha, *hb, *hc;
int *da, *db, *dc;
ha = (int *)malloc(sizeof(int) * N);
hb = (int *)malloc(sizeof(int) * N);
hc = (int *)malloc(sizeof(int) * N);
for (int i = 0; i < N; i++)
{
ha[i] = -i;
hb[i] = i * i;
}
hipMalloc((void **)&da, sizeof(int) * N);
hipMalloc((void **)&db, sizeof(int) * N);
hipMalloc((void **)&dc, sizeof(int) * N);
hipMemcpy(da, ha, sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(db, hb, sizeof(int) * N, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3((N + BLOCK_SIZE) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, da, db, dc, N);
hipMemcpy(hc, dc, sizeof(int) * N, hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
assert(abs(hc[i] - ha[i] - hb[i]) < MAX_ERR);
}
cout << "passed" << endl;
free(ha);
free(hb);
free(hc);
hipFree(da);
hipFree(db);
hipFree(dc);
return 0;
}
| 8bb0918f407d25fcdd8562a94245d97df4202396.cu | #include "stdio.h"
#include "assert.h"
#include "math.h"
#include <iostream>
using namespace std;
#define N 100000
#define BLOCK_SIZE 1024
#define MAX_ERR 1e-6
__global__ void add(int *a, int *b, int *c, int count)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < count)
{
c[idx] = a[idx] + b[idx];
}
}
int main()
{
int *ha, *hb, *hc;
int *da, *db, *dc;
ha = (int *)malloc(sizeof(int) * N);
hb = (int *)malloc(sizeof(int) * N);
hc = (int *)malloc(sizeof(int) * N);
for (int i = 0; i < N; i++)
{
ha[i] = -i;
hb[i] = i * i;
}
cudaMalloc((void **)&da, sizeof(int) * N);
cudaMalloc((void **)&db, sizeof(int) * N);
cudaMalloc((void **)&dc, sizeof(int) * N);
cudaMemcpy(da, ha, sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(db, hb, sizeof(int) * N, cudaMemcpyHostToDevice);
add<<<(N + BLOCK_SIZE) / BLOCK_SIZE, BLOCK_SIZE>>>(da, db, dc, N);
cudaMemcpy(hc, dc, sizeof(int) * N, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
assert(abs(hc[i] - ha[i] - hb[i]) < MAX_ERR);
}
cout << "passed" << endl;
free(ha);
free(hb);
free(hc);
cudaFree(da);
cudaFree(db);
cudaFree(dc);
return 0;
}
|
9c3a07bde4b1249d23c833f2ec98e3f32bde191e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "gputimer.h"
//#include "utils.h"
const int N= 1024; // matrix size will be NxN
int compare_matrices(float *gpu, float *ref, int N)
{
int result = 0;
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
if (ref[i + j*N] != gpu[i + j*N])
{//printf("i: %f\tj: %f\tref: %i\tgpu: %i\n",i, j, ref[i + j*N], gpu[i + j*N]);
result = 1;}
return result;
}
// fill a matrix with sequential numbers in the range 0..N-1
void fill_matrix(float *mat, int N)
{
for(int j=0; j < N * N; j++)
mat[j] = (float) j;
}
// The following functions and kernels are for your reference
void
transpose_CPU(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched on a single thread
__global__ void
transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per row of output matrix
__global__ void
transpose_parallel_per_row(float in[], float out[])
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
for(int j=0; j < N; j++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// Write two tiled versions of transpose -- One using shared memory.
// To be launched with one thread per element, in KxK threadblocks.
// You will determine for each thread (x,y) in tile the element (i,j) of global output matrix.
__global__ void
transpose_parallel_per_element_tiled(float in[], float out[])
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
//Ignore any thread mapped to element outside the matrix
if (x >= N || y >= N)
return;
//Swap two elements.
out[x + y*N] = in[y + x*N]; // out(j,i) = in(i,j)
}
__global__ void
transpose_parallel_per_element_tiled_shared(float in[], float out[])
{
extern __shared__ float tileData[];
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= N || y >= N)
return;
int blockStart = N * blockIdx.y * blockDim.y + blockIdx.x * blockDim.x;
//First thread will copy the data into shared memory
if(threadIdx.x == 0 && threadIdx.y == 0) {
for(int x = 0; x < blockDim.x; x++) {
for(int y = 0; y < blockDim.y; y++) {
tileData[x+blockDim.x*y] = in[blockStart + x + N * y];
}
}
}
__syncthreads();
out[y + x*N] = tileData[threadIdx.x + threadIdx.y * blockDim.x];
}
int main(int argc, char **argv)
{
int numbytes = N * N * sizeof(float);
float *in = (float *) malloc(numbytes);
float *out = (float *) malloc(numbytes);
float *gold = (float *) malloc(numbytes);
fill_matrix(in, N);
transpose_CPU(in, gold);
float *d_in, *d_out;
hipMalloc(&d_in, numbytes);
hipMalloc(&d_out, numbytes);
hipMemcpy(d_in, in, numbytes, hipMemcpyHostToDevice);
GpuTimer timer;
timer.Start();
hipLaunchKernelGGL(( transpose_serial), dim3(1),dim3(1), 0, 0, d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;}
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_serial: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
hipMemcpy(d_out, d_in, numbytes, hipMemcpyDeviceToDevice); //clean d_out
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_row), dim3(1),dim3(N), 0, 0, d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;} //clean out
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_row: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
hipMemcpy(d_out, d_in, numbytes, hipMemcpyDeviceToDevice); //clean d_out
// Tiled versions
int K = 20;
if(argc > 1) {
K = atoi(argv[1]);
}
int gridWidth = ceil(1.0 * N / K);
dim3 blocks_tiled(gridWidth,gridWidth);
dim3 threads_tiled(K,K);
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled), dim3(blocks_tiled),dim3(threads_tiled), 0, 0, d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;}
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
hipMemcpy(d_out, d_in, numbytes, hipMemcpyDeviceToDevice); //clean d_out
dim3 blocks_tiled_sh(gridWidth,gridWidth);
dim3 threads_tiled_sh(K,K);
size_t sharedMemSize = K * K * sizeof(float);
timer.Start();
hipLaunchKernelGGL(( transpose_parallel_per_element_tiled_shared), dim3(blocks_tiled_sh),dim3(threads_tiled_sh), sharedMemSize, 0, d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;}
hipMemcpy(out, d_out, numbytes, hipMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled_shared %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
hipFree(d_in);
hipFree(d_out);
}
| 9c3a07bde4b1249d23c833f2ec98e3f32bde191e.cu | #include <stdio.h>
#include "gputimer.h"
//#include "utils.h"
const int N= 1024; // matrix size will be NxN
int compare_matrices(float *gpu, float *ref, int N)
{
int result = 0;
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
if (ref[i + j*N] != gpu[i + j*N])
{//printf("i: %f\tj: %f\tref: %i\tgpu: %i\n",i, j, ref[i + j*N], gpu[i + j*N]);
result = 1;}
return result;
}
// fill a matrix with sequential numbers in the range 0..N*N-1
void fill_matrix(float *mat, int N)
{
for(int j=0; j < N * N; j++)
mat[j] = (float) j;
}
// The following functions and kernels are for your references
void
transpose_CPU(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched on a single thread
__global__ void
transpose_serial(float in[], float out[])
{
for(int j=0; j < N; j++)
for(int i=0; i < N; i++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// to be launched with one thread per row of output matrix
__global__ void
transpose_parallel_per_row(float in[], float out[])
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
for(int j=0; j < N; j++)
out[j + i*N] = in[i + j*N]; // out(j,i) = in(i,j)
}
// Write two tiled versions of transpose -- One using shared memory.
// To be launched with one thread per element, in KxK threadblocks.
// You will determine for each thread (x,y) in tile the element (i,j) of global output matrix.
__global__ void
transpose_parallel_per_element_tiled(float in[], float out[])
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
//Ignore any thread mapped to element outside the matrix
if (x >= N || y >= N)
return;
//Swap two elements.
out[x + y*N] = in[y + x*N]; // out(j,i) = in(i,j)
}
__global__ void
transpose_parallel_per_element_tiled_shared(float in[], float out[])
{
extern __shared__ float tileData[];
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= N || y >= N)
return;
int blockStart = N * blockIdx.y * blockDim.y + blockIdx.x * blockDim.x;
//First thread will copy the data into shared memory
if(threadIdx.x == 0 && threadIdx.y == 0) {
for(int x = 0; x < blockDim.x; x++) {
for(int y = 0; y < blockDim.y; y++) {
tileData[x+blockDim.x*y] = in[blockStart + x + N * y];
}
}
}
__syncthreads();
out[y + x*N] = tileData[threadIdx.x + threadIdx.y * blockDim.x];
}
int main(int argc, char **argv)
{
int numbytes = N * N * sizeof(float);
float *in = (float *) malloc(numbytes);
float *out = (float *) malloc(numbytes);
float *gold = (float *) malloc(numbytes);
fill_matrix(in, N);
transpose_CPU(in, gold);
float *d_in, *d_out;
cudaMalloc(&d_in, numbytes);
cudaMalloc(&d_out, numbytes);
cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice);
GpuTimer timer;
timer.Start();
transpose_serial<<<1,1>>>(d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;}
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_serial: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
cudaMemcpy(d_out, d_in, numbytes, cudaMemcpyDeviceToDevice); //clean d_out
timer.Start();
transpose_parallel_per_row<<<1,N>>>(d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;} //clean out
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_row: %g ms.\nVerifying ...%s\n",
timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
cudaMemcpy(d_out, d_in, numbytes, cudaMemcpyDeviceToDevice); //clean d_out
// Tiled versions
int K = 20;
if(argc > 1) {
K = atoi(argv[1]);
}
int gridWidth = ceil(1.0 * N / K);
dim3 blocks_tiled(gridWidth,gridWidth);
dim3 threads_tiled(K,K);
timer.Start();
transpose_parallel_per_element_tiled<<<blocks_tiled,threads_tiled>>>(d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;}
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
cudaMemcpy(d_out, d_in, numbytes, cudaMemcpyDeviceToDevice); //clean d_out
dim3 blocks_tiled_sh(gridWidth,gridWidth);
dim3 threads_tiled_sh(K,K);
size_t sharedMemSize = K * K * sizeof(float);
timer.Start();
transpose_parallel_per_element_tiled_shared<<<blocks_tiled_sh,threads_tiled_sh, sharedMemSize>>>(d_in, d_out);
timer.Stop();
for (int i=0; i < N*N; ++i){out[i] = 0.0;}
cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost);
printf("transpose_parallel_per_element_tiled_shared %dx%d: %g ms.\nVerifying ...%s\n",
K, K, timer.Elapsed(), compare_matrices(out, gold, N) ? "Failed" : "Success");
cudaFree(d_in);
cudaFree(d_out);
}
|
f1468166761ad6e167c039afcfa0c23291dd574d.hip | // !!! This is a file automatically generated by hipify!!!
#include "../../include/util/math_function_ptr.h"
//#include <hip/hip_runtime.h>
//#include <device_launch_parameters.h>
//
//#include "rocblas.h"
#include "../../include/config.h"
#include "../../include/util/common.h"
template<typename dtype>
__global__ void gpu_minus(const dtype* a, const dtype* b, const int size, const dtype alpha, dtype* c) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
c[index] = a[index] - alpha*b[index];
}
}
template<typename dtype>
__global__ void gpu_column_sum_plus(const dtype* a, const int row,
const int column, dtype* b) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < column) {
for (int i = 0; i < row; ++i) {
b[index] += a[i*column + index];
}
}
}
template<typename dtype>
__global__ void gpu_mmadd(const dtype* a, const dtype* b,
const int size, dtype* result) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
result[index] = a[index] + b[index];
}
}
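// One-hot encoding kernel: writes 1 at column a[i] of row i in b; the +0.1
// guards against truncation when casting the float label to int. Other
// entries of b are left unchanged, so b should be zero-filled beforehand.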
template<typename dtype>
__global__ void gpu_gen_git_label(const dtype* a, const int size, const int classes, dtype* b) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
b[index*classes + static_cast<int>(a[index] + 0.1)] = 1.;
}
}
template<typename dtype>
__global__ void gpu_argmax(const dtype* a, const int row, const int column, dtype* b) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < row) {
dtype t = a[index*column];
int m = 0;
for (int k = 1; k < column; ++k) {
if (t < a[index*column + k]) {
t = a[index*column + k];
m = k;
}
}
b[index] = m;
}
}
template<typename dtype>
__global__ void gpu_equal_count(const dtype* a, const dtype* b, const int size, int* count) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
if (a[index] == b[index]) atomicAdd(count, 1);
}
}
template<typename dtype>
__global__ void gpu_plus(const dtype* a, const int size, const dtype alpha, const dtype beta, dtype* b) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
b[index] = beta*b[index] + alpha*a[index];
}
}
namespace BigBang {
template<typename dtype>
void bigbang_gpu_minus(const dtype* a, const dtype* b, const int size, const dtype alpha, dtype* c) {
gpu_minus << <BigBangGetBlocks(size), THREAD_MAX_NUMS >> > (a, b, size, alpha, c);
}
template void bigbang_gpu_minus<float>(const float* a, const float* b, const int size, const float alpha, float* c);
template void bigbang_gpu_minus<double>(const double* a, const double* b, const int size, const double alpha, double* c);
template<>
void bigbang_gpu_gemm<float>(
bool trans_a,
bool trans_b,
int m,
int n,
int k,
const float alpha,
const float* a,
const float* b,
const float beta,
float* c) {
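	// hipBLAS (like cuBLAS) assumes column-major storage; the row-major inputs
	// are passed as their column-major transposes with the operands swapped
	// (B*A, with n and m exchanged), which yields the row-major product C.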
const int lda = trans_a ? m : k;
const int ldb = trans_b ? k : n;
hipblasOperation_t op_a = trans_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasOperation_t op_b = trans_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasSgemm(Config::Get().CublasHandle(), op_b, op_a, n, m, k, &alpha, b, ldb,
a, lda, &beta, c, n);
}
template<>
void bigbang_gpu_gemm<double>(
bool trans_a,
bool trans_b,
int m,
int n,
int k,
const double alpha,
const double* a,
const double* b,
const double beta,
double* c) {
const int lda = trans_a ? m : k;
const int ldb = trans_b ? k : n;
hipblasOperation_t op_a = trans_a ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasOperation_t op_b = trans_b ? HIPBLAS_OP_T : HIPBLAS_OP_N;
hipblasDgemm(Config::Get().CublasHandle(), op_b, op_a, n, m, k, &alpha, b, ldb,
a, lda, &beta, c, n);
}
template<typename dtype>
void bigbang_gpu_column_sum_plus(const dtype* a, const int row, const int column, dtype* b) {
hipMemset(b, 0, sizeof(dtype)*column);
gpu_column_sum_plus << <BigBangGetBlocks(column), THREAD_MAX_NUMS >> > (a, row, column, b);
}
template void bigbang_gpu_column_sum_plus<float>(const float* a, const int row, const int column, float* b);
template void bigbang_gpu_column_sum_plus<double>(const double* a, const int row, const int column, double* b);
template<typename dtype>
void bigbang_gpu_mmadd(const dtype* a, const dtype* b,
const int size, dtype* result) {
gpu_mmadd << <BigBangGetBlocks(size), THREAD_MAX_NUMS >> > (a, b, size, result);
}
template void bigbang_gpu_mmadd<float>(const float* a, const float* b,
const int size, float* result);
template void bigbang_gpu_mmadd<double>(const double* a, const double* b,
const int size, double* result);
template<typename dtype>
void bigbang_gpu_argmax(const dtype* a, const int row, const int column, dtype* b) {
gpu_argmax << <BigBangGetBlocks(row), THREAD_MAX_NUMS >> > (a, row, column, b);
}
template void bigbang_gpu_argmax<float>(const float* a, const int row, const int column, float* b);
template void bigbang_gpu_argmax<double>(const double* a, const int row, const int column, double* b);
template<typename dtype>
void bigbang_gpu_equals_count(const dtype* a, const dtype* b, const int size, int* count) {
gpu_equal_count << <BigBangGetBlocks(size), THREAD_MAX_NUMS >> >(a, b, size, count) ;
}
template void bigbang_gpu_equals_count<float>(const float* a, const float* b, const int size, int* count);
template void bigbang_gpu_equals_count<double>(const double* a, const double* b, const int size, int* count);
template<typename dtype>
void bigbang_gpu_gen_fit_label(const dtype* a, const int size, const int classes, dtype* b) {
gpu_gen_git_label << <BigBangGetBlocks(size), THREAD_MAX_NUMS >> > (a, size, classes, b);
}
template void bigbang_gpu_gen_fit_label<float>(const float* a, const int size, const int classes, float* b);
template void bigbang_gpu_gen_fit_label<double>(const double* a, const int size, const int classes, double* b);
void bigbang_gpu_random_uniform(const int size, unsigned int* output) {
hiprandGenerate(Config::Get().CurandGenerator(), output, size);
}
template<typename dtype>
void bigbang_gpu_plus(const dtype* a, const int size, const dtype alpha, const dtype beta, dtype* b) {
gpu_plus << <BigBangGetBlocks(size), THREAD_MAX_NUMS >> > (a, size, alpha, beta, b);
}
template void bigbang_gpu_plus<float>(const float* a, const int size, const float alpha, const float beta, float* b);
template void bigbang_gpu_plus<double>(const double* a, const int size, const double alpha, const double beta, double* b);
}
| f1468166761ad6e167c039afcfa0c23291dd574d.cu | #include "../../include/util/math_function_ptr.h"
//#include <cuda_runtime.h>
//#include <device_launch_parameters.h>
//
//#include "cublas_v2.h"
#include "../../include/config.h"
#include "../../include/util/common.h"
template<typename dtype>
__global__ void gpu_minus(const dtype* a, const dtype* b, const int size, const dtype alpha, dtype* c) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
c[index] = a[index] - alpha*b[index];
}
}
template<typename dtype>
__global__ void gpu_column_sum_plus(const dtype* a, const int row,
const int column, dtype* b) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < column) {
for (int i = 0; i < row; ++i) {
b[index] += a[i*column + index];
}
}
}
template<typename dtype>
__global__ void gpu_mmadd(const dtype* a, const dtype* b,
const int size, dtype* result) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
result[index] = a[index] + b[index];
}
}
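// One-hot encoding kernel: writes 1 at column a[i] of row i in b; the +0.1
// guards against truncation when casting the float label to int. Other
// entries of b are left unchanged, so b should be zero-filled beforehand.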
template<typename dtype>
__global__ void gpu_gen_git_label(const dtype* a, const int size, const int classes, dtype* b) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
b[index*classes + static_cast<int>(a[index] + 0.1)] = 1.;
}
}
template<typename dtype>
__global__ void gpu_argmax(const dtype* a, const int row, const int column, dtype* b) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < row) {
dtype t = a[index*column];
int m = 0;
for (int k = 1; k < column; ++k) {
if (t < a[index*column + k]) {
t = a[index*column + k];
m = k;
}
}
b[index] = m;
}
}
template<typename dtype>
__global__ void gpu_equal_count(const dtype* a, const dtype* b, const int size, int* count) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
if (a[index] == b[index]) atomicAdd(count, 1);
}
}
template<typename dtype>
__global__ void gpu_plus(const dtype* a, const int size, const dtype alpha, const dtype beta, dtype* b) {
const int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
b[index] = beta*b[index] + alpha*a[index];
}
}
namespace BigBang {
template<typename dtype>
void bigbang_gpu_minus(const dtype* a, const dtype* b, const int size, const dtype alpha, dtype* c) {
gpu_minus << <BigBangGetBlocks(size), THREAD_MAX_NUMS >> > (a, b, size, alpha, c);
}
template void bigbang_gpu_minus<float>(const float* a, const float* b, const int size, const float alpha, float* c);
template void bigbang_gpu_minus<double>(const double* a, const double* b, const int size, const double alpha, double* c);
template<>
void bigbang_gpu_gemm<float>(
bool trans_a,
bool trans_b,
int m,
int n,
int k,
const float alpha,
const float* a,
const float* b,
const float beta,
float* c) {
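	// cuBLAS assumes column-major storage; the row-major inputs are passed as
	// their column-major transposes with the operands swapped (B*A, with n and
	// m exchanged), which yields the row-major product C.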
const int lda = trans_a ? m : k;
const int ldb = trans_b ? k : n;
cublasOperation_t op_a = trans_a ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasOperation_t op_b = trans_b ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasSgemm(Config::Get().CublasHandle(), op_b, op_a, n, m, k, &alpha, b, ldb,
a, lda, &beta, c, n);
}
template<>
void bigbang_gpu_gemm<double>(
bool trans_a,
bool trans_b,
int m,
int n,
int k,
const double alpha,
const double* a,
const double* b,
const double beta,
double* c) {
const int lda = trans_a ? m : k;
const int ldb = trans_b ? k : n;
cublasOperation_t op_a = trans_a ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasOperation_t op_b = trans_b ? CUBLAS_OP_T : CUBLAS_OP_N;
cublasDgemm(Config::Get().CublasHandle(), op_b, op_a, n, m, k, &alpha, b, ldb,
a, lda, &beta, c, n);
}
template<typename dtype>
void bigbang_gpu_column_sum_plus(const dtype* a, const int row, const int column, dtype* b) {
cudaMemset(b, 0, sizeof(dtype)*column);
gpu_column_sum_plus << <BigBangGetBlocks(column), THREAD_MAX_NUMS >> > (a, row, column, b);
}
template void bigbang_gpu_column_sum_plus<float>(const float* a, const int row, const int column, float* b);
template void bigbang_gpu_column_sum_plus<double>(const double* a, const int row, const int column, double* b);
template<typename dtype>
void bigbang_gpu_mmadd(const dtype* a, const dtype* b,
const int size, dtype* result) {
gpu_mmadd << <BigBangGetBlocks(size), THREAD_MAX_NUMS >> > (a, b, size, result);
}
template void bigbang_gpu_mmadd<float>(const float* a, const float* b,
const int size, float* result);
template void bigbang_gpu_mmadd<double>(const double* a, const double* b,
const int size, double* result);
template<typename dtype>
void bigbang_gpu_argmax(const dtype* a, const int row, const int column, dtype* b) {
gpu_argmax << <BigBangGetBlocks(row), THREAD_MAX_NUMS >> > (a, row, column, b);
}
template void bigbang_gpu_argmax<float>(const float* a, const int row, const int column, float* b);
template void bigbang_gpu_argmax<double>(const double* a, const int row, const int column, double* b);
template<typename dtype>
void bigbang_gpu_equals_count(const dtype* a, const dtype* b, const int size, int* count) {
gpu_equal_count << <BigBangGetBlocks(size), THREAD_MAX_NUMS >> >(a, b, size, count) ;
}
template void bigbang_gpu_equals_count<float>(const float* a, const float* b, const int size, int* count);
template void bigbang_gpu_equals_count<double>(const double* a, const double* b, const int size, int* count);
template<typename dtype>
void bigbang_gpu_gen_fit_label(const dtype* a, const int size, const int classes, dtype* b) {
gpu_gen_git_label << <BigBangGetBlocks(size), THREAD_MAX_NUMS >> > (a, size, classes, b);
}
template void bigbang_gpu_gen_fit_label<float>(const float* a, const int size, const int classes, float* b);
template void bigbang_gpu_gen_fit_label<double>(const double* a, const int size, const int classes, double* b);
void bigbang_gpu_random_uniform(const int size, unsigned int* output) {
curandGenerate(Config::Get().CurandGenerator(), output, size);
}
template<typename dtype>
void bigbang_gpu_plus(const dtype* a, const int size, const dtype alpha, const dtype beta, dtype* b) {
gpu_plus << <BigBangGetBlocks(size), THREAD_MAX_NUMS >> > (a, size, alpha, beta, b);
}
template void bigbang_gpu_plus<float>(const float* a, const int size, const float alpha, const float beta, float* b);
template void bigbang_gpu_plus<double>(const double* a, const int size, const double alpha, const double beta, double* b);
}
|
8614b7b0297f1934c27463485523dad73b6fe94e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file compute_normal.cu
* @author xiaotaw ([email protected])
* @brief compute normal map, reference: https://github.com/weigao95/surfelwarp
* @version 0.1
* @date 2021-04-18
* @copyright Copyright (c) 2021
*/
#include "img_proc/cuda/compute_normal.h"
#include "img_proc/cuda/cuda_snippets.hpp"
#include "img_proc/cuda/utils/eigen.hpp"
namespace device {
__device__ float3 ReadFloat3(const PtrStepSz<float> map, const int &u,
const int &v) {
const float x = map.ptr(v, 0)[u];
const float y = map.ptr(v, 1)[u];
const float z = map.ptr(v, 2)[u];
return make_float3(x, y, z);
}
__device__ void WriteFloat4(PtrStepSz<float> map, const float4 &val,
const int &u, const int &v) {
map.ptr(v, 0)[u] = val.x;
map.ptr(v, 1)[u] = val.y;
map.ptr(v, 2)[u] = val.z;
map.ptr(v, 3)[u] = val.w;
}
/**
 * @brief Kernel that computes the normal map from the vertex map
 * @param vertex_map input vertex map
 * @param normal_map output normal map
 * @todo 1. try to use shared memory to speed up,
 *    2. try fast mean & std calculation in an iterative way
*/
__global__ void ComputeNormalKernel(const PtrStepSz<float> vertex_map,
PtrStepSz<float> normal_map) {
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int cols = vertex_map.cols;
const int rows = vertex_map.rows;
if (x >= cols || y >= rows) {
return;
}
  // All points within this rectangular window are treated as neighbors
const int neighbor_radius = 3;
const int neighbor_points_number =
(2 * neighbor_radius + 1) * (2 * neighbor_radius + 1);
float4 normal = {0, 0, 0, 0};
float3 vertex = ReadFloat3(vertex_map, x, y);
if (!IsZeroVertex(vertex)) {
    // Gather statistics of the surrounding points (count and centroid)
int cnt = 0;
float3 centroid = make_float3(0, 0, 0);
for (int i = -neighbor_radius; i <= neighbor_radius; ++i) {
for (int j = -neighbor_radius; j <= neighbor_radius; ++j) {
if (x + i < 0 || x + i >= cols || y + j < 0 || y + j >= rows) {
continue;
}
float3 v = ReadFloat3(vertex_map, x + i, y + j);
if (!IsZeroVertex(v)) {
centroid += v;
cnt++;
}
}
}
    // Compute the normal only when more than half of the neighbor points are valid
if (cnt * 2 > neighbor_points_number) {
centroid *= (1.0f / cnt);
float cov_half[6] = {0};
for (int i = -neighbor_radius; i <= neighbor_radius; ++i) {
for (int j = -neighbor_radius; j <= neighbor_radius; ++j) {
if (x + i < 0 || x + i >= cols || y + j < 0 || y + j >= rows) {
continue;
}
float3 v = ReadFloat3(vertex_map, x + i, y + j);
float3 d = v - centroid;
cov_half[0] += d.x * d.x;
cov_half[1] += d.x * d.y;
cov_half[2] += d.x * d.z;
cov_half[3] += d.y * d.y;
cov_half[4] += d.y * d.z;
cov_half[5] += d.z * d.z;
}
}
      // The eigenvector with the smallest eigenvalue is the normal
Eigen33::Mat33 tmp, vec_tmp, evecs;
float3 evals;
Eigen33 eigen33(cov_half);
eigen33.compute(tmp, vec_tmp, evecs, evals);
      // The smallest eigenvalue in evals is evals[0]; the corresponding eigenvector is evecs[0]
normal.x = evecs[0].x;
normal.y = evecs[0].y;
normal.z = evecs[0].z;
      // Correct the normal orientation so that it points toward the camera
if (dot(vertex, make_float3(normal.x, normal.y, normal.z)) > 0) {
normal *= -1.0;
}
// curvature surface change
float evals_sum = evals.x + evals.y + evals.z;
normal.w = (evals_sum == 0) ? 0 : ::fabs(evals.x / evals_sum);
}
}
WriteFloat4(normal_map, normal, x, y);
}
} // namespace device
void ComputeNormal(const DeviceArray3D<float> vertex_map,
DeviceArray3D<float> normal_map, hipStream_t stream) {
dim3 block(16, 16);
dim3 grid(DivideUp(vertex_map.cols(), block.x),
DivideUp(vertex_map.rows(), block.y));
hipLaunchKernelGGL(( device::ComputeNormalKernel), dim3(grid), dim3(block), 0, stream, vertex_map,
normal_map);
CudaSafeCall(hipStreamSynchronize(stream));
CudaSafeCall(hipGetLastError());
} | 8614b7b0297f1934c27463485523dad73b6fe94e.cu | /**
* @file compute_normal.cu
* @author xiaotaw ([email protected])
* @brief compute normal map, reference: https://github.com/weigao95/surfelwarp
* @version 0.1
* @date 2021-04-18
* @copyright Copyright (c) 2021
*/
#include "img_proc/cuda/compute_normal.h"
#include "img_proc/cuda/cuda_snippets.hpp"
#include "img_proc/cuda/utils/eigen.hpp"
namespace device {
__device__ float3 ReadFloat3(const PtrStepSz<float> map, const int &u,
const int &v) {
const float x = map.ptr(v, 0)[u];
const float y = map.ptr(v, 1)[u];
const float z = map.ptr(v, 2)[u];
return make_float3(x, y, z);
}
__device__ void WriteFloat4(PtrStepSz<float> map, const float4 &val,
const int &u, const int &v) {
map.ptr(v, 0)[u] = val.x;
map.ptr(v, 1)[u] = val.y;
map.ptr(v, 2)[u] = val.z;
map.ptr(v, 3)[u] = val.w;
}
/**
 * @brief Kernel that computes the normal map from the vertex map
 * @param vertex_map input vertex map
 * @param normal_map output normal map
 * @todo 1. try to use shared memory to speed up,
 *    2. try fast mean & std calculation in an iterative way
*/
__global__ void ComputeNormalKernel(const PtrStepSz<float> vertex_map,
PtrStepSz<float> normal_map) {
const int x = threadIdx.x + blockIdx.x * blockDim.x;
const int y = threadIdx.y + blockIdx.y * blockDim.y;
const int cols = vertex_map.cols;
const int rows = vertex_map.rows;
if (x >= cols || y >= rows) {
return;
}
  // All points within this rectangular window are treated as neighbors
const int neighbor_radius = 3;
const int neighbor_points_number =
(2 * neighbor_radius + 1) * (2 * neighbor_radius + 1);
float4 normal = {0, 0, 0, 0};
float3 vertex = ReadFloat3(vertex_map, x, y);
if (!IsZeroVertex(vertex)) {
    // Gather statistics of the surrounding points (count and centroid)
int cnt = 0;
float3 centroid = make_float3(0, 0, 0);
for (int i = -neighbor_radius; i <= neighbor_radius; ++i) {
for (int j = -neighbor_radius; j <= neighbor_radius; ++j) {
if (x + i < 0 || x + i >= cols || y + j < 0 || y + j >= rows) {
continue;
}
float3 v = ReadFloat3(vertex_map, x + i, y + j);
if (!IsZeroVertex(v)) {
centroid += v;
cnt++;
}
}
}
    // Compute the normal only when more than half of the neighbor points are valid
if (cnt * 2 > neighbor_points_number) {
centroid *= (1.0f / cnt);
float cov_half[6] = {0};
for (int i = -neighbor_radius; i <= neighbor_radius; ++i) {
for (int j = -neighbor_radius; j <= neighbor_radius; ++j) {
if (x + i < 0 || x + i >= cols || y + j < 0 || y + j >= rows) {
continue;
}
float3 v = ReadFloat3(vertex_map, x + i, y + j);
float3 d = v - centroid;
cov_half[0] += d.x * d.x;
cov_half[1] += d.x * d.y;
cov_half[2] += d.x * d.z;
cov_half[3] += d.y * d.y;
cov_half[4] += d.y * d.z;
cov_half[5] += d.z * d.z;
}
}
      // The eigenvector with the smallest eigenvalue is the normal
Eigen33::Mat33 tmp, vec_tmp, evecs;
float3 evals;
Eigen33 eigen33(cov_half);
eigen33.compute(tmp, vec_tmp, evecs, evals);
      // The smallest eigenvalue in evals is evals[0]; the corresponding eigenvector is evecs[0]
normal.x = evecs[0].x;
normal.y = evecs[0].y;
normal.z = evecs[0].z;
      // Correct the normal orientation so that it points toward the camera
if (dot(vertex, make_float3(normal.x, normal.y, normal.z)) > 0) {
normal *= -1.0;
}
// curvature surface change
float evals_sum = evals.x + evals.y + evals.z;
normal.w = (evals_sum == 0) ? 0 : std::fabs(evals.x / evals_sum);
}
}
WriteFloat4(normal_map, normal, x, y);
}
} // namespace device
void ComputeNormal(const DeviceArray3D<float> vertex_map,
DeviceArray3D<float> normal_map, cudaStream_t stream) {
dim3 block(16, 16);
dim3 grid(DivideUp(vertex_map.cols(), block.x),
DivideUp(vertex_map.rows(), block.y));
device::ComputeNormalKernel<<<grid, block, 0, stream>>>(vertex_map,
normal_map);
CudaSafeCall(cudaStreamSynchronize(stream));
CudaSafeCall(cudaGetLastError());
} |
0be8dc4cf4cababf86b8ae2ae8e5e3140f73911e.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
texture<float, 1, hipReadModeElementType> tex_mx;
texture<float, 1, hipReadModeElementType> tex_my;
texture<float, 1, hipReadModeElementType> tex_mz;
#define cfd_NBLOCKS 16*6*2
//#define cfd_SUPER_BLOCKS_PER_SM 5
#define cfd_BLOCK_SIZE 256
//const int cfd_BLOCK_SIZE = 256;
const int cfd_nBlksPerCluster = 16;
const int cfd_nAtom = cfd_BLOCK_SIZE * cfd_NBLOCKS;
const int cfd_maxNeighbors = 8;
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
int* neighborList, int blockSz)
{
//create non-uniform data sharing
//but randomize the order so that tasks sharing the same data are not neighboring tasks
vector<int> atomInds(nAtom);
vector<int> blkInds((nAtom+blockSz-1)/blockSz);
for(int i=0; i<blkInds.size(); ++i)
blkInds[i] = i;
random_shuffle(blkInds.begin(), blkInds.end());
int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
for(int i=0; i<blkInds.size(); ++i)
blkOrder[i] = blkInds[i];
int j=0;
for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
{
int blkInd = *it;
for(int i=0; i<blockSz; ++i)
atomInds[j++] = blkInd*blockSz + i;
}
int superBlockSz = blockSz * cfd_nBlksPerCluster;
// Build Neighbor List
for (int i = 0; i < nAtom; i++)
{
int start = i - i%superBlockSz; //difference is here
//int end = i + (superBlockSz - i%superBlockSz)-1;
int nNeighbors = 0;
do {
int j = start + rand() % superBlockSz;
if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
nNeighbors ++;
} while(nNeighbors<cfd_maxNeighbors);
}
return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);
//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int i=0;i<cfd_NBLOCKS*cfd_BLOCK_SIZE;i++){
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
return;}
}
printf("GOOD! passed!\n");
return;
}
__global__ void cfd_kernel(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag)
{
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = tex1Dfetch(tex_mx,i);//mx[i];
  momentum_i.y = tex1Dfetch(tex_my,i);//my[i];
momentum_i.z = tex1Dfetch(tex_mz,i);//mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x =tex1Dfetch(tex_mx,nb);// mx[nb];
momentum_nb.y = tex1Dfetch(tex_my,nb);//my[nb];
momentum_nb.z =tex1Dfetch(tex_mz,nb);// mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
//if (threadIdx.x==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
hipSetDevice(2);
srand(2013);
// Allocate problem data on host
//posVecType* position;
//forceVecType* force;
float *density;
float *mx;
float *my;
float *mz;
float *density_energy;
float *normals;
float *fluxes;
int* cfd_neighborList;
hipHostMalloc((void**)&density, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mx, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&my, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&mz, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&density_energy, cfd_nAtom*sizeof(float));
hipHostMalloc((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipHostMalloc((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
hipHostMalloc((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
// Allocate device memory for position and force
//forceVecType* d_force;
//posVecType* d_position;
float *d_density;
float *d_mx;
float *d_my;
float *d_mz;
float *d_density_energy;
float *d_normals;
float *d_fluxes;
hipMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
hipMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
hipMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
hipMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
//hipMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType));
// Allocate device memory for neighbor list
int* d_cfd_neighborList;
hipMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
//cout << "Initializing test problem (this can take several "
// "minutes for large problems)\n";
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < cfd_nAtom; i++)
{
density[i] = (float)(drand48());
density_energy[i] = (float)(drand48() );
mx[i] = (float)(drand48() );
my[i] = (float)(drand48() );
mz[i] = (float)(drand48() );
/*
density[i] = 1.1+i*0.01;
density_energy[i] = 1.1+i*0.01;
mx[i] = 1.1+i*0.01;
my[i] = 1.1+i*0.01;
mz[i] = 1.1+i*0.01;
*/
}
for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
normals[i] = (float)(drand48());
cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, cfd_BLOCK_SIZE);
hipMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), hipMemcpyHostToDevice);
// Copy data to GPU
hipMemcpy(d_density, density, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_my, my, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), hipMemcpyHostToDevice);
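  // Bind the momentum arrays to 1D textures so the kernel fetches them through
  // the texture cache (tex1Dfetch) instead of regular global memory loads.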
hipChannelFormatDesc chDesc1 = hipCreateChannelDesc<float>();
hipChannelFormatDesc chDesc2 = hipCreateChannelDesc<float>();
hipChannelFormatDesc chDesc3 = hipCreateChannelDesc<float>();
tex_mx.filterMode = hipFilterModePoint;
tex_mx.normalized = false;
tex_mx.channelDesc = chDesc1;
tex_my.filterMode = hipFilterModePoint;
tex_my.normalized = false;
tex_my.channelDesc = chDesc2;
tex_mz.filterMode = hipFilterModePoint;
tex_mz.normalized = false;
tex_mz.channelDesc = chDesc3;
hipBindTexture(NULL,&tex_mx,d_mx,&chDesc1,cfd_nAtom*sizeof(float));
hipBindTexture(NULL,&tex_my,d_my,&chDesc2,cfd_nAtom*sizeof(float));
hipBindTexture(NULL,&tex_mz,d_mz,&chDesc3,cfd_nAtom*sizeof(float));
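  // flag_cfd is zero-copy (mapped, pinned) host memory; d_flag_cfd is the
  // device-side pointer aliasing the same allocation.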
hipSetDeviceFlags(hipDeviceMapHost);
int *flag_cfd,*d_flag_cfd;
hipHostMalloc((void**)&flag_cfd,sizeof( int),hipHostMallocMapped);
hipHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
int cfd_gridSize = (cfd_nAtom-1+cfd_BLOCK_SIZE) / cfd_BLOCK_SIZE;
hipLaunchKernelGGL(( cfd_kernel), dim3(cfd_gridSize), dim3(cfd_BLOCK_SIZE), 0, 0, cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,d_flag_cfd);
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
hipMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), hipMemcpyDeviceToHost);
check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);
//TODO:verified on small inputs
/*
ifstream fluxesF("../org/fluxes.txt");
for(int i=0; i<cfd_nAtom*NVAR; ++i) {
float f;
fluxesF >> f;
if(abs(f - fluxes[i]) > 0.001) {
fprintf(stderr, "Test failed! i = %d\n", i);
return 1;
}
}*/
// printf("Test passed!\n");
// fluxesF.close();
return 0;
}
| 0be8dc4cf4cababf86b8ae2ae8e5e3140f73911e.cu |
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
texture<float, 1, cudaReadModeElementType> tex_mx;
texture<float, 1, cudaReadModeElementType> tex_my;
texture<float, 1, cudaReadModeElementType> tex_mz;
#define cfd_NBLOCKS 16*6*2
//#define cfd_SUPER_BLOCKS_PER_SM 5
#define cfd_BLOCK_SIZE 256
//const int cfd_BLOCK_SIZE = 256;
const int cfd_nBlksPerCluster = 16;
const int cfd_nAtom = cfd_BLOCK_SIZE * cfd_NBLOCKS;
const int cfd_maxNeighbors = 8;
inline int * cfd_myBuildNeighborList_blkSchedule(const int nAtom,
int* neighborList, int blockSz)
{
//create non-uniform data sharing
//but randomize the order so that tasks sharing the same data are not neighboring tasks
vector<int> atomInds(nAtom);
vector<int> blkInds((nAtom+blockSz-1)/blockSz);
for(int i=0; i<blkInds.size(); ++i)
blkInds[i] = i;
random_shuffle(blkInds.begin(), blkInds.end());
int *blkOrder = (int*)malloc(blkInds.size()*sizeof(int));
for(int i=0; i<blkInds.size(); ++i)
blkOrder[i] = blkInds[i];
int j=0;
for(vector<int>::iterator it=blkInds.begin(); it!=blkInds.end(); ++it)
{
int blkInd = *it;
for(int i=0; i<blockSz; ++i)
atomInds[j++] = blkInd*blockSz + i;
}
int superBlockSz = blockSz * cfd_nBlksPerCluster;
// Build Neighbor List
for (int i = 0; i < nAtom; i++)
{
int start = i - i%superBlockSz; //difference is here
//int end = i + (superBlockSz - i%superBlockSz)-1;
int nNeighbors = 0;
do {
int j = start + rand() % superBlockSz;
if (i == j || j>=nAtom) continue; // An atom cannot be its own neighbor
neighborList[nNeighbors*nAtom + atomInds[i]] = atomInds[j];
nNeighbors ++;
} while(nNeighbors<cfd_maxNeighbors);
}
return blkOrder;
}
#define GAMMA 1.4f
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define NDIM 3
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
__host__ __device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__host__ __device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
__host__ __device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
__host__ __device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
__host__ __device__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
void check_cfd(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* mz, float* density_energy, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);
//const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int i=0;i<cfd_NBLOCKS*cfd_BLOCK_SIZE;i++){
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = mx[i];
momentum_i.y = my[i];
momentum_i.z = mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x = mx[nb];
momentum_nb.y = my[nb];
momentum_nb.z = mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
/*if(((pow((fluxes[i + VAR_DENSITY*nelr] - flux_i_density),2)/flux_i_density)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x),2)/flux_i_momentum.x)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y),2)/flux_i_momentum.y)>0.001)||\
((pow((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z),2)/flux_i_momentum.z)>0.001)||\
((pow((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy),2)/flux_i_density_energy)>0.001))*/
if(((abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density)/flux_i_density)>0.01)&&(abs((fluxes[i + VAR_DENSITY*nelr] - flux_i_density))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x)/flux_i_momentum.x)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+0)*nelr] - flux_i_momentum.x))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y)/flux_i_momentum.y)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+1)*nelr] - flux_i_momentum.y))>0.01))||\
((abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z)/flux_i_momentum.z)>0.01)&&(abs((fluxes[i + (VAR_MOMENTUM+2)*nelr] - flux_i_momentum.z))>0.01))||\
((abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy)/flux_i_density_energy)>0.01)&&(abs((fluxes[i + VAR_DENSITY_ENERGY*nelr]- flux_i_density_energy))>0.01)))
{printf("failed!%d,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",i,fluxes[i + VAR_DENSITY*nelr],flux_i_density,\
fluxes[i + (VAR_MOMENTUM+0)*nelr],flux_i_momentum.x,\
fluxes[i + (VAR_MOMENTUM+1)*nelr] , flux_i_momentum.y,\
fluxes[i + (VAR_MOMENTUM+2)*nelr],flux_i_momentum.z,\
fluxes[i + VAR_DENSITY_ENERGY*nelr],flux_i_density_energy);
return;}
}
printf("GOOD! passed!\n");
return;
}
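// A minimal sketch (not part of the original benchmark): the acceptance rule used by the
// verification above, factored into one helper -- a GPU value passes when either its
// relative error or its absolute error against the recomputed reference stays within tol
// (0.01 in the check above).
static bool cfd_approx_equal(float gpu_val, float ref_val, float tol)
{
    float diff = fabsf(gpu_val - ref_val);
    return (diff <= tol) || (fabsf(diff / ref_val) <= tol);
}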
__global__ void cfd_kernel(int nelr, int* elements_surrounding_elements, float*
normals, float* density, float* mx, float* my, float* __restrict__ mz, float* density_energy, float* fluxes,int *d_flag)
{
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
//float density_i = variables[i + VAR_DENSITY*nelr];
float density_i = density[i];
float3 momentum_i;
//momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
//momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
//momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
momentum_i.x = tex1Dfetch(tex_mx,i);//mx[i];
momentum_i.y = tex1Dfetch(tex_my,i);//my[i];
momentum_i.z = tex1Dfetch(tex_mz,i);//mz[i];
//float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float density_energy_i = density_energy[i];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
//float flux_i_density = float(0.0f);
float flux_i_density = 0.0f;
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < cfd_maxNeighbors; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
//optimal layout already
// |X for neighbor 0, X for neighbor 1, ... | Y for neighbor 0, Y for neighbor 1, ...
// |Z for neighbor 0, Z for neighbor 1, ... |
normal.x = normals[i + (j + 0*cfd_maxNeighbors)*nelr];
normal.y = normals[i + (j + 1*cfd_maxNeighbors)*nelr];
normal.z = normals[i + (j + 2*cfd_maxNeighbors)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
//density_nb = variables[nb + VAR_DENSITY*nelr];
//momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
//momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
//momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_nb = density[nb];
momentum_nb.x =tex1Dfetch(tex_mx,nb);// mx[nb];
momentum_nb.y = tex1Dfetch(tex_my,nb);//my[nb];
momentum_nb.z =tex1Dfetch(tex_mz,nb);// mz[nb];
//density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
density_energy_nb = density_energy[nb];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
//factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
factor = 1.3;
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
//if (threadIdx.x==0) atomicAdd(d_flag,1);
}
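// A minimal sketch (not part of the original benchmark): the structure-of-arrays layout
// described in the loop comments above, in one place -- component c (0 = x, 1 = y, 2 = z)
// of the normal for neighbor j of element i is stored at
// normals[i + (j + c*cfd_maxNeighbors)*nelr].
__device__ __forceinline__ float cfd_normal_component(const float* normals, int nelr,
                                                      int i, int j, int c)
{
    return normals[i + (j + c*cfd_maxNeighbors)*nelr];
}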
int main(int argc, char **argv) {
cudaSetDevice(2);
srand(2013);
// Allocate problem data on host
//posVecType* position;
//forceVecType* force;
float *density;
float *mx;
float *my;
float *mz;
float *density_energy;
float *normals;
float *fluxes;
int* cfd_neighborList;
cudaMallocHost((void**)&density, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&mx, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&my, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&mz, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&density_energy, cfd_nAtom*sizeof(float));
cudaMallocHost((void**)&normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
cudaMallocHost((void**)&fluxes, cfd_nAtom*NVAR*sizeof(float));
cudaMallocHost((void**)&cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
// Allocate device memory for position and force
//forceVecType* d_force;
//posVecType* d_position;
float *d_density;
float *d_mx;
float *d_my;
float *d_mz;
float *d_density_energy;
float *d_normals;
float *d_fluxes;
cudaMalloc((void**)&d_density, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_mx, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_my, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_mz, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_density_energy, cfd_nAtom*sizeof(float));
cudaMalloc((void**)&d_normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float));
cudaMalloc((void**)&d_fluxes, cfd_nAtom*NVAR*sizeof(float));
cudaMemset(d_fluxes, 0, cfd_nAtom*NVAR*sizeof(float));
//cudaMemset(d_force, 0, cfd_nAtom*sizeof(forceVecType));
// Allocate device memory for neighbor list
int* d_cfd_neighborList;
cudaMalloc((void**)&d_cfd_neighborList, cfd_nAtom*cfd_maxNeighbors*sizeof(int));
//cout << "Initializing test problem (this can take several "
// "minutes for large problems)\n";
// Initialize positions -- random distribution in cubic domain
// domainEdge constant specifies edge length
for (int i = 0; i < cfd_nAtom; i++)
{
density[i] = (float)(drand48());
density_energy[i] = (float)(drand48() );
mx[i] = (float)(drand48() );
my[i] = (float)(drand48() );
mz[i] = (float)(drand48() );
/*
density[i] = 1.1+i*0.01;
density_energy[i] = 1.1+i*0.01;
mx[i] = 1.1+i*0.01;
my[i] = 1.1+i*0.01;
mz[i] = 1.1+i*0.01;
*/
}
for(int i=0; i<cfd_nAtom*NDIM*cfd_maxNeighbors; ++i)
normals[i] = (float)(drand48());
cfd_myBuildNeighborList_blkSchedule(cfd_nAtom, cfd_neighborList, cfd_BLOCK_SIZE);
cudaMemcpy(d_cfd_neighborList, cfd_neighborList, cfd_maxNeighbors*cfd_nAtom*sizeof(int), cudaMemcpyHostToDevice);
// Copy data to GPU
cudaMemcpy(d_density, density, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_mx, mx, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_my, my, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_mz, mz, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_density_energy, density_energy, cfd_nAtom*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_normals, normals, cfd_nAtom*NDIM*cfd_maxNeighbors*sizeof(float), cudaMemcpyHostToDevice);
cudaChannelFormatDesc chDesc1 = cudaCreateChannelDesc<float>();
cudaChannelFormatDesc chDesc2 = cudaCreateChannelDesc<float>();
cudaChannelFormatDesc chDesc3 = cudaCreateChannelDesc<float>();
tex_mx.filterMode = cudaFilterModePoint;
tex_mx.normalized = false;
tex_mx.channelDesc = chDesc1;
tex_my.filterMode = cudaFilterModePoint;
tex_my.normalized = false;
tex_my.channelDesc = chDesc2;
tex_mz.filterMode = cudaFilterModePoint;
tex_mz.normalized = false;
tex_mz.channelDesc = chDesc3;
cudaBindTexture(NULL,&tex_mx,d_mx,&chDesc1,cfd_nAtom*sizeof(float));
cudaBindTexture(NULL,&tex_my,d_my,&chDesc2,cfd_nAtom*sizeof(float));
cudaBindTexture(NULL,&tex_mz,d_mz,&chDesc3,cfd_nAtom*sizeof(float));
cudaSetDeviceFlags(cudaDeviceMapHost);
int *flag_cfd,*d_flag_cfd;
cudaHostAlloc((void**)&flag_cfd,sizeof( int),cudaHostAllocMapped);
cudaHostGetDevicePointer((void**)&d_flag_cfd,(void*)flag_cfd,0);
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
int cfd_gridSize = (cfd_nAtom-1+cfd_BLOCK_SIZE) / cfd_BLOCK_SIZE;
cfd_kernel<<<cfd_gridSize, cfd_BLOCK_SIZE>>>(cfd_nAtom, d_cfd_neighborList, d_normals, d_density, d_mx, d_my, d_mz, d_density_energy,
d_fluxes,d_flag_cfd);
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
cudaMemcpy(fluxes, d_fluxes, cfd_nAtom*NVAR*sizeof(float), cudaMemcpyDeviceToHost);
check_cfd(cfd_nAtom,cfd_neighborList,normals,density,mx,my,mz,density_energy,fluxes);
//TODO:verified on small inputs
/*
ifstream fluxesF("../org/fluxes.txt");
for(int i=0; i<cfd_nAtom*NVAR; ++i) {
float f;
fluxesF >> f;
if(abs(f - fluxes[i]) > 0.001) {
fprintf(stderr, "Test failed! i = %d\n", i);
return 1;
}
}*/
// printf("Test passed!\n");
// fluxesF.close();
return 0;
}
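// A minimal sketch (not part of the original benchmark): the texture-reference binding in
// main() (cudaBindTexture on tex_mx/tex_my/tex_mz) uses the legacy API; an equivalent setup
// with the texture-object API might look like the helper below, with the kernel then reading
// through tex1Dfetch<float>(tex, i) instead of the global texture references.
static cudaTextureObject_t make_float_texture(const float* d_ptr, size_t n_elems)
{
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr = (void*)d_ptr;
    resDesc.res.linear.desc = cudaCreateChannelDesc<float>();
    resDesc.res.linear.sizeInBytes = n_elems * sizeof(float);
    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0] = cudaAddressModeClamp;
    texDesc.filterMode = cudaFilterModePoint;
    texDesc.readMode = cudaReadModeElementType;
    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    return tex;
}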
|
eddd57d33f89e1591c589c0842c647eb660995c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )
static void HandleError( hipError_t err, const char *file, int line )
{
if (err != hipSuccess)
{
printf( "%s in %s at line %d\n", hipGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
const short N = 100 ;
// CUDA Kernel for Vector Addition
__global__ void Vector_Addition ( const int *dev_a , const int *dev_b , int *dev_c)
{
//Get the id of thread within a block
unsigned short tid = blockDim.x*blockIdx.x+threadIdx.x;
if ( tid < N ) // check the boundry condition for the threads
dev_c [tid] = dev_a[tid] + dev_b[tid] ;
}
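// A minimal sketch (not part of the original example): a grid-stride variant of the kernel
// above. It produces the same result but stays correct even if the launch supplies fewer
// total threads than N, because each thread walks the array in strides of the whole grid.
__global__ void Vector_Addition_GridStride ( const int *dev_a , const int *dev_b , int *dev_c)
{
    for (unsigned int tid = blockDim.x*blockIdx.x + threadIdx.x; tid < N; tid += blockDim.x*gridDim.x)
        dev_c[tid] = dev_a[tid] + dev_b[tid];
}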
int main (void)
{
//Host array
int Host_a[N], Host_b[N], Host_c[N];
//Device array
int *dev_a , *dev_b, *dev_c ;
//Allocate the memory on the GPU
HANDLE_ERROR ( hipMalloc((void **)&dev_a , N*sizeof(int) ) );
HANDLE_ERROR ( hipMalloc((void **)&dev_b , N*sizeof(int) ) );
HANDLE_ERROR ( hipMalloc((void **)&dev_c , N*sizeof(int) ) );
//fill the Host array with random elements on the CPU
for ( int i = 0; i <N ; i++ )
{
Host_a[i] = i ;
Host_b[i] = i+1 ;
}
//Copy Host array to Device array
HANDLE_ERROR (hipMemcpy (dev_a , Host_a , N*sizeof(int) , hipMemcpyHostToDevice));
HANDLE_ERROR (hipMemcpy (dev_b , Host_b , N*sizeof(int) , hipMemcpyHostToDevice));
//Make a call to GPU kernel
int threadsPerBlock = 10;
int blocksPerGrid = N / threadsPerBlock;
hipLaunchKernelGGL(( Vector_Addition) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, dev_a , dev_b , dev_c ) ;
//Copy back to Host array from Device array
HANDLE_ERROR (hipMemcpy(Host_c , dev_c , N*sizeof(int) , hipMemcpyDeviceToHost));
//Display the result
for ( int i = 0; i<N; i++ )
printf ("%d + %d = %d\n", Host_a[i] , Host_b[i] , Host_c[i] ) ;
//Free the Device array memory
hipFree (dev_a) ;
hipFree (dev_b) ;
hipFree (dev_c) ;
system("pause");
return 0 ;
} | eddd57d33f89e1591c589c0842c647eb660995c4.cu | #include <stdio.h>
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )
static void HandleError( cudaError_t err, const char *file, int line )
{
if (err != cudaSuccess)
{
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
const short N = 100 ;
// CUDA Kernel for Vector Addition
__global__ void Vector_Addition ( const int *dev_a , const int *dev_b , int *dev_c)
{
//Get the id of thread within a block
unsigned short tid = blockDim.x*blockIdx.x+threadIdx.x;
if ( tid < N ) // check the boundry condition for the threads
dev_c [tid] = dev_a[tid] + dev_b[tid] ;
}
int main (void)
{
//Host array
int Host_a[N], Host_b[N], Host_c[N];
//Device array
int *dev_a , *dev_b, *dev_c ;
//Allocate the memory on the GPU
HANDLE_ERROR ( cudaMalloc((void **)&dev_a , N*sizeof(int) ) );
HANDLE_ERROR ( cudaMalloc((void **)&dev_b , N*sizeof(int) ) );
HANDLE_ERROR ( cudaMalloc((void **)&dev_c , N*sizeof(int) ) );
//fill the Host array with random elements on the CPU
for ( int i = 0; i <N ; i++ )
{
Host_a[i] = i ;
Host_b[i] = i+1 ;
}
//Copy Host array to Device array
HANDLE_ERROR (cudaMemcpy (dev_a , Host_a , N*sizeof(int) , cudaMemcpyHostToDevice));
HANDLE_ERROR (cudaMemcpy (dev_b , Host_b , N*sizeof(int) , cudaMemcpyHostToDevice));
//Make a call to GPU kernel
int threadsPerBlock = 10;
int blocksPerGrid = N / threadsPerBlock;
Vector_Addition <<< blocksPerGrid, threadsPerBlock >>> (dev_a , dev_b , dev_c ) ;
//Copy back to Host array from Device array
HANDLE_ERROR (cudaMemcpy(Host_c , dev_c , N*sizeof(int) , cudaMemcpyDeviceToHost));
//Display the result
for ( int i = 0; i<N; i++ )
printf ("%d + %d = %d\n", Host_a[i] , Host_b[i] , Host_c[i] ) ;
//Free the Device array memory
cudaFree (dev_a) ;
cudaFree (dev_b) ;
cudaFree (dev_c) ;
system("pause");
return 0 ;
} |
local_layer.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/local_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void local_update1_gpu_kernel(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
int total = filter_num * location_num * output_num;
CUDA_KERNEL_LOOP(index, total) {
int p = index % location_num;
int n = (index / location_num) % filter_num;
int q = (index / location_num) / filter_num;
data_R[index] += data_A[q*location_num+p] * data_B[n*location_num+p];
}
}
template <typename Dtype>
void local_update1_gpu(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
// data_A is output_num x location_num
// data_B is filter_num x location_num
// data_R is output_num x filter_num x location_num,
// the update performed is Rqnp += Aqp * Bnp
const int nthreads = filter_num * location_num * output_num;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( local_update1_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
data_A, data_B, data_R, filter_num, location_num, output_num);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void local_update1_gpu<float>(
const float* data_A, const float* data_B,
float* data_R, const int filter_num,
const int location_num, const int output_num);
template void local_update1_gpu<double>(
const double* data_A, const double* data_B,
double* data_R, const int filter_num,
const int location_num, const int output_num);
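// A minimal sketch (not part of the original layer): a host-side reference of the same
// update, Rqnp += Aqp * Bnp, matching the flattened index (q*filter_num + n)*location_num + p
// used by the kernel; handy for checking local_update1_gpu on small shapes.
template <typename Dtype>
void local_update1_cpu_reference(
    const Dtype* data_A, const Dtype* data_B,
    Dtype* data_R, const int filter_num,
    const int location_num, const int output_num) {
  for (int q = 0; q < output_num; ++q) {
    for (int n = 0; n < filter_num; ++n) {
      for (int p = 0; p < location_num; ++p) {
        data_R[(q * filter_num + n) * location_num + p] +=
            data_A[q * location_num + p] * data_B[n * location_num + p];
      }
    }
  }
}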
template <typename Dtype>
__global__ void local_update2_gpu_kernel(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
int total = filter_num * location_num;
CUDA_KERNEL_LOOP(index, total) {
int p = index % location_num;
int n = (index / location_num);
for (int q = 0; q < output_num; q++) {
data_R[index] +=
data_A[q*location_num+p] * data_B[(q*filter_num+n)*location_num+p];
}
}
}
template <typename Dtype>
void local_update2_gpu(const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
// data_A is output_num x location_num
// data_B is output_num x filter_num x location_num
// data_R is filter_num x location_num,
// the update performed is Rnp += \sum_q(Aqp * Bqnp)
int nthreads = filter_num * location_num;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( local_update2_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
data_A, data_B, data_R, filter_num,
location_num, output_num);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void local_update2_gpu<float>(
const float* data_A, const float* data_B,
float* data_R, const int filter_num,
const int location_num, const int output_num);
template void local_update2_gpu<double>(
const double* data_A, const double* data_B,
double* data_R, const int filter_num,
const int location_num, const int output_num);
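// A minimal sketch (not part of the original layer): a host-side reference of
// Rnp += \sum_q (Aqp * Bqnp), again for checking local_update2_gpu on small shapes.
template <typename Dtype>
void local_update2_cpu_reference(
    const Dtype* data_A, const Dtype* data_B,
    Dtype* data_R, const int filter_num,
    const int location_num, const int output_num) {
  for (int n = 0; n < filter_num; ++n) {
    for (int p = 0; p < location_num; ++p) {
      for (int q = 0; q < output_num; ++q) {
        data_R[n * location_num + p] +=
            data_A[q * location_num + p] *
            data_B[(q * filter_num + n) * location_num + p];
      }
    }
  }
}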
/// @brief refer to CPU forward -- the BLAS implementation is the same.
template <typename Dtype>
void LocalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* x_data = col_buffer_.mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Blob<Dtype> E;
E.Reshape(1, 1, 1, K_);
FillerParameter filler_param;
filler_param.set_value(1);
ConstantFiller<Dtype> filler(filler_param);
filler.Fill(&E);
Blob<Dtype> intermediate;
intermediate.Reshape(1, 1, K_, N_);
for (int n = 0; n < num_; n++) {
im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_,
width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, 1, 1, x_data);
for (int m = 0; m < num_output_; m++) {
caffe_gpu_mul(K_*N_, x_data, weight+this->blobs_[0]->offset(m),
intermediate.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, N_, K_,
(Dtype)1., E.gpu_data(), intermediate.gpu_data(),
(Dtype)0., top_data + top[0]->offset(n, m));
}
if (bias_term_) {
caffe_gpu_add(M_ * N_, this->blobs_[1]->gpu_data(),
top_data + top[0]->offset(n),
top_data + top[0]->offset(n));
}
}
}
/// @brief refer to CPU backward -- the BLAS implementation is the same.
template <typename Dtype>
void LocalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* x_data = col_buffer_.mutable_gpu_data();
Dtype* x_diff = col_buffer_.mutable_gpu_diff();
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* bias_diff = NULL;
Blob<Dtype> intermediate;
intermediate.Reshape(1, 1, 1, N_);
Blob<Dtype> xt;
xt.Reshape(1, 1, K_, N_);
Dtype* xt_data = xt.mutable_gpu_data();
if (bias_term_) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
caffe_gpu_set(this->blobs_[1]->count(), Dtype(0.), bias_diff);
for (int n = 0; n < num_; ++n) {
caffe_gpu_add(M_ * N_, bias_diff,
top_diff + top[0]->offset(n),
bias_diff);
}
}
Blob<Dtype> buf;
buf.Reshape(1, 1, K_, N_);
Dtype* buf_data = buf.mutable_gpu_data();
caffe_gpu_set(this->blobs_[0]->count(), Dtype(0.), weight_diff);
for (int n = 0; n < num_; n++) {
im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_,
width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, 1, 1, x_data);
local_update1_gpu(
top_diff+top[0]->offset(n), x_data,
weight_diff, K_, N_, M_);
if (propagate_down[0]) {
caffe_gpu_set(col_buffer_.count(), Dtype(0.), x_diff);
local_update2_gpu(top_diff+top[0]->offset(n), weight, x_diff, K_, N_, M_);
// col2im back to the data
col2im_gpu(x_diff, channels_, height_, width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, 1, 1, bottom_diff + bottom[0]->offset(n));
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LocalLayer);
} // namespace caffe
| local_layer.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/local_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void local_update1_gpu_kernel(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
int total = filter_num * location_num * output_num;
CUDA_KERNEL_LOOP(index, total) {
int p = index % location_num;
int n = (index / location_num) % filter_num;
int q = (index / location_num) / filter_num;
data_R[index] += data_A[q*location_num+p] * data_B[n*location_num+p];
}
}
template <typename Dtype>
void local_update1_gpu(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
// data_A is output_num x location_num
// data_B is filter_num x location_num
// data_R is output_num x filter_num x location_num,
// the update performed is Rqnp += Aqp * Bnp
const int nthreads = filter_num * location_num * output_num;
local_update1_gpu_kernel<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
data_A, data_B, data_R, filter_num, location_num, output_num);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void local_update1_gpu<float>(
const float* data_A, const float* data_B,
float* data_R, const int filter_num,
const int location_num, const int output_num);
template void local_update1_gpu<double>(
const double* data_A, const double* data_B,
double* data_R, const int filter_num,
const int location_num, const int output_num);
template <typename Dtype>
__global__ void local_update2_gpu_kernel(
const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
int total = filter_num * location_num;
CUDA_KERNEL_LOOP(index, total) {
int p = index % location_num;
int n = (index / location_num);
for (int q = 0; q < output_num; q++) {
data_R[index] +=
data_A[q*location_num+p] * data_B[(q*filter_num+n)*location_num+p];
}
}
}
template <typename Dtype>
void local_update2_gpu(const Dtype* data_A, const Dtype* data_B,
Dtype* data_R, const int filter_num,
const int location_num, const int output_num) {
// data_A is output_num x location_num
// data_B is output_num x filter_num x location_num
// data_R is filter_num x location_num,
// the update performed is Rnp += \sum_q(Aqp * Bqnp)
int nthreads = filter_num * location_num;
local_update2_gpu_kernel<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(
data_A, data_B, data_R, filter_num,
location_num, output_num);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void local_update2_gpu<float>(
const float* data_A, const float* data_B,
float* data_R, const int filter_num,
const int location_num, const int output_num);
template void local_update2_gpu<double>(
const double* data_A, const double* data_B,
double* data_R, const int filter_num,
const int location_num, const int output_num);
/// @brief refer to CPU forward -- the BLAS implementation is the same.
template <typename Dtype>
void LocalLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* x_data = col_buffer_.mutable_gpu_data();
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
Blob<Dtype> E;
E.Reshape(1, 1, 1, K_);
FillerParameter filler_param;
filler_param.set_value(1);
ConstantFiller<Dtype> filler(filler_param);
filler.Fill(&E);
Blob<Dtype> intermediate;
intermediate.Reshape(1, 1, K_, N_);
for (int n = 0; n < num_; n++) {
im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_,
width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, 1, 1, x_data);
for (int m = 0; m < num_output_; m++) {
caffe_gpu_mul(K_*N_, x_data, weight+this->blobs_[0]->offset(m),
intermediate.mutable_gpu_data());
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, 1, N_, K_,
(Dtype)1., E.gpu_data(), intermediate.gpu_data(),
(Dtype)0., top_data + top[0]->offset(n, m));
}
if (bias_term_) {
caffe_gpu_add(M_ * N_, this->blobs_[1]->gpu_data(),
top_data + top[0]->offset(n),
top_data + top[0]->offset(n));
}
}
}
/// @brief refer to CPU backward -- the BLAS implementation is the same.
template <typename Dtype>
void LocalLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* x_data = col_buffer_.mutable_gpu_data();
Dtype* x_diff = col_buffer_.mutable_gpu_diff();
const Dtype* weight = this->blobs_[0]->gpu_data();
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
Dtype* bias_diff = NULL;
Blob<Dtype> intermediate;
intermediate.Reshape(1, 1, 1, N_);
Blob<Dtype> xt;
xt.Reshape(1, 1, K_, N_);
Dtype* xt_data = xt.mutable_gpu_data();
if (bias_term_) {
bias_diff = this->blobs_[1]->mutable_gpu_diff();
caffe_gpu_set(this->blobs_[1]->count(), Dtype(0.), bias_diff);
for (int n = 0; n < num_; ++n) {
caffe_gpu_add(M_ * N_, bias_diff,
top_diff + top[0]->offset(n),
bias_diff);
}
}
Blob<Dtype> buf;
buf.Reshape(1, 1, K_, N_);
Dtype* buf_data = buf.mutable_gpu_data();
caffe_gpu_set(this->blobs_[0]->count(), Dtype(0.), weight_diff);
for (int n = 0; n < num_; n++) {
im2col_gpu(bottom_data + bottom[0]->offset(n), channels_, height_,
width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, 1, 1, x_data);
local_update1_gpu(
top_diff+top[0]->offset(n), x_data,
weight_diff, K_, N_, M_);
if (propagate_down[0]) {
caffe_gpu_set(col_buffer_.count(), Dtype(0.), x_diff);
local_update2_gpu(top_diff+top[0]->offset(n), weight, x_diff, K_, N_, M_);
// col2im back to the data
col2im_gpu(x_diff, channels_, height_, width_, kernel_size_, kernel_size_,
pad_, pad_, stride_, stride_, 1, 1, bottom_diff + bottom[0]->offset(n));
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LocalLayer);
} // namespace caffe
|
ad335281191da934b7da86708dd232378a4c3f74.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgesellcmmv.cu, normal z -> c, Wed Jan 2 14:18:53 2019
*/
#include "magmasparse_internal.h"
#define PRECISION_c
//#define TEXTURE
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_1(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ dval,
const magma_index_t * __restrict__ dcolind,
const magma_index_t * __restrict__ drowptr,
const magmaFloatComplex *__restrict__ dx,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ dy)
{
// threads assigned to rows
//int Idx = blockDim.x * blockIdx.x + threadIdx.x;
//int offset = drowptr[ blockIdx.x ];
//int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
// T threads assigned to each row
int idx = threadIdx.x; // local row
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * 256 + idx; // global row index
// int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) );
int lrow = threadIdx.x%blocksize; // local row;
if( row < num_rows ) {
int offset = drowptr[ row/blocksize ];
int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize;
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++) {
int col = dcolind [ offset+ blocksize * n + lrow ];
magmaFloatComplex val = dval[ offset+ blocksize * n + lrow ];
dot = dot + val * dx [ col ];
}
if (betazero) {
dy[ row ] = dot * alpha;
} else {
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
}
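// A minimal sketch (not part of the MAGMA source): with slice height `blocksize`, the
// SELL-P value/column arrays store entry n of local row `lrow` of a slice that starts at
// `offset` (taken from drowptr) at position offset + n*blocksize + lrow, which is exactly
// the indexing used by the kernel above.
static __host__ __device__ inline int
sellp_entry( int offset, int blocksize, int lrow, int n )
{
    return offset + n*blocksize + lrow;
}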
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_4(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaFloatComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = dx[ i1 ];
x2 = dx[ i2 ];
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = dx[ dcolind[ block*kk] ];
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ) {
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_8(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaFloatComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = dx[ i1 ];
x2 = dx[ i2 ];
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = dx[ dcolind[ block*kk] ];
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_16(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_32(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
/************************* same but using texture mem *************************/
#if defined(PRECISION_d) && defined(TEXTURE)
__inline__ __device__ float
read_from_tex( hipTextureObject_t texdx, const int& i) {
int2 temp = tex1Dfetch<int2>( texdx, i );
return __hiloint2float(temp.y,temp.x);
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_4_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaFloatComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = read_from_tex( texdx, dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ) {
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_8_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaFloatComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = read_from_tex( texdx, dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_16_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_32_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
hipTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
#endif
/********************* end of texture versions **************************/
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLP.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgesellpmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
// using a 2D thread grid
int num_threads = blocksize*alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = min( int( sqrt( float( slices ))), 65535 );
int dimgrid2 = min(magma_ceildiv( slices, dimgrid1 ), 65535);
int dimgrid3 = magma_ceildiv( slices, dimgrid1*dimgrid2 );
int num_tx = blocksize;
int Ms = num_threads * sizeof( magmaFloatComplex );
// special case: alignment 1:
if( alignment == 1 ){
Ms = 0;
num_tx = 256;
int num_blocks = magma_ceildiv( n, 256 );
dimgrid1 = num_blocks; //min( int( sqrt( float( num_blocks ))), 65535 );
dimgrid2 = 1; //magma_ceildiv( num_blocks, dimgrid1 );
dimgrid3 = 1;
//blocksize = 256;
}
dim3 block( num_tx, alignment, 1);
if( dimgrid3 > 65535 ){
printf("error: too many GPU thread blocks requested.\n");
}
dim3 grid( dimgrid1, dimgrid2, 1);
#if defined(PRECISION_d) && defined(TEXTURE)
// Create channel.
hipChannelFormatDesc channel_desc;
channel_desc =
hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindSigned);
// Create resource descriptor.
struct hipResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = hipResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)dx;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m*sizeof(float);
// Specify texture object parameters.
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModePoint;
texDesc.readMode = hipReadModeElementType;
// Create texture object.
hipTextureObject_t texdx = 0;
hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
if ( alignment == 1) {
if (beta == MAGMA_C_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
} else if ( alignment == 4){
if (beta == MAGMA_C_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 8){
if (beta == MAGMA_C_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 16){
if (beta == MAGMA_C_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 32){
if (beta == MAGMA_C_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else {
printf("error: alignment %d not supported.\n", alignment);
return MAGMA_ERR_NOT_SUPPORTED;
}
hipDestroyTextureObject(texdx);
#else
if ( alignment == 1) {
if (beta == MAGMA_C_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_1<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 4){
if (beta == MAGMA_C_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_4<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_4<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 8){
if (beta == MAGMA_C_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_8<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_8<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 16){
if (beta == MAGMA_C_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_16<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_16<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 32){
if (beta == MAGMA_C_ZERO) {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_32<true>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
hipLaunchKernelGGL(( zgesellptmv2d_kernel_32<false>), dim3(grid), dim3(block), Ms, queue->cuda_stream() ,
m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
return MAGMA_SUCCESS;
}
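// A minimal usage sketch (not part of the MAGMA source): assuming dval/dcolind/drowptr
// already hold an assembled SELL-P matrix on the device and dx/dy are device vectors,
// a plain SpMV y = A*x reduces to a single call of the routine above. The wrapper name
// below is made up for illustration only.
extern "C" magma_int_t
magma_cgesellpmv_spmv_example(
    magma_int_t m, magma_int_t n,
    magma_int_t blocksize, magma_int_t slices, magma_int_t alignment,
    magmaFloatComplex_ptr dval, magmaIndex_ptr dcolind, magmaIndex_ptr drowptr,
    magmaFloatComplex_ptr dx, magmaFloatComplex_ptr dy,
    magma_queue_t queue )
{
    return magma_cgesellpmv( MagmaNoTrans, m, n, blocksize, slices, alignment,
                             MAGMA_C_ONE, dval, dcolind, drowptr,
                             dx, MAGMA_C_ZERO, dy, queue );
}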
| ad335281191da934b7da86708dd232378a4c3f74.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zgesellcmmv.cu, normal z -> c, Wed Jan 2 14:18:53 2019
*/
#include "magmasparse_internal.h"
#define PRECISION_c
//#define TEXTURE
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_1(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ dval,
const magma_index_t * __restrict__ dcolind,
const magma_index_t * __restrict__ drowptr,
const magmaFloatComplex *__restrict__ dx,
magmaFloatComplex beta,
magmaFloatComplex * __restrict__ dy)
{
// threads assigned to rows
//int Idx = blockDim.x * blockIdx.x + threadIdx.x;
//int offset = drowptr[ blockIdx.x ];
//int border = (drowptr[ blockIdx.x+1 ]-offset)/blocksize;
// T threads assigned to each row
int idx = threadIdx.x; // local row
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * 256 + idx; // global row index
// int lblocksize = ( row + blocksize < num_rows) ? blocksize : ( num_rows - blocksize * (row/blocksize) );
int lrow = threadIdx.x%blocksize; // local row;
if( row < num_rows ) {
int offset = drowptr[ row/blocksize ];
int border = (drowptr[ row/blocksize+1 ]-offset)/blocksize;
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++) {
int col = dcolind [ offset+ blocksize * n + lrow ];
magmaFloatComplex val = dval[ offset+ blocksize * n + lrow ];
dot = dot + val * dx [ col ];
}
if (betazero) {
dy[ row ] = dot * alpha;
} else {
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_4(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaFloatComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = dx[ i1 ];
x2 = dx[ i2 ];
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = dx[ dcolind[ block*kk] ];
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ) {
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_8(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaFloatComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = dx[ i1 ];
x2 = dx[ i2 ];
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = dx[ dcolind[ block*kk] ];
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_16(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_32(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * dx[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
/************************* same but using texture mem *************************/
#if defined(PRECISION_d) && defined(TEXTURE)
__inline__ __device__ float
read_from_tex( cudaTextureObject_t texdx, const int& i) {
int2 temp = tex1Dfetch<int2>( texdx, i );
return __hiloint2float(temp.y,temp.x);
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_4_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaFloatComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = read_from_tex( texdx, dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ) {
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_8_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
magmaFloatComplex x1, x2, v1, v2;
dcolind += offset + ldx;
dval += offset + ldx;
for ( kk = 0; kk < max_-1; kk+=2 ) {
i1 = dcolind[ block*kk];
i2 = dcolind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = dval[ block*kk ];
v2 = dval[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_) {
x1 = read_from_tex( texdx, dcolind[ block*kk] );
v1 = dval[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ) {
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_16_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ) {
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G WELLEIN, H. FEHSKE A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
template<bool betazero>
__global__ void
zgesellptmv2d_kernel_32_tex(
int num_rows,
int num_cols,
int blocksize,
int T,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magma_index_t * drowptr,
cudaTextureObject_t texdx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
// T threads assigned to each row
int idx = threadIdx.y; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = drowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (drowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_; k++ ) {
magmaFloatComplex val =
dval[ offset + ldx + block*k ];
int col =
dcolind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ) {
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
if (betazero) {
dy[row] = (shared[ldx]+shared[ldx+blocksize*1])*alpha;
} else {
dy[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*dy [row];
}
}
}
}
}
#endif
/********************* end of texture versions **************************/
/**
Purpose
-------
This routine computes y = alpha * A^t * x + beta * y on the GPU.
Input format is SELLP.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
blocksize magma_int_t
number of rows in one ELL-slice
@param[in]
slices magma_int_t
number of slices in matrix
@param[in]
alignment magma_int_t
number of threads assigned to one row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in SELLP
@param[in]
dcolind magmaIndex_ptr
columnindices of A in SELLP
@param[in]
drowptr magmaIndex_ptr
rowpointer of SELLP
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
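// Illustrative call (argument names follow the parameter list above; not taken
// from the MAGMA test suite):
//
//     magma_cgesellpmv( MagmaNoTrans, m, n, blocksize, slices, alignment,
//                       alpha, dval, dcolind, drowptr, dx, beta, dy, queue );
//
// blocksize*alignment threads are launched per slice, and only alignment values
// 1, 4, 8, 16 and 32 are accepted; anything else returns MAGMA_ERR_NOT_SUPPORTED.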
extern "C" magma_int_t
magma_cgesellpmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaIndex_ptr drowptr,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
// using a 2D thread grid
int num_threads = blocksize*alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = min( int( sqrt( float( slices ))), 65535 );
int dimgrid2 = min(magma_ceildiv( slices, dimgrid1 ), 65535);
int dimgrid3 = magma_ceildiv( slices, dimgrid1*dimgrid2 );
int num_tx = blocksize;
int Ms = num_threads * sizeof( magmaFloatComplex );
// special case: alignment 1:
if( alignment == 1 ){
Ms = 0;
num_tx = 256;
int num_blocks = magma_ceildiv( n, 256 );
dimgrid1 = num_blocks; //min( int( sqrt( float( num_blocks ))), 65535 );
dimgrid2 = 1; //magma_ceildiv( num_blocks, dimgrid1 );
dimgrid3 = 1;
//blocksize = 256;
}
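    // With alignment == 1 a single thread handles a whole row, so there is no
    // intra-row reduction: no shared memory is needed (Ms = 0) and plain 1D blocks
    // of 256 threads cover the rows instead of the blocksize x alignment layout.
    // The block count is derived from n, and rows are guarded inside kernel_1 by
    // the row < num_rows check.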
dim3 block( num_tx, alignment, 1);
if( dimgrid3 > 65535 ){
printf("error: too many GPU thread blocks requested.\n");
}
dim3 grid( dimgrid1, dimgrid2, 1);
#if defined(PRECISION_d) && defined(TEXTURE)
// Create channel.
cudaChannelFormatDesc channel_desc;
channel_desc =
cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindSigned);
// Create resource descriptor.
struct cudaResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = cudaResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)dx;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m*sizeof(float);
// Specify texture object parameters.
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
// Create texture object.
cudaTextureObject_t texdx = 0;
cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
if ( alignment == 1) {
if (beta == MAGMA_C_ZERO) {
            zgesellptmv2d_kernel_1<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_1<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
} else if ( alignment == 4){
if (beta == MAGMA_C_ZERO) {
zgesellptmv2d_kernel_4_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
zgesellptmv2d_kernel_4_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 8){
if (beta == MAGMA_C_ZERO) {
zgesellptmv2d_kernel_8_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
zgesellptmv2d_kernel_8_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 16){
if (beta == MAGMA_C_ZERO) {
zgesellptmv2d_kernel_16_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
zgesellptmv2d_kernel_16_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else if ( alignment == 32){
if (beta == MAGMA_C_ZERO) {
zgesellptmv2d_kernel_32_tex<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
} else {
zgesellptmv2d_kernel_32_tex<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, texdx, beta, dy );
}
}
else {
        printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
cudaDestroyTextureObject(texdx);
#else
if ( alignment == 1) {
if (beta == MAGMA_C_ZERO) {
zgesellptmv2d_kernel_1<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_1<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 4){
if (beta == MAGMA_C_ZERO) {
zgesellptmv2d_kernel_4<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_4<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 8){
if (beta == MAGMA_C_ZERO) {
zgesellptmv2d_kernel_8<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_8<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 16){
if (beta == MAGMA_C_ZERO) {
zgesellptmv2d_kernel_16<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_16<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else if ( alignment == 32){
if (beta == MAGMA_C_ZERO) {
zgesellptmv2d_kernel_32<true><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
} else {
zgesellptmv2d_kernel_32<false><<< grid, block, Ms, queue->cuda_stream() >>>
( m, n, blocksize, alignment, alpha,
dval, dcolind, drowptr, dx, beta, dy );
}
}
else {
printf("error: alignment %d not supported.\n", int(alignment) );
return MAGMA_ERR_NOT_SUPPORTED;
}
#endif
return MAGMA_SUCCESS;
}
|
c59fc257fd8056e2f7e0ea1113c8cc3aef4e6857.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/elementwise/elementwise_mul_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h"
#include "paddle/fluid/platform/complex.h"
#include "paddle/fluid/platform/float16.h"
// can only include the headers in paddle/top/api dirs
#include "paddle/pten/api/lib/utils/tensor_utils.h"
#include "paddle/pten/include/core.h"
#include "paddle/pten/include/math.h"
namespace ops = paddle::operators;
namespace plat = paddle::platform;
namespace paddle {
namespace operators {
template <typename T>
class ElementwiseMulKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto x_var = ctx.InputVar("X");
PADDLE_ENFORCE_EQ(x_var != nullptr, true,
platform::errors::InvalidArgument(
"Cannot get input Variable X, Variable name = %s.",
ctx.InputName("X")));
const auto& cuda_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
if (x_var->IsType<framework::SelectedRows>()) {
framework::Tensor x_for_selectedrows;
std::vector<const framework::Tensor*> ins;
std::vector<framework::Tensor*> outs;
int axis =
PackTensorsIntoVector<T>(ctx, &ins, &outs, &x_for_selectedrows);
LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(
cuda_ctx, ins, &outs, axis, MulFunctor<T>());
} else if (x_var->IsType<framework::LoDTensor>()) {
auto* x_lod = ctx.Input<framework::LoDTensor>("X");
auto* y_lod = ctx.Input<framework::LoDTensor>("Y");
auto* z_lod = ctx.Output<framework::LoDTensor>("Out");
z_lod->mutable_data<T>(ctx.GetPlace());
int axis = ctx.Attr<int>("axis");
auto pt_x = paddle::experimental::MakePtenDenseTensor(*x_lod);
auto pt_y = paddle::experimental::MakePtenDenseTensor(*y_lod);
auto pt_z = paddle::experimental::MakePtenDenseTensor(*z_lod);
pten::Multiply<T>(cuda_ctx, *pt_x.get(), *pt_y.get(), axis, pt_z.get());
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"X's type[%s] is not supported by elementwise_op. X's type should be "
"LoDTensor or SelectedRows.",
framework::ToTypeName(x_var->Type())));
}
}
};
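// Gradient of z = x * y: each thread runs a grid-stride loop computing
// dx = y * dout and dy = x * dout. The complex<float> / complex<double>
// specializations below use the conjugate of the other operand instead
// (dx = conj(y) * dout, dy = conj(x) * dout), the usual conjugate convention
// for gradients of complex-valued elementwise ops.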
template <typename T>
static __global__ void SimpleElemwiseMulGradCUDAKernel(const T* x, const T* y,
const T* out,
const T* dout,
int64_t size, T* dx,
T* dy) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < size) {
T o = dout[col];
dx[col] = y[col] * o;
dy[col] = x[col] * o;
col += blockDim.x * gridDim.x;
}
}
template <>
__global__ void SimpleElemwiseMulGradCUDAKernel<plat::complex<float>>(
const plat::complex<float>* x, const plat::complex<float>* y,
const plat::complex<float>* out, const plat::complex<float>* dout,
int64_t size, plat::complex<float>* dx, plat::complex<float>* dy) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < size) {
plat::complex<float> o = dout[col];
dx[col] = plat::complex<float>(y[col].real, -y[col].imag) * o;
dy[col] = plat::complex<float>(x[col].real, -x[col].imag) * o;
col += blockDim.x * gridDim.x;
}
}
template <>
__global__ void SimpleElemwiseMulGradCUDAKernel<plat::complex<double>>(
const plat::complex<double>* x, const plat::complex<double>* y,
const plat::complex<double>* out, const plat::complex<double>* dout,
int64_t size, plat::complex<double>* dx, plat::complex<double>* dy) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < size) {
plat::complex<double> o = dout[col];
dx[col] = plat::complex<double>(y[col].real, -y[col].imag) * o;
dy[col] = plat::complex<double>(x[col].real, -x[col].imag) * o;
col += blockDim.x * gridDim.x;
}
}
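// Host-side launcher, selected via std::enable_if only when DeviceContext is the
// CUDA device context: it launches a 1D grid of ceil(size / ELEMENTWISE_BLOCK_SIZE)
// blocks with ELEMENTWISE_BLOCK_SIZE threads each; the kernel's grid-stride loop
// makes the exact grid size non-critical.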
template <typename DeviceContext, typename T>
typename std::enable_if<
std::is_same<DeviceContext, plat::CUDADeviceContext>::value>::type
elementwise_mul_grad(const framework::ExecutionContext& ctx,
const framework::Tensor* x, const framework::Tensor* y,
const framework::Tensor* out,
const framework::Tensor* dout, framework::Tensor* dx,
framework::Tensor* dy) {
dim3 block_size = dim3(ELEMENTWISE_BLOCK_SIZE, 1);
auto size = x->numel();
dim3 grid_size =
dim3((size + ELEMENTWISE_BLOCK_SIZE - 1) / ELEMENTWISE_BLOCK_SIZE, 1);
hipLaunchKernelGGL(( SimpleElemwiseMulGradCUDAKernel<
T>), dim3(grid_size), dim3(block_size), 0,
ctx.template device_context<plat::CUDADeviceContext>().stream(),
x->data<T>(), y->data<T>(), out->data<T>(), dout->data<T>(), size,
dx->mutable_data<T>(ctx.GetPlace()), dy->mutable_data<T>(ctx.GetPlace()));
}
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
elementwise_mul, ops::ElementwiseMulKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseMulKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseMulKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseMulKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseMulKernel<plat::CUDADeviceContext, bool>,
ops::ElementwiseMulKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseMulKernel<plat::CUDADeviceContext, plat::complex<float>>,
ops::ElementwiseMulKernel<plat::CUDADeviceContext, plat::complex<double>>);
REGISTER_OP_CUDA_KERNEL(
elementwise_mul_grad,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext, bool>,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext,
plat::complex<float>>,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext,
plat::complex<double>>);
REGISTER_OP_CUDA_KERNEL(
elementwise_mul_grad_grad,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext, bool>,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext,
plat::complex<float>>,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext,
plat::complex<double>>);
REGISTER_OP_CUDA_KERNEL(
elementwise_mul_triple_grad,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext, bool>,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext,
plat::complex<float>>,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext,
plat::complex<double>>);
| c59fc257fd8056e2f7e0ea1113c8cc3aef4e6857.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/elementwise/elementwise_mul_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h"
#include "paddle/fluid/platform/complex.h"
#include "paddle/fluid/platform/float16.h"
// can only include the headers in paddle/top/api dirs
#include "paddle/pten/api/lib/utils/tensor_utils.h"
#include "paddle/pten/include/core.h"
#include "paddle/pten/include/math.h"
namespace ops = paddle::operators;
namespace plat = paddle::platform;
namespace paddle {
namespace operators {
template <typename T>
class ElementwiseMulKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto x_var = ctx.InputVar("X");
PADDLE_ENFORCE_EQ(x_var != nullptr, true,
platform::errors::InvalidArgument(
"Cannot get input Variable X, Variable name = %s.",
ctx.InputName("X")));
const auto& cuda_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
if (x_var->IsType<framework::SelectedRows>()) {
framework::Tensor x_for_selectedrows;
std::vector<const framework::Tensor*> ins;
std::vector<framework::Tensor*> outs;
int axis =
PackTensorsIntoVector<T>(ctx, &ins, &outs, &x_for_selectedrows);
LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>(
cuda_ctx, ins, &outs, axis, MulFunctor<T>());
} else if (x_var->IsType<framework::LoDTensor>()) {
auto* x_lod = ctx.Input<framework::LoDTensor>("X");
auto* y_lod = ctx.Input<framework::LoDTensor>("Y");
auto* z_lod = ctx.Output<framework::LoDTensor>("Out");
z_lod->mutable_data<T>(ctx.GetPlace());
int axis = ctx.Attr<int>("axis");
auto pt_x = paddle::experimental::MakePtenDenseTensor(*x_lod);
auto pt_y = paddle::experimental::MakePtenDenseTensor(*y_lod);
auto pt_z = paddle::experimental::MakePtenDenseTensor(*z_lod);
pten::Multiply<T>(cuda_ctx, *pt_x.get(), *pt_y.get(), axis, pt_z.get());
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"X's type[%s] is not supported by elementwise_op. X's type should be "
"LoDTensor or SelectedRows.",
framework::ToTypeName(x_var->Type())));
}
}
};
template <typename T>
static __global__ void SimpleElemwiseMulGradCUDAKernel(const T* x, const T* y,
const T* out,
const T* dout,
int64_t size, T* dx,
T* dy) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < size) {
T o = dout[col];
dx[col] = y[col] * o;
dy[col] = x[col] * o;
col += blockDim.x * gridDim.x;
}
}
template <>
__global__ void SimpleElemwiseMulGradCUDAKernel<plat::complex<float>>(
const plat::complex<float>* x, const plat::complex<float>* y,
const plat::complex<float>* out, const plat::complex<float>* dout,
int64_t size, plat::complex<float>* dx, plat::complex<float>* dy) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < size) {
plat::complex<float> o = dout[col];
dx[col] = plat::complex<float>(y[col].real, -y[col].imag) * o;
dy[col] = plat::complex<float>(x[col].real, -x[col].imag) * o;
col += blockDim.x * gridDim.x;
}
}
template <>
__global__ void SimpleElemwiseMulGradCUDAKernel<plat::complex<double>>(
const plat::complex<double>* x, const plat::complex<double>* y,
const plat::complex<double>* out, const plat::complex<double>* dout,
int64_t size, plat::complex<double>* dx, plat::complex<double>* dy) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
while (col < size) {
plat::complex<double> o = dout[col];
dx[col] = plat::complex<double>(y[col].real, -y[col].imag) * o;
dy[col] = plat::complex<double>(x[col].real, -x[col].imag) * o;
col += blockDim.x * gridDim.x;
}
}
template <typename DeviceContext, typename T>
typename std::enable_if<
std::is_same<DeviceContext, plat::CUDADeviceContext>::value>::type
elementwise_mul_grad(const framework::ExecutionContext& ctx,
const framework::Tensor* x, const framework::Tensor* y,
const framework::Tensor* out,
const framework::Tensor* dout, framework::Tensor* dx,
framework::Tensor* dy) {
dim3 block_size = dim3(ELEMENTWISE_BLOCK_SIZE, 1);
auto size = x->numel();
dim3 grid_size =
dim3((size + ELEMENTWISE_BLOCK_SIZE - 1) / ELEMENTWISE_BLOCK_SIZE, 1);
SimpleElemwiseMulGradCUDAKernel<
T><<<grid_size, block_size, 0,
ctx.template device_context<plat::CUDADeviceContext>().stream()>>>(
x->data<T>(), y->data<T>(), out->data<T>(), dout->data<T>(), size,
dx->mutable_data<T>(ctx.GetPlace()), dy->mutable_data<T>(ctx.GetPlace()));
}
} // namespace operators
} // namespace paddle
REGISTER_OP_CUDA_KERNEL(
elementwise_mul, ops::ElementwiseMulKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseMulKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseMulKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseMulKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseMulKernel<plat::CUDADeviceContext, bool>,
ops::ElementwiseMulKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseMulKernel<plat::CUDADeviceContext, plat::complex<float>>,
ops::ElementwiseMulKernel<plat::CUDADeviceContext, plat::complex<double>>);
REGISTER_OP_CUDA_KERNEL(
elementwise_mul_grad,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext, bool>,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext,
plat::complex<float>>,
ops::ElementwiseMulGradKernel<plat::CUDADeviceContext,
plat::complex<double>>);
REGISTER_OP_CUDA_KERNEL(
elementwise_mul_grad_grad,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext, bool>,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext,
plat::complex<float>>,
ops::ElementwiseMulDoubleGradKernel<plat::CUDADeviceContext,
plat::complex<double>>);
REGISTER_OP_CUDA_KERNEL(
elementwise_mul_triple_grad,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext, float>,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext, double>,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext, int>,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext, int64_t>,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext, bool>,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext, plat::float16>,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext,
plat::complex<float>>,
ops::ElementwiseMulTripleGradKernel<plat::CUDADeviceContext,
plat::complex<double>>);
|
2f9360e0de0b50f218ee25293d48bec02c1e75ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file paper.cu
* @brief Source code for paper
* @author Andre Maximo
* @date Dec, 2019
* @copyright The MIT License
*/
#include <cstdlib>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <util/util.h>
#include <util/timer.h>
#include <util/symbol.h>
#include <util/dvector.h>
#include <util/gaussian.h>
#include <util/alg0_xd_cpu.h>
#define FTYPE float // filter float type
#define FORDER 1 // filter order
#define FM 0xffffffff // shuffle's full mask
#define WS 32 // warp size
#define NW 3 // number of warps in a block
#define NB 21 // minimum resident blocks per SM (second argument of __launch_bounds__)
#include <util/linalg.h>
#include <util/recfilter.h>
using namespace gpufilter;
__constant__
Vector<FTYPE, FORDER+1> c_weights;
__constant__
Vector<Matrix<FTYPE, FORDER, FORDER>, 10>
c_AbF_T, c_AbR_T;
__constant__
Matrix<FTYPE,FORDER,FORDER> c_HARB_AFP_T;
template<typename T>
__device__
void read_block(
Matrix<T, WS, WS+1>& block,
const T *input,
const int& tx, const int& ty, const int& bi) {
T *bcp = &block[ty][tx];
const T *icp = &input[bi*WS*WS+ty*WS+tx];
for (int i = 0; i < WS - (WS % NW); i += NW) {
*bcp = *icp;
bcp += NW*(WS+1);
icp += NW*WS;
}
if (ty < WS % NW) {
*bcp = *icp;
}
}
template<typename T>
__device__
void write_block(
T *output,
Matrix<T, WS, WS+1>& block,
const int& tx, const int& ty, const int& bi) {
T *bcp = &block[ty][tx];
T *ocp = &output[bi*WS*WS+ty*WS+tx];
for (int i = 0; i < WS - (WS % NW); i += NW) {
*ocp = *bcp;
bcp += NW*(WS+1);
ocp += NW*WS;
}
if (ty < WS % NW) {
*ocp = *bcp;
}
}
template <typename T, int R>
__device__
void compute_py(
Vector<T, R>& py,
Matrix<T, WS, WS+1>& block,
const int& tx, const bool& save_in_block) {
T x[WS];
for (int i = 0; i < WS; ++i)
x[i] = block[tx][i];
for (int i = 0; i < WS; ++i) {
if (save_in_block)
block[tx][i] = fwdI(py, x[i], c_weights);
else
fwdI(py, x[i], c_weights);
}
}
template <typename T, int R>
__device__
void compute_ez(
Vector<T, R>& ez,
Matrix<T, WS, WS+1>& block,
const int& tx, const bool& save_in_block) {
T x[WS];
for (int i = 0; i < WS; ++i)
x[i] = block[tx][i];
for (int i = WS-1; i >= 0; --i) {
if (save_in_block)
block[tx][i] = revI(x[i], ez, c_weights);
else
revI(x[i], ez, c_weights);
}
}
template <typename T, int R>
__device__
void fix_py(
Vector<T, R>& py,
const int& tx,
const int& ci=0) {
Vector<T, R> pyprev;
for (int i = 0; i < 5; ++i) {
int k = 1 << i;
for (int r = 0; r < R; ++r) {
pyprev[r] = __shfl_up_sync(FM, py[r], k);
}
if (tx >= k) {
py = py + pyprev * c_AbF_T[ci+i];
}
}
}
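// fix_py performs a log-step (Kogge-Stone style) scan across the 32 lanes of a
// warp: at step i every lane pulls the state 2^i lanes back with a shuffle and
// folds it in through c_AbF_T[ci+i], which holds AbF_T raised to the 2^(ci+i)-th
// power. After the 5 steps, lane t holds the filter state at the end of its
// 32-sample segment with all earlier segments folded in. fix_ez below mirrors
// this in the anticausal (reverse) direction using c_AbR_T.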
template <typename T, int R>
__device__
void fix_ez(
Vector<T, R>& ez,
const int& tx,
const int& ci=0) {
Vector<T, R> eznext;
for (int i = 0; i < 5; ++i) {
int k = 1 << i;
for (int r = 0; r < R; ++r) {
eznext[r] = __shfl_down_sync(FM, ez[r], k);
}
if (tx < WS - k) {
ez = ez + eznext * c_AbR_T[ci+i];
}
}
}
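// Overall structure (block-parallel recursive filtering in three passes):
// alg3_step1 computes, per block of 32x32 samples, the incomplete causal prologue
// (g_py) and anticausal epilogue (g_ez) and chains the 32 segments inside the
// block with the warp scan above; alg3_step2 runs the same log-step scan over the
// per-block states, now indexed from c_AbF_T/c_AbR_T[5], since one block spans 32
// segments; alg3_step3 reloads the input and recomputes every block with the
// corrected boundary conditions, writing the final output.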
template <typename T, int R>
__global__ __launch_bounds__(WS*NW, NB)
void alg3_step1(
Vector<T, R> *g_py,
Vector<T, R> *g_ez,
const T *g_in) {
const int tx = threadIdx.x, ty = threadIdx.y;
const int bi = blockIdx.x;
__shared__ Matrix<T, WS, WS+1> s_block;
read_block(s_block, g_in, tx, ty, bi);
__syncthreads();
if (ty == 0) {
Vector<T, R> py = zeros<T, R>();
compute_py(py, s_block, tx, false);
fix_py(py, tx);
if (tx == WS-1)
g_py[bi+1] = py;
for (int r = 0; r < R; ++r)
py[r] = __shfl_up_sync(FM, py[r], 1);
if (tx == 0)
py = zeros<T, R>();
__syncwarp();
compute_py(py, s_block, tx, true);
Vector<T, R> ez = zeros<T, R>();
compute_ez(ez, s_block, tx, false);
fix_ez(ez, tx);
if (tx == 0)
g_ez[bi] = ez;
}
}
template <typename T, int R>
__global__ __launch_bounds__(WS, 2)
void alg3_step2(
Vector<T, R> *g_py,
Vector<T, R> *g_ez,
int num_blocks) {
const int tx = threadIdx.x, ty = threadIdx.y;
Vector<T, R> pe;
for (int i = 0; i < num_blocks+1; i += WS) {
if (ty == 0) {
if (i > 0 && tx == 0)
pe = g_py[i+tx+1] + pe * c_AbF_T[5];
else if (i+tx < num_blocks+1)
pe = g_py[i+tx+1];
fix_py(pe, tx, 5);
if (i+tx < num_blocks+1)
g_py[i+tx+1] = pe;
for (int r = 0; r < R; ++r)
pe[r] = __shfl_down_sync(FM, pe[r], WS-1);
} else if (ty == 1) {
i = num_blocks+1 - i;
if (i < num_blocks+1 && tx == WS-1)
pe = g_ez[i+tx+1] + pe * c_AbR_T[5];
else if (i+tx >= 0)
pe = g_ez[i+tx+1];
fix_ez(pe, tx, 5);
if (i+tx >= 0)
g_ez[i+tx+1] = pe;
for (int r = 0; r < R; ++r)
pe[r] = __shfl_up_sync(FM, pe[r], WS-1);
}
}
}
template <typename T, int R>
__global__ __launch_bounds__(WS*NW, NB)
void alg3_step3(
T *g_out,
const Vector<T, R> *g_py,
const Vector<T, R> *g_ez,
const T *g_in ) {
const int tx = threadIdx.x, ty = threadIdx.y;
const int bi = blockIdx.x;
__shared__ Matrix<T, WS, WS+1> s_block;
read_block(s_block, g_in, tx, ty, bi);
__syncthreads();
if (ty == 0) {
Vector<T, R> py = zeros<T, R>();
if (tx == 0) {
for (int r = 0; r < R; ++r)
py[r] = __ldg((const T*)&g_py[bi][r]);
} else {
compute_py(py, s_block, tx-1, false);
}
fix_py(py, tx);
compute_py(py, s_block, tx, true);
Vector<T, R> ez = zeros<T, R>();
if (tx == WS-1) {
for (int r = 0; r < R; ++r)
ez[r] = __ldg((const T*)&g_ez[bi+1][r]);
if (bi < gridDim.x-1)
ez = ez + py * c_HARB_AFP_T;
} else {
compute_ez(ez, s_block, tx+1, false);
}
__syncwarp();
fix_ez(ez, tx);
compute_ez(ez, s_block, tx, true);
}
__syncthreads();
write_block(g_out, s_block, tx, ty, bi);
}
template <typename T, int R>
__host__
void oa1d_gpu(
T *h_in,
const long int& num_samples,
const long int& num_repeats,
const Vector<T, R+1> &w ) {
const int B = WS;
// pre-compute basic alg1d matrices
Matrix<T,R,B> Zrb = zeros<T,R,B>();
Matrix<T,B,R> Zbr = zeros<T,B,R>();
Matrix<T,R,R> Ir = identity<T,R,R>();
Matrix<T,B,B> Ib = identity<T,B,B>();
Matrix<T,R,B> AFP_T = fwd(Ir, Zrb, w);
Matrix<T,R,B> ARE_T = rev(Zrb, Ir, w);
Matrix<T,B,B> ARB_T = rev(Ib, Zbr, w);
Matrix<T,R,R> AbF_T = tail<R>(AFP_T);
Matrix<T,R,R> AbR_T = head<R>(ARE_T);
Matrix<T,R,R> HARB_AFP_T = AFP_T*head<R>(ARB_T);
Vector<Matrix<FTYPE, FORDER, FORDER>, 10> v_AbF_T;
Vector<Matrix<FTYPE, FORDER, FORDER>, 10> v_AbR_T;
v_AbF_T[0] = AbF_T;
v_AbR_T[0] = AbR_T;
for (int i = 1; i < 10; ++i) {
v_AbF_T[i] = v_AbF_T[i-1] * v_AbF_T[i-1];
v_AbR_T[i] = v_AbR_T[i-1] * v_AbR_T[i-1];
}
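    // v_AbF_T[i] and v_AbR_T[i] now hold AbF_T and AbR_T raised to the 2^i-th
    // power (i = 0..9): entries 0..4 drive the intra-block scans in fix_py/fix_ez
    // (segments of 32 samples) and entries 5..9 the cross-block scan in alg3_step2
    // (blocks of 32*32 samples).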
// upload to the GPU
copy_to_symbol(c_weights, w);
copy_to_symbol(c_AbF_T, v_AbF_T);
copy_to_symbol(c_AbR_T, v_AbR_T);
copy_to_symbol(c_HARB_AFP_T, HARB_AFP_T);
dvector<T> d_in(h_in, num_samples), d_out(num_samples);
long int num_blocks = num_samples/(B*B);
dim3 grid(num_blocks);
dim3 block(WS, NW);
dvector< Vector<T, R> > d_pybar(num_blocks+1);
dvector< Vector<T, R> > d_ezhat(num_blocks+1);
d_pybar.fillzero();
d_ezhat.fillzero();
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeFourByte);
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
base_timer &timer_total = timers.gpu_add("paper_code", num_samples, "iP");
// first run to warm the GPU up
hipLaunchKernelGGL(( alg3_step1), dim3(grid), dim3(block) , 0, 0, &d_pybar, &d_ezhat, &d_in );
hipLaunchKernelGGL(( alg3_step2), dim3(dim3(2)), dim3(dim3(WS)) , 0, 0, &d_pybar, &d_ezhat, num_blocks );
hipLaunchKernelGGL(( alg3_step3), dim3(grid), dim3(block) , 0, 0, &d_out, &d_pybar, &d_ezhat, &d_in );
for (int r = 0; r < num_repeats; ++r) {
hipLaunchKernelGGL(( alg3_step1), dim3(grid), dim3(block) , 0, 0, &d_pybar, &d_ezhat, &d_in );
hipLaunchKernelGGL(( alg3_step2), dim3(dim3(2)), dim3(dim3(WS)) , 0, 0, &d_pybar, &d_ezhat, num_blocks );
hipLaunchKernelGGL(( alg3_step3), dim3(grid), dim3(block) , 0, 0, &d_out, &d_pybar, &d_ezhat, &d_in );
}
timer_total.stop();
if (num_repeats > 1) {
std::size_t proc_samples = timer_total.data_size()*num_repeats;
double time_sec_inv_mebi = timer_total.elapsed()*1024*1024;
std::cout << std::fixed << proc_samples/time_sec_inv_mebi << std::flush;
} else { // running for debugging
timers.flush();
Vector<T, R> *pybar = new Vector<T, R>[d_pybar.size()];
d_pybar.copy_to(pybar, d_pybar.size());
std::cout << std::fixed << std::flush;
print_array(pybar, 32, "d_pybar [:32]:");
}
d_out.copy_to(h_in, num_samples);
}
int main(int argc, char** argv) {
long int num_samples = 1 << 23, num_repeats = 1; // defaults
char array_bin_fn[200] = "../bin/random_array.bin";
if ((argc != 1 && argc != 4)
|| (argc==4 && (sscanf(argv[1], "%ld", &num_samples) != 1 ||
sscanf(argv[2], "%ld", &num_repeats) != 1 ||
sscanf(argv[3], "%s", array_bin_fn) != 1))) {
std::cerr << " Bad arguments!\n";
std::cerr << " Usage: " << argv[0]
<< " [num_samples num_repeats array_bin_fn] ->"
<< " Output: Mis/s MAE MRE\n";
std::cerr << " Where: num_samples = number of samples "
<< "in the 1D array to run this on (up to 1Gi)\n";
std::cerr << " Where: num_repeats = number of repetitions "
<< "to measure the run timing performance\n";
std::cerr << " Where: array_bin_fn = array of inputs in "
<< "binary to read 1D input data from\n";
std::cerr << " Where: Mis/s = Mebi samples per second; "
<< "MAE = max. abs. error; MRE = max. rel. error\n";
return EXIT_FAILURE;
}
// https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html
if (num_repeats == 1) { // running for debugging
std::cout << get_cuda_device_properties();
}
Vector<FTYPE, FORDER+1> iir_weights;
FTYPE gaussian_sigma = 4.0;
weights(gaussian_sigma, iir_weights);
FTYPE *cpu_arr = new FTYPE[num_samples];
FTYPE *gpu_arr = new FTYPE[num_samples];
std::ifstream in_file(array_bin_fn, std::ios::binary);
in_file.read(reinterpret_cast<char*>(cpu_arr),
sizeof(FTYPE)*num_samples);
in_file.close();
memcpy(gpu_arr, cpu_arr, sizeof(FTYPE) * num_samples);
recursive_1d<0,true,FORDER>(cpu_arr, num_samples, iir_weights);
recursive_1d<0,false,FORDER>(cpu_arr, num_samples, iir_weights);
oa1d_gpu<FTYPE,FORDER>(
gpu_arr, num_samples, num_repeats, iir_weights);
FTYPE max_abs_err, max_rel_err;
check_cpu_reference(cpu_arr, gpu_arr, num_samples,
max_abs_err, max_rel_err);
if (num_repeats == 1) // running for debugging
std::cout << " [max-absolute-error] [max-relative-error]:";
std::cout << " " << std::scientific << max_abs_err << " "
<< std::scientific << max_rel_err << "\n";
if (cpu_arr) delete [] cpu_arr;
if (gpu_arr) delete [] gpu_arr;
return EXIT_SUCCESS;
}
| 2f9360e0de0b50f218ee25293d48bec02c1e75ae.cu | /**
* @file paper.cu
* @brief Source code for paper
* @author Andre Maximo
* @date Dec, 2019
* @copyright The MIT License
*/
#include <cstdlib>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <util/util.h>
#include <util/timer.h>
#include <util/symbol.h>
#include <util/dvector.h>
#include <util/gaussian.h>
#include <util/alg0_xd_cpu.h>
#define FTYPE float // filter float type
#define FORDER 1 // filter order
#define FM 0xffffffff // shuffle's full mask
#define WS 32 // warp size
#define NW 3 // number of warps in a block
#define NB 21 // minimum resident blocks per SM (second argument of __launch_bounds__)
#include <util/linalg.h>
#include <util/recfilter.h>
using namespace gpufilter;
__constant__
Vector<FTYPE, FORDER+1> c_weights;
__constant__
Vector<Matrix<FTYPE, FORDER, FORDER>, 10>
c_AbF_T, c_AbR_T;
__constant__
Matrix<FTYPE,FORDER,FORDER> c_HARB_AFP_T;
template<typename T>
__device__
void read_block(
Matrix<T, WS, WS+1>& block,
const T *input,
const int& tx, const int& ty, const int& bi) {
T *bcp = &block[ty][tx];
const T *icp = &input[bi*WS*WS+ty*WS+tx];
for (int i = 0; i < WS - (WS % NW); i += NW) {
*bcp = *icp;
bcp += NW*(WS+1);
icp += NW*WS;
}
if (ty < WS % NW) {
*bcp = *icp;
}
}
template<typename T>
__device__
void write_block(
T *output,
Matrix<T, WS, WS+1>& block,
const int& tx, const int& ty, const int& bi) {
T *bcp = &block[ty][tx];
T *ocp = &output[bi*WS*WS+ty*WS+tx];
for (int i = 0; i < WS - (WS % NW); i += NW) {
*ocp = *bcp;
bcp += NW*(WS+1);
ocp += NW*WS;
}
if (ty < WS % NW) {
*ocp = *bcp;
}
}
template <typename T, int R>
__device__
void compute_py(
Vector<T, R>& py,
Matrix<T, WS, WS+1>& block,
const int& tx, const bool& save_in_block) {
T x[WS];
for (int i = 0; i < WS; ++i)
x[i] = block[tx][i];
for (int i = 0; i < WS; ++i) {
if (save_in_block)
block[tx][i] = fwdI(py, x[i], c_weights);
else
fwdI(py, x[i], c_weights);
}
}
template <typename T, int R>
__device__
void compute_ez(
Vector<T, R>& ez,
Matrix<T, WS, WS+1>& block,
const int& tx, const bool& save_in_block) {
T x[WS];
for (int i = 0; i < WS; ++i)
x[i] = block[tx][i];
for (int i = WS-1; i >= 0; --i) {
if (save_in_block)
block[tx][i] = revI(x[i], ez, c_weights);
else
revI(x[i], ez, c_weights);
}
}
template <typename T, int R>
__device__
void fix_py(
Vector<T, R>& py,
const int& tx,
const int& ci=0) {
Vector<T, R> pyprev;
for (int i = 0; i < 5; ++i) {
int k = 1 << i;
for (int r = 0; r < R; ++r) {
pyprev[r] = __shfl_up_sync(FM, py[r], k);
}
if (tx >= k) {
py = py + pyprev * c_AbF_T[ci+i];
}
}
}
template <typename T, int R>
__device__
void fix_ez(
Vector<T, R>& ez,
const int& tx,
const int& ci=0) {
Vector<T, R> eznext;
for (int i = 0; i < 5; ++i) {
int k = 1 << i;
for (int r = 0; r < R; ++r) {
eznext[r] = __shfl_down_sync(FM, ez[r], k);
}
if (tx < WS - k) {
ez = ez + eznext * c_AbR_T[ci+i];
}
}
}
template <typename T, int R>
__global__ __launch_bounds__(WS*NW, NB)
void alg3_step1(
Vector<T, R> *g_py,
Vector<T, R> *g_ez,
const T *g_in) {
const int tx = threadIdx.x, ty = threadIdx.y;
const int bi = blockIdx.x;
__shared__ Matrix<T, WS, WS+1> s_block;
read_block(s_block, g_in, tx, ty, bi);
__syncthreads();
if (ty == 0) {
Vector<T, R> py = zeros<T, R>();
compute_py(py, s_block, tx, false);
fix_py(py, tx);
if (tx == WS-1)
g_py[bi+1] = py;
for (int r = 0; r < R; ++r)
py[r] = __shfl_up_sync(FM, py[r], 1);
if (tx == 0)
py = zeros<T, R>();
__syncwarp();
compute_py(py, s_block, tx, true);
Vector<T, R> ez = zeros<T, R>();
compute_ez(ez, s_block, tx, false);
fix_ez(ez, tx);
if (tx == 0)
g_ez[bi] = ez;
}
}
template <typename T, int R>
__global__ __launch_bounds__(WS, 2)
void alg3_step2(
Vector<T, R> *g_py,
Vector<T, R> *g_ez,
int num_blocks) {
const int tx = threadIdx.x, ty = threadIdx.y;
Vector<T, R> pe;
for (int i = 0; i < num_blocks+1; i += WS) {
if (ty == 0) {
if (i > 0 && tx == 0)
pe = g_py[i+tx+1] + pe * c_AbF_T[5];
else if (i+tx < num_blocks+1)
pe = g_py[i+tx+1];
fix_py(pe, tx, 5);
if (i+tx < num_blocks+1)
g_py[i+tx+1] = pe;
for (int r = 0; r < R; ++r)
pe[r] = __shfl_down_sync(FM, pe[r], WS-1);
} else if (ty == 1) {
i = num_blocks+1 - i;
if (i < num_blocks+1 && tx == WS-1)
pe = g_ez[i+tx+1] + pe * c_AbR_T[5];
else if (i+tx >= 0)
pe = g_ez[i+tx+1];
fix_ez(pe, tx, 5);
if (i+tx >= 0)
g_ez[i+tx+1] = pe;
for (int r = 0; r < R; ++r)
pe[r] = __shfl_up_sync(FM, pe[r], WS-1);
}
}
}
template <typename T, int R>
__global__ __launch_bounds__(WS*NW, NB)
void alg3_step3(
T *g_out,
const Vector<T, R> *g_py,
const Vector<T, R> *g_ez,
const T *g_in ) {
const int tx = threadIdx.x, ty = threadIdx.y;
const int bi = blockIdx.x;
__shared__ Matrix<T, WS, WS+1> s_block;
read_block(s_block, g_in, tx, ty, bi);
__syncthreads();
if (ty == 0) {
Vector<T, R> py = zeros<T, R>();
if (tx == 0) {
for (int r = 0; r < R; ++r)
py[r] = __ldg((const T*)&g_py[bi][r]);
} else {
compute_py(py, s_block, tx-1, false);
}
fix_py(py, tx);
compute_py(py, s_block, tx, true);
Vector<T, R> ez = zeros<T, R>();
if (tx == WS-1) {
for (int r = 0; r < R; ++r)
ez[r] = __ldg((const T*)&g_ez[bi+1][r]);
if (bi < gridDim.x-1)
ez = ez + py * c_HARB_AFP_T;
} else {
compute_ez(ez, s_block, tx+1, false);
}
__syncwarp();
fix_ez(ez, tx);
compute_ez(ez, s_block, tx, true);
}
__syncthreads();
write_block(g_out, s_block, tx, ty, bi);
}
template <typename T, int R>
__host__
void oa1d_gpu(
T *h_in,
const long int& num_samples,
const long int& num_repeats,
const Vector<T, R+1> &w ) {
const int B = WS;
// pre-compute basic alg1d matrices
Matrix<T,R,B> Zrb = zeros<T,R,B>();
Matrix<T,B,R> Zbr = zeros<T,B,R>();
Matrix<T,R,R> Ir = identity<T,R,R>();
Matrix<T,B,B> Ib = identity<T,B,B>();
Matrix<T,R,B> AFP_T = fwd(Ir, Zrb, w);
Matrix<T,R,B> ARE_T = rev(Zrb, Ir, w);
Matrix<T,B,B> ARB_T = rev(Ib, Zbr, w);
Matrix<T,R,R> AbF_T = tail<R>(AFP_T);
Matrix<T,R,R> AbR_T = head<R>(ARE_T);
Matrix<T,R,R> HARB_AFP_T = AFP_T*head<R>(ARB_T);
Vector<Matrix<FTYPE, FORDER, FORDER>, 10> v_AbF_T;
Vector<Matrix<FTYPE, FORDER, FORDER>, 10> v_AbR_T;
v_AbF_T[0] = AbF_T;
v_AbR_T[0] = AbR_T;
for (int i = 1; i < 10; ++i) {
v_AbF_T[i] = v_AbF_T[i-1] * v_AbF_T[i-1];
v_AbR_T[i] = v_AbR_T[i-1] * v_AbR_T[i-1];
}
// upload to the GPU
copy_to_symbol(c_weights, w);
copy_to_symbol(c_AbF_T, v_AbF_T);
copy_to_symbol(c_AbR_T, v_AbR_T);
copy_to_symbol(c_HARB_AFP_T, HARB_AFP_T);
dvector<T> d_in(h_in, num_samples), d_out(num_samples);
long int num_blocks = num_samples/(B*B);
dim3 grid(num_blocks);
dim3 block(WS, NW);
dvector< Vector<T, R> > d_pybar(num_blocks+1);
dvector< Vector<T, R> > d_ezhat(num_blocks+1);
d_pybar.fillzero();
d_ezhat.fillzero();
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte);
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
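    // Four-byte banks match the float payload, and the prefer-shared cache split
    // favours the 32 x (32+1) staging tile each block keeps in shared memory; the
    // extra padding column gives the tile a stride of 33 elements, so the 32 lanes,
    // each walking its own row in compute_py/compute_ez, touch different banks.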
base_timer &timer_total = timers.gpu_add("paper_code", num_samples, "iP");
// first run to warm the GPU up
alg3_step1<<< grid, block >>>( &d_pybar, &d_ezhat, &d_in );
alg3_step2<<< dim3(2), dim3(WS) >>>( &d_pybar, &d_ezhat, num_blocks );
alg3_step3<<< grid, block >>>( &d_out, &d_pybar, &d_ezhat, &d_in );
for (int r = 0; r < num_repeats; ++r) {
alg3_step1<<< grid, block >>>( &d_pybar, &d_ezhat, &d_in );
alg3_step2<<< dim3(2), dim3(WS) >>>( &d_pybar, &d_ezhat, num_blocks );
alg3_step3<<< grid, block >>>( &d_out, &d_pybar, &d_ezhat, &d_in );
}
timer_total.stop();
if (num_repeats > 1) {
std::size_t proc_samples = timer_total.data_size()*num_repeats;
double time_sec_inv_mebi = timer_total.elapsed()*1024*1024;
std::cout << std::fixed << proc_samples/time_sec_inv_mebi << std::flush;
} else { // running for debugging
timers.flush();
Vector<T, R> *pybar = new Vector<T, R>[d_pybar.size()];
d_pybar.copy_to(pybar, d_pybar.size());
std::cout << std::fixed << std::flush;
print_array(pybar, 32, "d_pybar [:32]:");
}
d_out.copy_to(h_in, num_samples);
}
int main(int argc, char** argv) {
long int num_samples = 1 << 23, num_repeats = 1; // defaults
char array_bin_fn[200] = "../bin/random_array.bin";
if ((argc != 1 && argc != 4)
|| (argc==4 && (sscanf(argv[1], "%ld", &num_samples) != 1 ||
sscanf(argv[2], "%ld", &num_repeats) != 1 ||
sscanf(argv[3], "%s", array_bin_fn) != 1))) {
std::cerr << " Bad arguments!\n";
std::cerr << " Usage: " << argv[0]
<< " [num_samples num_repeats array_bin_fn] ->"
<< " Output: Mis/s MAE MRE\n";
std::cerr << " Where: num_samples = number of samples "
<< "in the 1D array to run this on (up to 1Gi)\n";
std::cerr << " Where: num_repeats = number of repetitions "
<< "to measure the run timing performance\n";
std::cerr << " Where: array_bin_fn = array of inputs in "
<< "binary to read 1D input data from\n";
std::cerr << " Where: Mis/s = Mebi samples per second; "
<< "MAE = max. abs. error; MRE = max. rel. error\n";
return EXIT_FAILURE;
}
// https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__DEVICE.html
if (num_repeats == 1) { // running for debugging
std::cout << get_cuda_device_properties();
}
Vector<FTYPE, FORDER+1> iir_weights;
FTYPE gaussian_sigma = 4.0;
weights(gaussian_sigma, iir_weights);
FTYPE *cpu_arr = new FTYPE[num_samples];
FTYPE *gpu_arr = new FTYPE[num_samples];
std::ifstream in_file(array_bin_fn, std::ios::binary);
in_file.read(reinterpret_cast<char*>(cpu_arr),
sizeof(FTYPE)*num_samples);
in_file.close();
memcpy(gpu_arr, cpu_arr, sizeof(FTYPE) * num_samples);
recursive_1d<0,true,FORDER>(cpu_arr, num_samples, iir_weights);
recursive_1d<0,false,FORDER>(cpu_arr, num_samples, iir_weights);
oa1d_gpu<FTYPE,FORDER>(
gpu_arr, num_samples, num_repeats, iir_weights);
FTYPE max_abs_err, max_rel_err;
check_cpu_reference(cpu_arr, gpu_arr, num_samples,
max_abs_err, max_rel_err);
if (num_repeats == 1) // running for debugging
std::cout << " [max-absolute-error] [max-relative-error]:";
std::cout << " " << std::scientific << max_abs_err << " "
<< std::scientific << max_rel_err << "\n";
if (cpu_arr) delete [] cpu_arr;
if (gpu_arr) delete [] gpu_arr;
return EXIT_SUCCESS;
}
|
a51f217cdb01145b7446910e0c3bad14908c6115.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void update_gpu( const float *qold, float *q, float *res, const float *adt,
float *rms) {
float del, adti;
float rmsl = 0.0f;
adti = 1.0f / (*adt);
for (int n = 0; n < 4; n++) {
del = adti * res[n];
q[n] = qold[n] - del;
res[n] = 0.0f;
rmsl += del * del;
}
*rms += rmsl;
}
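// op_cuda_update below is the OP2-generated CUDA wrapper around update_gpu: it
// strides over the set elements with a grid-stride loop, keeps each thread's rms
// contribution in the register array arg4_l, and folds it into one slot per thread
// block (arg4[blockIdx.x]) via op_reduction<OP_INC>; the host stub then sums the
// per-block slots.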
// CUDA kernel function
__global__ void op_cuda_update(
const float *__restrict arg0,
float *arg1,
float *arg2,
const float *__restrict arg3,
float *arg4,
int set_size ) {
float arg4_l[1];
for ( int d=0; d<1; d++ ){
arg4_l[d]=ZERO_float;
}
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
update_gpu(arg0+n*4,
arg1+n*4,
arg2+n*4,
arg3+n*1,
arg4_l);
}
//global reductions
for ( int d=0; d<1; d++ ){
op_reduction<OP_INC>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}
}
//host stub function
void op_par_loop_update(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4){
float*arg4h = (float *)arg4.data;
int nargs = 5;
op_arg args[5];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(4);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[4].name = name;
OP_kernels[4].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: update");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_4
int nthread = OP_BLOCK_SIZE_4;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
//transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
reduct_size = MAX(reduct_size,sizeof(float));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg4.data = OP_reduct_h + reduct_bytes;
arg4.data_d = OP_reduct_d + reduct_bytes;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
((float *)arg4.data)[d+b*1] = ZERO_float;
}
}
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
mvReductArraysToDevice(reduct_bytes);
int nshared = reduct_size*nthread;
hipLaunchKernelGGL(( op_cuda_update), dim3(nblocks),dim3(nthread),nshared, 0,
(float *) arg0.data_d,
(float *) arg1.data_d,
(float *) arg2.data_d,
(float *) arg3.data_d,
(float *) arg4.data_d,
set->size );
//transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg4h[d] = arg4h[d] + ((float *)arg4.data)[d+b*1];
}
}
arg4.data = (char *)arg4h;
op_mpi_reduce(&arg4,arg4h);
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[4].time += wall_t2 - wall_t1;
OP_kernels[4].transfer += (float)set->size * arg0.size;
OP_kernels[4].transfer += (float)set->size * arg1.size;
OP_kernels[4].transfer += (float)set->size * arg2.size * 2.0f;
OP_kernels[4].transfer += (float)set->size * arg3.size;
}
| a51f217cdb01145b7446910e0c3bad14908c6115.cu | //
// auto-generated by op2.py
//
//user function
__device__ void update_gpu( const float *qold, float *q, float *res, const float *adt,
float *rms) {
float del, adti;
float rmsl = 0.0f;
adti = 1.0f / (*adt);
for (int n = 0; n < 4; n++) {
del = adti * res[n];
q[n] = qold[n] - del;
res[n] = 0.0f;
rmsl += del * del;
}
*rms += rmsl;
}
// CUDA kernel function
__global__ void op_cuda_update(
const float *__restrict arg0,
float *arg1,
float *arg2,
const float *__restrict arg3,
float *arg4,
int set_size ) {
float arg4_l[1];
for ( int d=0; d<1; d++ ){
arg4_l[d]=ZERO_float;
}
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
update_gpu(arg0+n*4,
arg1+n*4,
arg2+n*4,
arg3+n*1,
arg4_l);
}
//global reductions
for ( int d=0; d<1; d++ ){
op_reduction<OP_INC>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}
}
//host stub function
void op_par_loop_update(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4){
float*arg4h = (float *)arg4.data;
int nargs = 5;
op_arg args[5];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(4);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[4].name = name;
OP_kernels[4].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: update");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_4
int nthread = OP_BLOCK_SIZE_4;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
//transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
reduct_size = MAX(reduct_size,sizeof(float));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg4.data = OP_reduct_h + reduct_bytes;
arg4.data_d = OP_reduct_d + reduct_bytes;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
((float *)arg4.data)[d+b*1] = ZERO_float;
}
}
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
mvReductArraysToDevice(reduct_bytes);
int nshared = reduct_size*nthread;
op_cuda_update<<<nblocks,nthread,nshared>>>(
(float *) arg0.data_d,
(float *) arg1.data_d,
(float *) arg2.data_d,
(float *) arg3.data_d,
(float *) arg4.data_d,
set->size );
//transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg4h[d] = arg4h[d] + ((float *)arg4.data)[d+b*1];
}
}
arg4.data = (char *)arg4h;
op_mpi_reduce(&arg4,arg4h);
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[4].time += wall_t2 - wall_t1;
OP_kernels[4].transfer += (float)set->size * arg0.size;
OP_kernels[4].transfer += (float)set->size * arg1.size;
OP_kernels[4].transfer += (float)set->size * arg2.size * 2.0f;
OP_kernels[4].transfer += (float)set->size * arg3.size;
}
|
0ad7dfac911342e74b4d35a2398ba5f1b531cc38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pointwise_hist2_one_byte_templ.cuh"
#include "split_properties_helpers.cuh"
#include <hip/hip_cooperative_groups.h>
#include <catboost/libs/cuda_wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/libs/cuda_wrappers/arch.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <>
struct TLoadEntriesTrait<2, false> {
constexpr static ELoadType LoadType() {
#if __CUDA_ARCH__ < 520
return ELoadType::OneElement;
#else
return ELoadType::FourElements;
#endif
}
};
template <>
struct TLoadEntriesTrait<2, true> {
constexpr static ELoadType LoadType() {
#if __CUDA_ARCH__ < 520
return ELoadType::OneElement;
#elif __CUDA_ARCH__ < 700
return ELoadType::TwoElements;
#else
return ELoadType::FourElements;
#endif
}
};
template <>
struct TDeclarePassInnerOuterBitsTrait<2> {
constexpr static int Inner() {
return 2;
}
constexpr static int Outer() {
return 0;
}
};
template <int BLOCK_SIZE>
struct TPointHist<0, 2, BLOCK_SIZE> {
float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
return warpOffset;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
const int bin = (ci >> (24 - (f << 2))) & 255;
const float pass = bin != 128;
int offset = f;
offset += 8 * (bin & 127);
//
const int writeTime = (threadIdx.x >> 3) & 3;
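                // writeTime splits the 32 lanes into four groups of eight; each group
                // commits its shared-memory update in its own phase below, with a
                // warp-tile sync between phases, which appears to serialize otherwise
                // conflicting non-atomic += on the same histogram slot.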
const float val1 = pass * stat1;
offset += flag;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
Buffer[offset] += val1;
}
syncTile.sync();
}
const float val2 = pass * stat2;
offset = flag ? offset - 1 : offset + 1;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
Buffer[offset] += val2;
}
syncTile.sync();
}
}
}
#if __CUDA_ARCH__ < 700
__forceinline__ __device__ void AddPoint2(uint2 bin, const float2 t, const float2 w) {
AddPoint(bin.x, t.x, w.x);
AddPoint(bin.y, t.y, w.y);
}
#else
__forceinline__ __device__ void AddPoint2(uint2 ci, const float2 t, const float2 w) {
const bool flag = threadIdx.x & 1;
const float2 stat1 = flag ? t : w;
const float2 stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
const int binx = (ci.x >> (24 - (f << 2))) & 255;
const int biny = (ci.y >> (24 - (f << 2))) & 255;
float* buffer = Buffer + f + flag;
int offsetx = 8 * (binx & 127);
int offsety = 8 * (biny & 127);
const bool passx = binx != 128;
const bool passy = biny != 128;
const float val1x = passx ? stat1.x : 0.0f;
const float val1y = passy ? stat1.y : 0.0f;
const float val2x = passx ? stat2.x : 0.0f;
const float val2y = passy ? stat2.y : 0.0f;
const int writeTime = (threadIdx.x >> 3) & 3;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
buffer[offsetx] += val1x;
buffer[offsety] += val1y;
}
__syncwarp();
}
buffer += flag ? -1 : 1;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
buffer[offsetx] += val2x;
buffer[offsety] += val2y;
}
__syncwarp();
}
}
}
#endif
__forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float4 stat1 = flag ? t : w;
const float4 stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
const int binx = (ci.x >> (24 - (f << 2))) & 255;
const int biny = (ci.y >> (24 - (f << 2))) & 255;
const int binz = (ci.z >> (24 - (f << 2))) & 255;
const int binw = (ci.w >> (24 - (f << 2))) & 255;
float* buffer = Buffer + f + flag;
int offsetx = 8 * (binx & 127);
int offsety = 8 * (biny & 127);
int offsetz = 8 * (binz & 127);
int offsetw = 8 * (binw & 127);
const bool passx = binx != 128;
const bool passy = biny != 128;
const bool passz = binz != 128;
const bool passw = binw != 128;
const float val1x = passx ? stat1.x : 0.0f;
const float val1y = passy ? stat1.y : 0.0f;
const float val1z = passz ? stat1.z : 0.0f;
const float val1w = passw ? stat1.w : 0.0f;
const float val2x = passx ? stat2.x : 0.0f;
const float val2y = passy ? stat2.y : 0.0f;
const float val2z = passz ? stat2.z : 0.0f;
const float val2w = passw ? stat2.w : 0.0f;
const int writeTime = (threadIdx.x >> 3) & 3;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
buffer[offsetx] += val1x;
buffer[offsety] += val1y;
buffer[offsetz] += val1z;
buffer[offsetw] += val1w;
}
syncTile.sync();
}
buffer += flag ? - 1 : 1;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
buffer[offsetx] += val2x;
buffer[offsety] += val2y;
buffer[offsetz] += val2z;
buffer[offsetw] += val2w;
}
syncTile.sync();
}
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
        // for the first 32 bins; then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
const int fold0 = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 128;
{
const volatile float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 2 * f
+ w;
#pragma unroll
for (int k = 0; k < 4; ++k) {
int fold = fold0 + 32 * k;
Buffer[2 * (maxFoldCount * f + fold) + w] = src[8 * fold];
}
}
}
__syncthreads();
}
};
DEFINE_NON_BINARY(7)
}
| 0ad7dfac911342e74b4d35a2398ba5f1b531cc38.cu | #include "pointwise_hist2_one_byte_templ.cuh"
#include "split_properties_helpers.cuh"
#include <cooperative_groups.h>
#include <catboost/libs/cuda_wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/libs/cuda_wrappers/arch.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <>
struct TLoadEntriesTrait<2, false> {
constexpr static ELoadType LoadType() {
#if __CUDA_ARCH__ < 520
return ELoadType::OneElement;
#else
return ELoadType::FourElements;
#endif
}
};
template <>
struct TLoadEntriesTrait<2, true> {
constexpr static ELoadType LoadType() {
#if __CUDA_ARCH__ < 520
return ELoadType::OneElement;
#elif __CUDA_ARCH__ < 700
return ELoadType::TwoElements;
#else
return ELoadType::FourElements;
#endif
}
};
template <>
struct TDeclarePassInnerOuterBitsTrait<2> {
constexpr static int Inner() {
return 2;
}
constexpr static int Outer() {
return 0;
}
};
template <int BLOCK_SIZE>
struct TPointHist<0, 2, BLOCK_SIZE> {
float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
return warpOffset;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
const int bin = (ci >> (24 - (f << 2))) & 255;
const float pass = bin != 128;
int offset = f;
offset += 8 * (bin & 127);
//
const int writeTime = (threadIdx.x >> 3) & 3;
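                // writeTime splits the 32 lanes into four groups of eight; each group
                // commits its shared-memory update in its own phase below, with a
                // warp-tile sync between phases, which appears to serialize otherwise
                // conflicting non-atomic += on the same histogram slot.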
const float val1 = pass * stat1;
offset += flag;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
Buffer[offset] += val1;
}
syncTile.sync();
}
const float val2 = pass * stat2;
offset = flag ? offset - 1 : offset + 1;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
Buffer[offset] += val2;
}
syncTile.sync();
}
}
}
#if __CUDA_ARCH__ < 700
__forceinline__ __device__ void AddPoint2(uint2 bin, const float2 t, const float2 w) {
AddPoint(bin.x, t.x, w.x);
AddPoint(bin.y, t.y, w.y);
}
#else
__forceinline__ __device__ void AddPoint2(uint2 ci, const float2 t, const float2 w) {
const bool flag = threadIdx.x & 1;
const float2 stat1 = flag ? t : w;
const float2 stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
const int binx = (ci.x >> (24 - (f << 2))) & 255;
const int biny = (ci.y >> (24 - (f << 2))) & 255;
float* buffer = Buffer + f + flag;
int offsetx = 8 * (binx & 127);
int offsety = 8 * (biny & 127);
const bool passx = binx != 128;
const bool passy = biny != 128;
const float val1x = passx ? stat1.x : 0.0f;
const float val1y = passy ? stat1.y : 0.0f;
const float val2x = passx ? stat2.x : 0.0f;
const float val2y = passy ? stat2.y : 0.0f;
const int writeTime = (threadIdx.x >> 3) & 3;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
buffer[offsetx] += val1x;
buffer[offsety] += val1y;
}
__syncwarp();
}
buffer += flag ? -1 : 1;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
buffer[offsetx] += val2x;
buffer[offsety] += val2y;
}
__syncwarp();
}
}
}
#endif
__forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float4 stat1 = flag ? t : w;
const float4 stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
const int binx = (ci.x >> (24 - (f << 2))) & 255;
const int biny = (ci.y >> (24 - (f << 2))) & 255;
const int binz = (ci.z >> (24 - (f << 2))) & 255;
const int binw = (ci.w >> (24 - (f << 2))) & 255;
float* buffer = Buffer + f + flag;
int offsetx = 8 * (binx & 127);
int offsety = 8 * (biny & 127);
int offsetz = 8 * (binz & 127);
int offsetw = 8 * (binw & 127);
const bool passx = binx != 128;
const bool passy = biny != 128;
const bool passz = binz != 128;
const bool passw = binw != 128;
const float val1x = passx ? stat1.x : 0.0f;
const float val1y = passy ? stat1.y : 0.0f;
const float val1z = passz ? stat1.z : 0.0f;
const float val1w = passw ? stat1.w : 0.0f;
const float val2x = passx ? stat2.x : 0.0f;
const float val2y = passy ? stat2.y : 0.0f;
const float val2z = passz ? stat2.z : 0.0f;
const float val2w = passw ? stat2.w : 0.0f;
const int writeTime = (threadIdx.x >> 3) & 3;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
buffer[offsetx] += val1x;
buffer[offsety] += val1y;
buffer[offsetz] += val1z;
buffer[offsetw] += val1w;
}
syncTile.sync();
}
buffer += flag ? - 1 : 1;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
buffer[offsetx] += val2x;
buffer[offsety] += val2y;
buffer[offsetz] += val2z;
buffer[offsetw] += val2w;
}
syncTile.sync();
}
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
        // for the first 32 bins; then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
const int fold0 = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 128;
{
const volatile float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 2 * f
+ w;
#pragma unroll
for (int k = 0; k < 4; ++k) {
int fold = fold0 + 32 * k;
Buffer[2 * (maxFoldCount * f + fold) + w] = src[8 * fold];
}
}
}
__syncthreads();
}
};
DEFINE_NON_BINARY(7)
}
|
d156ea3ae43c4ed506439e1d65aeeeb9e0c49369.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 1
#define TW 4
#define TC 16
#define C 64
#define N 64
#define H 56
#define W 56
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
exit(-1);
}
}
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[16];
__shared__ float pad_temp_shared[2784];
__shared__ float kernel_shared[1152];
float pad_temp_shared_local[32];
float kernel_shared_local[32];
#pragma unroll
for (int ff_c_init = 0; ff_c_init < 2; ++ff_c_init) {
compute_local[(ff_c_init)] = 0.000000e+00f;
compute_local[((ff_c_init + 8))] = 0.000000e+00f;
compute_local[((ff_c_init + 2))] = 0.000000e+00f;
compute_local[((ff_c_init + 10))] = 0.000000e+00f;
compute_local[((ff_c_init + 4))] = 0.000000e+00f;
compute_local[((ff_c_init + 12))] = 0.000000e+00f;
compute_local[((ff_c_init + 6))] = 0.000000e+00f;
compute_local[((ff_c_init + 14))] = 0.000000e+00f;
}
for (int rc_outer = 0; rc_outer < 8; ++rc_outer) {
__syncthreads();
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 13; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 348)) < 8) {
if (((((int)threadIdx.z) * 116) + (((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 6)) < 464) {
if ((((((int)threadIdx.z) * 696) + (((int)threadIdx.y) * 13)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 2784) {
if (((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 696) {
pad_temp_shared[((((((int)threadIdx.z) * 696) + (((int)threadIdx.y) * 13)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((6 <= (((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 348)) && ((((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 348) < 342)) && (1 <= ((((int)blockIdx.x) * 4) + (((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 6)))) && (((((int)blockIdx.x) * 4) + (((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 6)) < 57)) ? data[((((((((rc_outer * 25088) + (((int)threadIdx.z) * 6272)) + ((((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 348) * 3136)) + (((((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 348) / 6) * 56)) + (((int)blockIdx.x) * 4)) + (((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 6)) - 57))] : 0.000000e+00f);
}
}
}
}
}
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 6; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.y) * 2) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 3)) / 24)) < 16) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.y) * 2) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 3)) / 3)) < 128) {
if ((((((int)threadIdx.z) * 96) + (((int)threadIdx.y) * 2)) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 3)) < 384) {
if ((((((int)threadIdx.z) * 288) + (((int)threadIdx.y) * 6)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 1152) {
if (((((int)threadIdx.y) * 6) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 288) {
if ((((((int)blockIdx.z) * 16) + (((int)threadIdx.z) * 4)) + (((((int)threadIdx.y) * 2) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 3)) / 24)) < 64) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.y) * 6)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[(((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.y) * 2) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 3)) / 24) * 576)) + (rc_outer * 72)) + ((((((int)threadIdx.y) * 2) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 3)) % 24) * 3)) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 % 3)))];
}
}
}
}
}
}
}
__syncthreads();
for (int ry_inner_outer = 0; ry_inner_outer < 3; ++ry_inner_outer) {
#pragma unroll
for (int rx_inner_outer = 0; rx_inner_outer < 3; ++rx_inner_outer) {
#pragma unroll
for (int ax1 = 0; ax1 < 8; ++ax1) {
pad_temp_shared_local[(ax1)] = pad_temp_shared[(((((ax1 * 348) + (((int)threadIdx.y) * 6)) + (ry_inner_outer * 6)) + rx_inner_outer))];
pad_temp_shared_local[((ax1 + 8))] = pad_temp_shared[((((((ax1 * 348) + (((int)threadIdx.y) * 6)) + (ry_inner_outer * 6)) + rx_inner_outer) + 1))];
pad_temp_shared_local[((ax1 + 16))] = pad_temp_shared[((((((ax1 * 348) + (((int)threadIdx.y) * 6)) + (ry_inner_outer * 6)) + rx_inner_outer) + 2))];
pad_temp_shared_local[((ax1 + 24))] = pad_temp_shared[((((((ax1 * 348) + (((int)threadIdx.y) * 6)) + (ry_inner_outer * 6)) + rx_inner_outer) + 3))];
}
#pragma unroll
for (int ax0 = 0; ax0 < 2; ++ax0) {
#pragma unroll
for (int ax11 = 0; ax11 < 8; ++ax11) {
kernel_shared_local[(((ax0 * 8) + ax11))] = kernel_shared[((((((((int)threadIdx.z) * 144) + (ax0 * 72)) + (ax11 * 9)) + (ry_inner_outer * 3)) + rx_inner_outer))];
kernel_shared_local[((((ax0 * 8) + ax11) + 16))] = kernel_shared[(((((((((int)threadIdx.z) * 144) + (ax0 * 72)) + (ax11 * 9)) + (ry_inner_outer * 3)) + rx_inner_outer) + 576))];
}
}
#pragma unroll
for (int rc_inner_inner = 0; rc_inner_inner < 8; ++rc_inner_inner) {
#pragma unroll
for (int ff_c = 0; ff_c < 2; ++ff_c) {
compute_local[(ff_c)] = (compute_local[(ff_c)] + (pad_temp_shared_local[(rc_inner_inner)] * kernel_shared_local[(((ff_c * 8) + rc_inner_inner))]));
compute_local[((ff_c + 8))] = (compute_local[((ff_c + 8))] + (pad_temp_shared_local[(rc_inner_inner)] * kernel_shared_local[((((ff_c * 8) + rc_inner_inner) + 16))]));
compute_local[((ff_c + 2))] = (compute_local[((ff_c + 2))] + (pad_temp_shared_local[((rc_inner_inner + 8))] * kernel_shared_local[(((ff_c * 8) + rc_inner_inner))]));
compute_local[((ff_c + 10))] = (compute_local[((ff_c + 10))] + (pad_temp_shared_local[((rc_inner_inner + 8))] * kernel_shared_local[((((ff_c * 8) + rc_inner_inner) + 16))]));
compute_local[((ff_c + 4))] = (compute_local[((ff_c + 4))] + (pad_temp_shared_local[((rc_inner_inner + 16))] * kernel_shared_local[(((ff_c * 8) + rc_inner_inner))]));
compute_local[((ff_c + 12))] = (compute_local[((ff_c + 12))] + (pad_temp_shared_local[((rc_inner_inner + 16))] * kernel_shared_local[((((ff_c * 8) + rc_inner_inner) + 16))]));
compute_local[((ff_c + 6))] = (compute_local[((ff_c + 6))] + (pad_temp_shared_local[((rc_inner_inner + 24))] * kernel_shared_local[(((ff_c * 8) + rc_inner_inner))]));
compute_local[((ff_c + 14))] = (compute_local[((ff_c + 14))] + (pad_temp_shared_local[((rc_inner_inner + 24))] * kernel_shared_local[((((ff_c * 8) + rc_inner_inner) + 16))]));
}
}
}
}
}
#pragma unroll
for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 2; ++ff_inner_inner_inner) {
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)))] = compute_local[(ff_inner_inner_inner)];
compute[(((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)) + 25088))] = compute_local[((ff_inner_inner_inner + 8))];
compute[(((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)) + 1))] = compute_local[((ff_inner_inner_inner + 2))];
compute[(((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)) + 25089))] = compute_local[((ff_inner_inner_inner + 10))];
compute[(((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)) + 2))] = compute_local[((ff_inner_inner_inner + 4))];
compute[(((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)) + 25090))] = compute_local[((ff_inner_inner_inner + 12))];
compute[(((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)) + 3))] = compute_local[((ff_inner_inner_inner + 6))];
compute[(((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)) + 25091))] = compute_local[((ff_inner_inner_inner + 14))];
}
}
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 3:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 3; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 4:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 4; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
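// conv2d: each block handles one (input-channel chunk, row tile) pair; per-thread
// partial sums over that chunk's TC channels are merged into the global output with
// atomicAdd, since the TCS channel-chunk blocks all contribute to the same pixels.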
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
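    // Each thread accumulates a TH x TW output tile for a single output channel
    // (n = lane_id) in registers and only touches global memory in the write-back.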
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
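    // Stage a padded (TH+2) x WPAD slab of TC input channels into shared memory;
    // h_offset shifts the copy down one row when the tile sits on the top border,
    // so the implicit zero padding above the image is preserved.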
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(14,1,4);
dim3 block(1,56,4);
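    // Launch shape baked into the TVM-generated kernel: 14 x-blocks * 4-wide tiles
    // appear to cover W=56, 4 z-blocks * 16 channels cover N=64, and each 1x56x4
    // thread block walks the 56 rows and its 16 output channels.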
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tvm;
hipEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
hipMemset(device_out, 0, sizeof(float)*N*H*W);
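    // The hand-written kernel needs TC*(TH+2)*WPAD floats of dynamic shared memory
    // (the trailing *4 is sizeof(float)), so raise the per-kernel limit before launch.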
chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
hipEventRecord(event_start);
hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/A100-layers-eval-modeling.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_tvm, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<
cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
| d156ea3ae43c4ed506439e1d65aeeeb9e0c49369.cu | #include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define TH 1
#define TW 4
#define TC 16
#define C 64
#define N 64
#define H 56
#define W 56
#define TCS ((C-1)/TC + 1)
#define THS ((H-1)/TH + 1)
#define TWS ((W-1)/TW+1)
#define WPAD (TWS*TW + 2)
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
exit(-1);
}
}
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[16];
__shared__ float pad_temp_shared[2784];
__shared__ float kernel_shared[1152];
float pad_temp_shared_local[32];
float kernel_shared_local[32];
#pragma unroll
for (int ff_c_init = 0; ff_c_init < 2; ++ff_c_init) {
compute_local[(ff_c_init)] = 0.000000e+00f;
compute_local[((ff_c_init + 8))] = 0.000000e+00f;
compute_local[((ff_c_init + 2))] = 0.000000e+00f;
compute_local[((ff_c_init + 10))] = 0.000000e+00f;
compute_local[((ff_c_init + 4))] = 0.000000e+00f;
compute_local[((ff_c_init + 12))] = 0.000000e+00f;
compute_local[((ff_c_init + 6))] = 0.000000e+00f;
compute_local[((ff_c_init + 14))] = 0.000000e+00f;
}
for (int rc_outer = 0; rc_outer < 8; ++rc_outer) {
__syncthreads();
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 13; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
if (((((int)threadIdx.z) * 2) + (((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 348)) < 8) {
if (((((int)threadIdx.z) * 116) + (((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 6)) < 464) {
if ((((((int)threadIdx.z) * 696) + (((int)threadIdx.y) * 13)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 2784) {
if (((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 696) {
pad_temp_shared[((((((int)threadIdx.z) * 696) + (((int)threadIdx.y) * 13)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((6 <= (((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 348)) && ((((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 348) < 342)) && (1 <= ((((int)blockIdx.x) * 4) + (((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 6)))) && (((((int)blockIdx.x) * 4) + (((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 6)) < 57)) ? data[((((((((rc_outer * 25088) + (((int)threadIdx.z) * 6272)) + ((((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 348) * 3136)) + (((((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 348) / 6) * 56)) + (((int)blockIdx.x) * 4)) + (((((int)threadIdx.y) * 13) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 6)) - 57))] : 0.000000e+00f);
}
}
}
}
}
#pragma unroll
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 6; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.y) * 2) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 3)) / 24)) < 16) {
if (((((int)threadIdx.z) * 32) + (((((int)threadIdx.y) * 2) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 3)) / 3)) < 128) {
if ((((((int)threadIdx.z) * 96) + (((int)threadIdx.y) * 2)) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 3)) < 384) {
if ((((((int)threadIdx.z) * 288) + (((int)threadIdx.y) * 6)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 1152) {
if (((((int)threadIdx.y) * 6) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 288) {
if ((((((int)blockIdx.z) * 16) + (((int)threadIdx.z) * 4)) + (((((int)threadIdx.y) * 2) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 3)) / 24)) < 64) {
kernel_shared[((((((int)threadIdx.z) * 288) + (((int)threadIdx.y) * 6)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[(((((((((int)blockIdx.z) * 9216) + (((int)threadIdx.z) * 2304)) + ((((((int)threadIdx.y) * 2) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 3)) / 24) * 576)) + (rc_outer * 72)) + ((((((int)threadIdx.y) * 2) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 / 3)) % 24) * 3)) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 % 3)))];
}
}
}
}
}
}
}
__syncthreads();
for (int ry_inner_outer = 0; ry_inner_outer < 3; ++ry_inner_outer) {
#pragma unroll
for (int rx_inner_outer = 0; rx_inner_outer < 3; ++rx_inner_outer) {
#pragma unroll
for (int ax1 = 0; ax1 < 8; ++ax1) {
pad_temp_shared_local[(ax1)] = pad_temp_shared[(((((ax1 * 348) + (((int)threadIdx.y) * 6)) + (ry_inner_outer * 6)) + rx_inner_outer))];
pad_temp_shared_local[((ax1 + 8))] = pad_temp_shared[((((((ax1 * 348) + (((int)threadIdx.y) * 6)) + (ry_inner_outer * 6)) + rx_inner_outer) + 1))];
pad_temp_shared_local[((ax1 + 16))] = pad_temp_shared[((((((ax1 * 348) + (((int)threadIdx.y) * 6)) + (ry_inner_outer * 6)) + rx_inner_outer) + 2))];
pad_temp_shared_local[((ax1 + 24))] = pad_temp_shared[((((((ax1 * 348) + (((int)threadIdx.y) * 6)) + (ry_inner_outer * 6)) + rx_inner_outer) + 3))];
}
#pragma unroll
for (int ax0 = 0; ax0 < 2; ++ax0) {
#pragma unroll
for (int ax11 = 0; ax11 < 8; ++ax11) {
kernel_shared_local[(((ax0 * 8) + ax11))] = kernel_shared[((((((((int)threadIdx.z) * 144) + (ax0 * 72)) + (ax11 * 9)) + (ry_inner_outer * 3)) + rx_inner_outer))];
kernel_shared_local[((((ax0 * 8) + ax11) + 16))] = kernel_shared[(((((((((int)threadIdx.z) * 144) + (ax0 * 72)) + (ax11 * 9)) + (ry_inner_outer * 3)) + rx_inner_outer) + 576))];
}
}
#pragma unroll
for (int rc_inner_inner = 0; rc_inner_inner < 8; ++rc_inner_inner) {
#pragma unroll
for (int ff_c = 0; ff_c < 2; ++ff_c) {
compute_local[(ff_c)] = (compute_local[(ff_c)] + (pad_temp_shared_local[(rc_inner_inner)] * kernel_shared_local[(((ff_c * 8) + rc_inner_inner))]));
compute_local[((ff_c + 8))] = (compute_local[((ff_c + 8))] + (pad_temp_shared_local[(rc_inner_inner)] * kernel_shared_local[((((ff_c * 8) + rc_inner_inner) + 16))]));
compute_local[((ff_c + 2))] = (compute_local[((ff_c + 2))] + (pad_temp_shared_local[((rc_inner_inner + 8))] * kernel_shared_local[(((ff_c * 8) + rc_inner_inner))]));
compute_local[((ff_c + 10))] = (compute_local[((ff_c + 10))] + (pad_temp_shared_local[((rc_inner_inner + 8))] * kernel_shared_local[((((ff_c * 8) + rc_inner_inner) + 16))]));
compute_local[((ff_c + 4))] = (compute_local[((ff_c + 4))] + (pad_temp_shared_local[((rc_inner_inner + 16))] * kernel_shared_local[(((ff_c * 8) + rc_inner_inner))]));
compute_local[((ff_c + 12))] = (compute_local[((ff_c + 12))] + (pad_temp_shared_local[((rc_inner_inner + 16))] * kernel_shared_local[((((ff_c * 8) + rc_inner_inner) + 16))]));
compute_local[((ff_c + 6))] = (compute_local[((ff_c + 6))] + (pad_temp_shared_local[((rc_inner_inner + 24))] * kernel_shared_local[(((ff_c * 8) + rc_inner_inner))]));
compute_local[((ff_c + 14))] = (compute_local[((ff_c + 14))] + (pad_temp_shared_local[((rc_inner_inner + 24))] * kernel_shared_local[((((ff_c * 8) + rc_inner_inner) + 16))]));
}
}
}
}
}
#pragma unroll
for (int ff_inner_inner_inner = 0; ff_inner_inner_inner < 2; ++ff_inner_inner_inner) {
compute[((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)))] = compute_local[(ff_inner_inner_inner)];
compute[(((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)) + 25088))] = compute_local[((ff_inner_inner_inner + 8))];
compute[(((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)) + 1))] = compute_local[((ff_inner_inner_inner + 2))];
compute[(((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)) + 25089))] = compute_local[((ff_inner_inner_inner + 10))];
compute[(((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)) + 2))] = compute_local[((ff_inner_inner_inner + 4))];
compute[(((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)) + 25090))] = compute_local[((ff_inner_inner_inner + 12))];
compute[(((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)) + 3))] = compute_local[((ff_inner_inner_inner + 6))];
compute[(((((((((int)blockIdx.z) * 50176) + (((int)threadIdx.z) * 6272)) + (ff_inner_inner_inner * 3136)) + (((int)threadIdx.y) * 56)) + (((int)blockIdx.x) * 4)) + 25091))] = compute_local[((ff_inner_inner_inner + 14))];
}
}
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
    unsigned int kernelSize = R*S*C*N; // total number of filter weights
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
    unsigned int kernelSize = R*S*C*N; // total number of filter weights
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
    unsigned int kernelSize = R*S*C*N; // total number of filter weights
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
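// Stages a TC x (TH+2) x WPAD input tile into shared memory, warp by warp.
// The "+ 1" column offset centres each row inside WPAD, leaving the
// zero-initialised left/right padding columns in place. h_offset == 1 (tiles
// at the top of the image) starts writing at shared row 1 so row 0 stays zero
// as the top halo; h_offset == 0 loads the real row above the tile into row 0.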
__device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start,
unsigned int h_end, unsigned int h_offset, unsigned int c_start,
unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){
switch(h_offset){
case 0:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
case 1:
for(unsigned int c = warp_id; c<TC; c+=TWS){
for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){
unsigned int r = i/W;
unsigned int s = i%W;
shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i];
}
}
break;
}
}
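// Flushes the per-thread TH x TW accumulator to global memory with atomicAdd.
// The nested switch clamps the store to the valid output extent (write_h rows
// by write_w columns) for tiles that overhang the right/bottom border; only
// write_h == 1 is emitted here, consistent with TH == 1 in this configuration.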
__device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){
switch(write_h){
case 1:
switch(write_w){
case 1:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 1; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 2:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 2; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 3:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 3; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
case 4:
#pragma unroll
for (unsigned int th = 0; th < 1; ++th) {
#pragma unroll
for (unsigned int tw = 0; tw < 4; ++tw) {
atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]);
}
}
break;
}
break;
}
}
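// Hand-written direct 3x3 convolution: each block covers one (channel tile,
// output-row tile) pair, each thread owns one output channel n and a TW-wide
// strip of a single output row, accumulating the fully unrolled stencil from
// the shared-memory tile. atomicAdd in switch_write_back accumulates the
// partial sums contributed by the different channel tiles.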
__global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){
extern __shared__ float shared_input[];
const unsigned int tile_id = blockIdx.x;
const unsigned int tc_id = tile_id / THS;
const unsigned int th_id = tile_id % THS;
const unsigned int tw_id = threadIdx.x / N;
const int h_out_start = th_id * TH;
const int w_out_start = tw_id * TW;
const unsigned int warp_id = tw_id;
const unsigned int lane_id = threadIdx.x % N;
float data_array[9];
float temp_result[TH*TW] = {0.0f};
for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){
shared_input[i] = 0.0f;
}
unsigned int n = lane_id;
unsigned int c_offset = tc_id * TC;
int h_offset = (h_out_start == 0)?1:0;
int h_padded_start = h_out_start;
int h_padded_end = min(h_padded_start + TH + 2, H + 2);
int h_non_padded_start = max(h_out_start - 1, 0);
int h_non_padded_end = min(H, h_padded_end - 1);
__syncthreads();
load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N);
__syncthreads();
#pragma unroll
for(unsigned int c=0;c<TC;c++){
#pragma unroll
for(unsigned int r=0;r<R;++r){
#pragma unroll
for(unsigned int s=0;s<S;++s){
data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n];
}
}
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[0];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[0];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[1];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[0];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[1];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 3]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[1];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 4]*data_array[2];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 5]*data_array[2];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[3];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[3];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[4];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[3];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[4];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 3]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[4];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 4]*data_array[5];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 5]*data_array[5];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[6];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[6];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[7];
temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 2]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[6];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[7];
temp_result[1] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 3]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[7];
temp_result[2] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 4]*data_array[8];
temp_result[3] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 5]*data_array[8];
}
switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result);
}
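// Sum of absolute differences between two host buffers; used below to compare
// the direct kernel's output against the TVM-generated kernel's output.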
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
        diff += fabs(x[i] - y[i]);
}
return diff;
}
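// Benchmark driver: random input and an all-ones filter, one warm-up pass per
// cuDNN algorithm, then timed runs of GEMM, Winograd, FFT, the TVM kernel and
// the direct kernel; results are appended as a CSV row and echoed to stdout.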
int main(void){
float *input = new float[C*H*W];
time_t t;
float *matrix;
cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float));
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(14,1,4);
dim3 block(1,56,4);
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tvm;
cudaEventElapsedTime(&time_tvm, event_start, event_stop);
float *out_tvm = new float[N*H*W];
cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemset(device_out, 0, sizeof(float)*N*H*W);
chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4));
cudaEventRecord(event_start);
conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
ofstream outfile;
char buffer[1000];
int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W,
cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc,
cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc);
outfile.open("../../evaluation_outcome/A100-layers-eval-modeling.csv", std::ios_base::app);
outfile << buffer;
float difference = check_diff(out_tvm, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<
time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<
cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl;
return 0;
}
|
7219d60c534cec8be55b3d4448fecdf4f27f483a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.h"
#include "caps.h"
#include <string>
#include <sstream>
#include "DMatrix.h"
#include "CuMatrix.h"
#include <float.h>
#include <limits>
#include "Maths.h"
#include "Kernels.h"
using std::numeric_limits;
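// Host/device utilities for the CuMatrix library: array and matrix printers,
// launch-configuration helpers, pointer-attribute queries, memory-size
// formatting, the IndexArray helper and small vector-type packers.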
template <typename T> __host__ __device__ void printColoArray(const T* array, int n, int direction) {
#ifndef __CUDA_ARCH__
printf("caller %s\n", b_util::caller().c_str());
#endif
flprintf("array %p[0::%d] ", array, n);
for(int i =0; i < n; i++) {
printf("%f", (float) array[i]);
if(i < n -1) printf(", ");
}
printf("\n");
}
template __host__ __device__ void printColoArray<float>(const float*,int,int);
template __host__ __device__ void printColoArray<double>(const double*,int,int);
template __host__ __device__ void printColoArray<int>(const int*,int,int);
template __host__ __device__ void printColoArray<uint>(const uint*,int,int);
template __host__ __device__ void printColoArray<long>(const long*, int,int);
template __host__ __device__ void printColoArray<ulong>(const ulong*, int,int);
template <typename T> __host__ __device__ void prtColoArrayDiag(
const T* array,const char*msg,int line, int pitch,int n, int direction, T notEq) {
flprintf("%s:%d h arraydiag %p p:%d[0::%d] notEq %f\n", msg, line, array, pitch, n, notEq);
int neqCnt = 0;
int idxFb= -1;
const T* firstBad = nullptr;
for(int i =0; i < n; i++) {
if(!notEq || array[i * (pitch + 1)] != notEq) {
printf("%p + %d (%p) = %f != %f", array, direction*i, array + direction * i * (pitch + 1), (float) array[i * (pitch + 1)], notEq);
if(i < n -1) printf(", ");
if(notEq) { idxFb = i; firstBad = array + i * (pitch + 1); neqCnt++; }
}
}
if(!neqCnt)
flprintf("\nfound none != %f\n",notEq);
else {
flprintf("found %d unexpected values starting at %p idx %d\n", neqCnt, firstBad,idxFb);
}
assert(neqCnt == 0);
}
template __host__ __device__ void prtColoArrayDiag<float>(const float*,const char*msg,int line,int,int,int,float);
template __host__ __device__ void prtColoArrayDiag<double>(const double*,const char*msg,int line,int,int,int,double);
template __host__ __device__ void prtColoArrayDiag<int>(const int*,const char*msg,int line,int,int,int,int);
template __host__ __device__ void prtColoArrayDiag<uint>(const uint*,const char*msg,int line,int,int,int,uint);
template __host__ __device__ void prtColoArrayDiag<long>(const long*,const char*msg,int line,int,int,int,long);
template __host__ __device__ void prtColoArrayDiag<ulong>(const ulong*,const char*msg,int line,int,int,int,ulong);
template <typename T> __host__ __device__ void cntColoArrayDiag(
const T* array,const char*msg,int line, int pitch,int n, int direction, T test) {
flprintf("%s:%d h arraydiag %p p:%d[0::%d] test %f\n", msg, line, array, pitch, n, test);
int neqCnt = 0;
int idxNeq= -1;
int eqCnt = 0;
int idxEq= -1;
const T* firstNeq = nullptr,* firstEq = nullptr;
for(int i =0; i < n; i++) {
if(!test || array[i * (pitch + 1)] != test) {
//printf("%p + %d (%p) = %f != %f", array, direction*i, array + direction * i * (pitch + 1), (float) array[i * (pitch + 1)], notEq);
if(test && firstNeq == nullptr) { idxNeq = i; firstNeq= array + i * (pitch + 1); }
neqCnt++;
}else {
if(test&& firstEq == nullptr) { idxEq = i; firstEq = array + i * (pitch + 1); }
eqCnt++;
}
}
flprintf("\nfound %d neq %f, %d eq out of %d; first neq @ %p idx %d first eq %p idx %d\n", neqCnt, test, eqCnt, n, firstNeq,idxNeq,firstEq,idxEq);
}
template __host__ __device__ void cntColoArrayDiag<float>(const float*,const char*msg,int line,int,int,int,float);
template __host__ __device__ void cntColoArrayDiag<double>(const double*,const char*msg,int line,int,int,int,double);
template __host__ __device__ void cntColoArrayDiag<int>(const int*,const char*msg,int line,int,int,int,int);
template __host__ __device__ void cntColoArrayDiag<uint>(const uint*,const char*msg,int line,int,int,int,uint);
template __host__ __device__ void cntColoArrayDiag<long>(const long*,const char*msg,int line,int,int,int,long);
template __host__ __device__ void cntColoArrayDiag<ulong>(const ulong*,const char*msg,int line,int,int,int,ulong);
template <typename T> __host__ __device__ void prtColoArrayInterval(const T* array,
const char* msg, long n, int sampleElemCount, int sampleCount) {
int step = n / sampleCount;
printf("%s array %p n %ld selems %d samCnt %d step %d\n", msg, array, n, sampleElemCount, sampleCount, step);
	printf("array %p[0::%ld]\n", array, n);
for(int s = 0; s < n -sampleElemCount; s += step) {
printf(" %d::%d --> ", s, s+ sampleElemCount);
for(int i =0; i < sampleElemCount; i++) {
printf(" %f", (float) array[s + i ]);
if(i < sampleElemCount -1) printf(", ");
}
printf("\n");
}
printf("\n");
}
template __host__ __device__ void prtColoArrayInterval<float>(const float*, const char*,long,int,int);
template __host__ __device__ void prtColoArrayInterval<double>(const double*, const char*,long,int,int);
template __host__ __device__ void prtColoArrayInterval<int>(const int*, const char*,long,int,int);
template __host__ __device__ void prtColoArrayInterval<uint>(const uint*, const char*,long,int,int);
template __host__ __device__ void prtColoArrayInterval<long>(const long*, const char*, long,int,int);
template __host__ __device__ void prtColoArrayInterval<ulong>(const ulong*, const char*, long,int,int);
__host__ void b_util::warmupL() {
	outln("warming up");
	hipLaunchKernelGGL(( warmup), dim3(1),dim3(1), 0, 0, );
	outln("blocking until warmup kernel completes");
checkCudaError(hipDeviceSynchronize());
}
__host__ __device__ const char* b_util::tileDir(TileDirection tileD) {
switch(tileD) {
case tdNeither:
return "tdNeither";
case tdRows:
return "tdRows";
case tdCols:
return "tdCols";
case tdBoth:
return "tdBoth";
default:
return "???";
}
}
__host__ __device__ bool b_util::isPow2(uint x) {
return ((x&(x-1))==0);
}
__host__ __device__ int b_util::threadIdx1D() {
#ifdef __CUDA_ARCH__
return blockIdx.x * blockDim.x * 2 + threadIdx.x;
#else
	return 0; // no device thread context on the host path
#endif
}
__host__ __device__ int b_util::threadIdx1Dblock(uint blocksize) {
#ifdef __CUDA_ARCH__
return blockIdx.x * blocksize * 2 + threadIdx.x;
#else
	return 0; // no device thread context on the host path
#endif
}
__host__ __device__ const char* tdStr(TileDirection td) {
switch (td ){
case tdNeither:
return "tdNeither";
case tdRows:
return "tdRows";
case tdCols:
return "tdCols";
}
return "unknown";
}
__host__ __device__ bool b_util::adjustExpectations(dim3& grid, dim3& block, const hipFuncAttributes& atts) {
uint curBlk = block.x * block.y;
flprintf("curBlk %d\n",curBlk);
float factor = 1.0 * curBlk / atts.maxThreadsPerBlock;
flprintf("factor %f\n",factor);
if(factor > 1) {
flprintf("was block(%d,%d)\n", block.x, block.y);
flprintf("factor %d\n", factor);
if(block.x > block.y) {
block.x /= factor;
grid.x *= factor;
} else {
block.y /= factor;
grid.y *= factor;
}
flprintf("now block(%d,%d)\n", block.x, block.y);
return true;
}
return false;
}
__host__ __device__ bool b_util::onGpuQ() {
#ifdef __CUDA_ARCH__
return true;
#else
return false;
#endif
}
__global__ void devFree(void * mem) {
#ifdef CuMatrix_Enable_Cdp
FirstThread {
cherr(hipFree(mem));
}
#endif
}
__host__ void b_util::freeOnDevice(void * mem) {
hipLaunchKernelGGL(( devFree), dim3(1),dim3(1), 0, 0, mem);
}
__device__ __host__ uint b_util::nextPowerOf2(uint x) {
if (x < 2) {
return 2;
}
x = maskShifts(--x);
return ++x;
}
__device__ __host__ uint b_util::prevPowerOf2(uint x) {
x = maskShifts(x);
return x - (x >> 1);
}
template<> __host__ __device__ float util<float>::epsilon() {
return 1e-6;
}
template<> __host__ __device__ double util<double>::epsilon() {
return 1e-10;
}
template<> __host__ __device__ long util<long>::epsilon() {
return 0;
}
template<> __host__ __device__ ulong util<ulong>::epsilon() {
return 0;
}
template<> __host__ __device__ uint util<uint>::epsilon() {
return 0;
}
template<> __host__ __device__ int util<int>::epsilon() {
return 0;
}
template<typename T> __host__ __device__ T util<T>::minValue() {
#ifndef __CUDA_ARCH__
return numeric_limits<T>::min();
#else
setLastError(notImplementedEx);
return 0;
#endif
}
template <> __host__ __device__ uint util<uint>::minValue() {
return 0;
}
template<typename T> __host__ __device__ bool util<T>::almostEquals(T t1, T t2, T epsilon) {
if(checkDebug(debugVerbose))flprintf("t2 %f - t1 %f < epsilon %f ::abs(t2 - t1) %f (%d)\n",
(double)t2, (double) t1, (double) epsilon,(double) ::fabs(t2 - t1), ::fabs(t2 - t1) < epsilon);
return ::fabs(t2 - t1) < epsilon;
}
template __host__ __device__ bool util<long>::almostEquals(long, long, long);
template __host__ __device__ bool util<unsigned long>::almostEquals(unsigned long, unsigned long, unsigned long);
template __host__ __device__ bool util<double>::almostEquals(double, double, double);
template __host__ __device__ bool util<int>::almostEquals(int, int, int);
template __host__ __device__ bool util<float>::almostEquals(float, float, float);
template __host__ __device__ bool util<unsigned int>::almostEquals(unsigned int, unsigned int, unsigned int);
template<typename T> __host__ __device__ bool util<T>::almostEqualsNormalized(T t1, T t2, T epsilon) {
if(checkDebug(debugVerbose))flprintf("t2 %f - t1 %f < epsilon %f ::abs(t2 - t1) %f (%d)\n",
(double)t2, (double) t1, (double) epsilon,(double) ::fabs(t2 - t1), ::fabs(t2 - t1) < epsilon);
return (::fabs(t2 - t1))/t1 < epsilon;
}
template __host__ __device__ bool util<long>::almostEqualsNormalized(long, long, long);
template __host__ __device__ bool util<unsigned long>::almostEqualsNormalized(unsigned long, unsigned long, unsigned long);
template __host__ __device__ bool util<double>::almostEqualsNormalized(double, double, double);
template __host__ __device__ bool util<int>::almostEqualsNormalized(int, int, int);
template __host__ __device__ bool util<float>::almostEqualsNormalized(float, float, float);
template __host__ __device__ bool util<unsigned int>::almostEqualsNormalized(unsigned int, unsigned int, unsigned int);
template<typename T>__global__ void vectorAddPitch(T *c, const T *a, const T *b, int n, int p) {
uint idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
int tidx = idx * p;
tidx[c] = tidx[a] + tidx[b]; // fun with [] syntax
}
}
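// Rough element-wise-add throughput probe: builds three n-element vectors,
// times a single vectorAddPitch launch and converts the kernel time into adds
// per second (one add per output element), reported in units of Giga.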
template<typename T> __host__ float util<T>::vAddGflops(int device){
int orgDev;
cherr(hipPeekAtLastError());
cherr(hipGetDevice(&orgDev));
if(orgDev != device) {
ExecCaps_visitDevice(device);
}
flprintf("util<T>::vAddGflops(device = %d, %s) set device\n", device, gpuNames[device].c_str());
	outln("checking for dev " << device);
//usedDevMem();
int n = 1000000;
//outln("before mc");
//usedDevMem();
CuMatrix<T> mc = CuMatrix<T>::ones(n,1);
outln("after mc " << mc.toShortString());
T mcsum = mc.sum();
outln("mcsum " << mcsum);
assert(mcsum == (T)n);
//outln("checking fer dev " << device);
//usedDevMem();
//outln("made mc\n " << mc.syncBuffers());
//outln("after mc syncbuffes \n ");
//printColoArrayInterval(mc.elements, n, 10, 40);
//usedDevMem();
CuMatrix<T> m2 = CuMatrix<T>::fill( n,1,(T)2);
//outln("made m2\n " << m2.syncBuffers());
//usedDevMem();
CuMatrix<T> m3 = CuMatrix<T>::zeros( n,1);
outln("made mc " << mc.toShortString() << ", " << m2.toShortString() << ", m3 " << m3.toShortString());
//m3.syncBuffers();
DMatrix<T> dc = mc.asDmatrix();
DMatrix<T> d2 = m2.asDmatrix();
DMatrix<T> d3 = m3.asDmatrix();
outln("after d1-d3");
//usedDevMem();
//flprintf("util<T>::vAddGflops dc.el %p d2.el %p d3.el %p\n", dc.elements, d2.elements, d3.elements);
uint blockSize = 1024;
CuTimer timer;
timer.start();
cherr(hipPeekAtLastError());
MemMgr<T>::checkValid( dc.elements);
MemMgr<T>::checkValid( d2.elements);
MemMgr<T>::checkValid( d3.elements);
outln("all valid d1-d3");
hipLaunchKernelGGL(( vectorAddPitch<T>), dim3(DIV_UP(n,blockSize)), dim3(blockSize), 0, 0, d3.elements, dc.elements, d2.elements, n, mc._tileP);
cherr(hipDeviceSynchronize());
outln("hipDeviceSynchronize after vectorAddPitch");
m3.invalidateHost();
float addTimeMs = timer.stop();
timer.start();
m3.syncBuffers();
//outln("m3 " << m3 );
T m3sum = m3.sum();
outln("m3sum " << m3sum );
assert(m3sum == 3 * n);
float memTimeMs = timer.stop();
// b_util::usedDmem(1);
//printColoArrayInterval(m3.elements, n, 10, 40);
//flprintf("n %u adds took exeTimeMs %f millis (%f s)\n", n, exeTimeMs, exeTimeMs/Kilo);
	addTimeMs /= Kilo; // convert milliseconds to seconds
	flprintf("n/addTimeMs %f\n", n/addTimeMs);
	float nExe = n/addTimeMs; // adds per second
	flprintf("n/exeTimeS %f\n", nExe);
	flprintf("nExe/Giga %f\n", nExe/Giga);
// one add per result element, so n adds per invocation
if(orgDev != device) {
ExecCaps_restoreDevice(orgDev);
}
	return nExe / Giga; // adds per second scaled to Giga-adds/s
}
template __host__ float util<float>::vAddGflops(int device);
template __host__ float util<double>::vAddGflops(int device);
template __host__ float util<ulong>::vAddGflops(int device);
template<> __host__ __device__ float util<float>::minValue() {
return FLT_MIN;
}
template<> __host__ __device__ double util<double>::minValue() {
return DBL_MIN;
}
template<> __host__ __device__ long util<long>::minValue() {
return 0;
}
template<> __host__ __device__ ulong util<ulong>::minValue() {
return 0;
}
template<> __host__ __device__ int util<int>::minValue() {
return INT_MIN;
}
template<typename T> __host__ __device__ T util<T>::maxValue() {
#ifndef __CUDA_ARCH__
return numeric_limits<T>::max();
#else
setLastError(notImplementedEx);
return 0;
#endif
}
template<> __host__ __device__ float util<float>::maxValue() {
return FLT_MAX;
}
template<> __host__ __device__ double util<double>::maxValue() {
return DBL_MAX;
}
template<> __host__ __device__ long util<long>::maxValue() {
return LONG_MAX;
}
template<> __host__ __device__ ulong util<ulong>::maxValue() {
return 0xffffffff;
}
template<> __host__ __device__ int util<int>::maxValue() {
return INT_MAX;
}
template<> __host__ __device__ uint util<uint>::maxValue() {
	return 0xFFFFFFFF; // UINT_MAX
}
/*
namespace mods {
static const char * host = "host";
static const char * device = "device";
static const char * synced = "synced";
static const char * neither = "neither";
}
*/
#define mods_host "host"
#define mods_device "device"
#define mods_synced "synced"
#define mods_neither "neither"
__host__ __device__ const char * b_util::modStr(Modification lastMod) {
switch (lastMod) {
case mod_host:
return "lstmd: " mods_host;
case mod_device:
return "lstmd: " mods_device;
case mod_synced:
return "lstmd: " mods_synced;
case mod_neither:
return "lstmd: " mods_neither;
default:
return "????";
}
}
int b_util::kernelOccupancy( void* kernel, int* maxBlocks, int blockSize) {
ExecCaps* curr = ExecCaps::currCaps();
//hipOccupancyMaxActiveBlocksPerMultiprocessor(maxBlocks, kernel, blockSize,0);
int device = ExecCaps::currDev();
hipDeviceProp_t prop;
int numBlocks;
	cherr(hipGetDeviceProperties(&prop, device));
hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, kernel,
blockSize, 0);
int activeWarps = numBlocks * blockSize / prop.warpSize;
int maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize;
//int activeWarps = *maxBlocks * blockSize / curr->deviceProp.warpSize;
//int maxWarps = curr->deviceProp.maxThreadsPerMultiProcessor /curr->deviceProp.warpSize;;
outln("Occupancy " << (double) activeWarps/maxWarps * 100 << "%");
return 0;
}
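// Prints where a pointer lives (host/device, managed or not) via
// hipPointerGetAttributes, temporarily switching to the pointer's device when
// it differs from the current one; in device code it only reports that the
// query is unavailable.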
template<typename T> __host__ __device__ void b_util::pPtrAtts(const T * ptr) {
#ifndef __CUDA_ARCH__
int ptrDev = b_util::getDevice((void*)ptr);
int orgDev = ExecCaps::currDev();
outln("b_util::pPtrAtts( " << ptr << ")");
hipPointerAttribute_t ptrAtts;
if(ptrDev != orgDev) {
ExecCaps_visitDevice(ptrDev);
}
chsuckor(hipPointerGetAttributes(&ptrAtts, ptr), hipErrorInvalidValue);
if(ptrDev != orgDev) {
ExecCaps_restoreDevice(orgDev);
}
stringstream ss;
flprintf("raw %p %s %p, %s %p gpu %d, managed %s, type %s\n",
ptr,
ptrAtts.devicePointer != nullptr ? "dev: " : "",
ptrAtts.devicePointer != nullptr ? ptrAtts.devicePointer : 0,
ptrAtts.hostPointer != nullptr ? "host: " : "",
ptrAtts.hostPointer != nullptr ? ptrAtts.hostPointer : 0,
ptrAtts.device,
tOrF(ptrAtts.isManaged),
ptrAtts.memoryType == hipMemoryTypeHost ? "host"
: ptrAtts.memoryType == hipMemoryTypeDevice ? "device" : "unknown");
#else
flprintf("b_util::pPtrAtts(..) can't call hipPointerGetAttributes() from device, ptr %p\n", ptr );
#endif
}
template __host__ __device__ void b_util::pPtrAtts<UnaryOpIndexF<float,0> const>(UnaryOpIndexF<float,0> const*);
template __host__ __device__ void b_util::pPtrAtts<UnaryOpIndexF<double,0> const>(UnaryOpIndexF<double,0> const*);
template __host__ __device__ void b_util::pPtrAtts<UnaryOpIndexF<unsigned long,0> const>(UnaryOpIndexF<unsigned long,0> const*);
template __host__ __device__ void b_util::pPtrAtts<float (*)(float) >(float (* const *)(float));
template __host__ __device__ void b_util::pPtrAtts<double (*)(double)>(double (* const *)(double));
template __host__ __device__ void b_util::pPtrAtts<unsigned long (*)(unsigned long)>(unsigned long (* const *)(unsigned long));
template __host__ __device__ void b_util::pPtrAtts<void>( const void*);
template __host__ __device__ void b_util::pPtrAtts<constFiller<float> >( const constFiller<float>*);
template __host__ __device__ void b_util::pPtrAtts<constFiller<double> >( const constFiller<double>*);
template __host__ __device__ void b_util::pPtrAtts<constFiller<unsigned long> >( const constFiller<unsigned long>*);
template __host__ __device__ void b_util::pPtrAtts<float const>(float const*);
template __host__ __device__ void b_util::pPtrAtts<ulong const>(ulong const*);
template __host__ __device__ void b_util::pPtrAtts<double const>(double const*);
template __host__ __device__ void b_util::pPtrAtts<float>( const float*);
template __host__ __device__ void b_util::pPtrAtts<double>( const double*);
template __host__ __device__ void b_util::pPtrAtts<int>( const int*);
template __host__ __device__ void b_util::pPtrAtts<uint>( const uint*);
template __host__ __device__ void b_util::pPtrAtts<long>( const long*);
template __host__ __device__ void b_util::pPtrAtts<ulong>( const ulong*);
__host__ __device__ void b_util::pFuncPtrAtts( const void * ptr) {
//#ifndef __CUDA_ARCH__
#ifdef CuMatrix_Enable_Cdp
struct hipFuncAttributes fa;
cherr(hipFuncGetAttributes(&fa, ptr));
flprintf("\n\tfa.binaryVersion %d\n\tfa.cacheModeCA %d\n\tfa.constSizeBytes %d\n\tfa.localSizeBytes %d\n\t,fa.maxThreadsPerBlock %d\n\tfa.numRegs %d\n\tptxVersion %d\n\tsharedSizeBytes %d\n",
fa.binaryVersion,fa.cacheModeCA,fa.constSizeBytes,fa.localSizeBytes,fa.maxThreadsPerBlock,fa.numRegs,fa.ptxVersion,fa.sharedSizeBytes);
#endif
/*
#else
flprintf("can't call hipPointerGetAttributes() from device, ptr %p\n", ptr );
#endif
*/
}
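// Chooses the hipMemcpyKind for a dst/src pair by querying both pointers'
// attributes (visiting their devices as needed); device code falls back to
// hipMemcpyDefault since the attribute query is host-only.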
template<typename T> __host__ __device__ enum hipMemcpyKind b_util::copyKind( T * dst, T const * const src ) {
#ifndef __CUDA_ARCH__
int dstDevice = b_util::getDevice((void*)dst);
int srcDevice = b_util::getDevice((void*)src);
int orgDev = ExecCaps::currDev();
outln("b_util::copyKind( dst " << dst << ", src " << src << ") orgDev " << orgDev << ", dstDev " << dstDevice << ", srcDev " << srcDevice);
hipPointerAttribute_t padst, pasrc;
if(dstDevice != orgDev) {
outln("b_util::copyKind() visiting dstDevice " << dstDevice);
ExecCaps_visitDevice(dstDevice);
}
chsuckor(hipPointerGetAttributes(&padst, dst), hipErrorInvalidValue);
if(dstDevice != orgDev) {
outln("b_util::copyKind() restoring orgDev " << orgDev);
ExecCaps_restoreDevice(orgDev);
}
if(srcDevice != orgDev) {
outln("b_util::copyKind() visiting srcDevice " << srcDevice);
ExecCaps_visitDevice(srcDevice);
}
chsuckor(hipPointerGetAttributes(&pasrc, src), hipErrorInvalidValue);
if(srcDevice != orgDev) {
outln("b_util::copyKind() restoring orgDev " << orgDev);
ExecCaps_restoreDevice(orgDev);
}
outln("b_util::copyKind() pasrc.memoryType " << pasrc.memoryType << "; padst.memoryType " << padst.memoryType);
if(pasrc.memoryType == hipMemoryTypeHost) {
if(padst.memoryType == hipMemoryTypeHost)
return hipMemcpyHostToHost;
else
return hipMemcpyHostToDevice;
}else {
if(padst.memoryType == hipMemoryTypeHost)
return hipMemcpyDeviceToHost;
else
return hipMemcpyDeviceToDevice;
}
#else
flprintf("b_util<T>::copyType(..) can't call hipPointerGetAttributes() from device, dst %p\n", dst );
return hipMemcpyDefault; // kind inferred from pointer; for managed buffers
#endif
}
template __host__ __device__ enum hipMemcpyKind b_util::copyKind<float>(float*, float const*);
template __host__ __device__ enum hipMemcpyKind b_util::copyKind<double>(double*, double const*);
template __host__ __device__ enum hipMemcpyKind b_util::copyKind<unsigned long>(unsigned long*, unsigned long const*);
__host__ __device__ int b_util::maxBlockThreads(const void * ptr) {
struct hipFuncAttributes fa;
cherr(hipFuncGetAttributes(&fa, ptr));
return fa.maxThreadsPerBlock;
}
__host__ CUDART_DEVICE bool b_util::validLaunchQ( const void* pKernel, dim3 grid, dim3 block) {
hipFuncAttributes fatts;
cherr(hipFuncGetAttributes(&fatts, pKernel));
uint blockThreads = block.x * block.y * block.z;
flprintf("blockThreads %u\n", blockThreads);
if(blockThreads > fatts.maxThreadsPerBlock) {
flprintf("kernel @ %p unlaunchable: blockThreads %u > fatts.maxThreadsPerBlock %u\n", pKernel, blockThreads, fatts.maxThreadsPerBlock);
return false;
}
ExecCaps* pCaps = ExecCaps::currCaps();
uint gridVol = grid.x * grid.y * grid.z;
flprintf("gridVol %u\n", gridVol);
uint blockRegs = fatts.numRegs * blockThreads;
flprintf("blockRegs %u\n", blockRegs);
if(blockRegs > pCaps->regsPerBlock) {
flprintf("kernel @ %p unlaunchable: blockRegs %u > pCaps->regsPerBlock %u, gridVol %u\n", pKernel, blockRegs, pCaps->regsPerBlock, gridVol);
return false;
}
if(fatts.sharedSizeBytes > pCaps->memSharedPerBlock) {
flprintf("kernel @ %p unlaunchable: fatts.sharedSizeBytes %u > pCaps->memSharedPerBlock %u\n", pKernel, fatts.sharedSizeBytes, pCaps->memSharedPerBlock);
return false;
}
return true;
}
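// Shrinks an oversized block to the kernel's maxThreadsPerBlock limit by
// scaling down its largest dimension, then reassigns the dimensions as
// (largest, remaining, smallest).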
__host__ CUDART_DEVICE void b_util::validLaunch( dim3 & block, const void* pKernel) {
hipFuncAttributes fatts;
uint minF = MIN( block.x, MIN(block.y, block.z));
uint maxF = MAX( block.x, MAX(block.y, block.z));
uint blocks = block.x * block.y * block.z;
uint otherF = blocks / (minF * maxF);
hipFuncGetAttributes(&fatts, pKernel);
if(fatts.maxThreadsPerBlock < blocks) {
double factor = 1.0 * blocks/ fatts.maxThreadsPerBlock;
if(factor < maxF) {
maxF = (uint) maxF/ factor;
}
block.x = maxF; block.y = otherF; block.z = minF;
}
}
__host__ __device__ void b_util::prd3(const dim3& d3,const char* msg) {
if(msg)
printf("%s (%u,%u,%u)\n", msg, d3.x, d3.y, d3.z);
else
printf("(%u,%u,%u)\n", d3.x, d3.y, d3.z);
}
#ifdef CuMatrix_NVML
__host__ void b_util::enableGpuMode() {
//nvmlDeviceSetGpuOperationMode();
}
#endif
/* for rows of n<65
each warpsized sublock (should) reduces multiple spans of 64 columns
? for given matrix dimension, how many warp-spanning rows will there be?
for x.n factorOf WARP_SIZE -> 0
for x.n < ws
for ws % x.n != 0,
for primeQ(x.n) there are x.n-1 spanrows per ws*x.n threads
for !primeQ(x.n) there are x.n/largestCommonFactor(x.n, ws) - 1 spanrows
for every ws * x.n/largestCommonFactor(x.n, ws) warps
for m*n threads, there can be at most totalWarps -1 spanrows ( totalWarps = DIV_UP(threads, warpSize))
*/
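// Worked example (hypothetical sizes): n = 3, warpSize = 32, m = 32 ->
// 32/3 leaves a remainder, smallestFactor(3) = 3 so factor = 3,
// warps = DIV_UP(96, 32) = 3, giving 3 * 2 / 3 = 2 spanrows, i.e. the n - 1
// spanrows per warpSize * n threads predicted above for prime n.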
__host__ CUDART_DEVICE int b_util::countSpanrows( int m, int n, uint warpSize ) {
uint num = MAX(n,warpSize), den = MIN(n,warpSize);
int div = num/den;
if( div* den == num) {
//flprintf("div %d * num %d (== %d)\n", div, num, (div * num));
return 0;
}
uint sf = smallestFactor(n);
uint warps = DIV_UP(m * n,warpSize);
//flprintf("sf %u n/sf %u warps %d\n", sf, n/sf, warps);
uint factor = sf == n ? n : n/sf;
return warps * (factor-1)/factor;
//flprintf("factor (%u) s.t. (uint) ( m * (1. * (factor-1)/(factor))) == %u\n", factor, sr);
}
__host__ __device__ bool b_util::spanrowQ( int row, int n, uint warpSize) {
#ifdef __CUDA_ARCH__
return ::spanrowQ(row, n);
#else
uint warpS = row * n / warpSize;
uint warpE = (row + 1 ) * n / warpSize;
return warpS != warpE;
#endif
}
template<typename T> __host__ __device__ void util<T>::prdm(const char* msg, const DMatrix<T>& md) {
printf("%s d: %p (%u*%u*%u)", msg, md.elements, md.m,md.n,md.p);
}
template __host__ __device__ void util<float>::prdm(const char*,const DMatrix<float>& md);
template __host__ __device__ void util<double>::prdm(const char*,const DMatrix<double>& md);
template __host__ __device__ void util<long>::prdm(const char*,const DMatrix<long>& md);
template __host__ __device__ void util<ulong>::prdm(const char*,const DMatrix<ulong>& md);
template __host__ __device__ void util<uint>::prdm(const char*,const DMatrix<uint>& md);
template __host__ __device__ void util<int>::prdm(const char*,const DMatrix<int>& md);
template<typename T> __host__ __device__ void util<T>::prdmln(const char* msg, const DMatrix<T>& md) {
printf("%s d: %p (%u*%u*%u)\n", msg, md.elements, md.m,md.n,md.p);
}
template __host__ __device__ void util<float>::prdmln(const char*,const DMatrix<float>& md);
template __host__ __device__ void util<double>::prdmln(const char*,const DMatrix<double>& md);
template __host__ __device__ void util<long>::prdmln(const char*,const DMatrix<long>& md);
template __host__ __device__ void util<ulong>::prdmln(const char*,const DMatrix<ulong>& md);
template __host__ __device__ void util<uint>::prdmln(const char*,const DMatrix<uint>& md);
template __host__ __device__ void util<int>::prdmln(const char*,const DMatrix<int>& md);
template<typename T> __host__ __device__ void util<T>::printDm( const DMatrix<T>& dm , const char* msg) {
uint size = dm.m*dm.p;
printf("%s (%d*%d*%d) %d &dmatrix=%p elements %p\n",msg,dm.m,dm.n,dm.p,size, &dm,dm.elements);
T * elems = NULL;
#ifndef __CUDA_ARCH__
if(dm.elements) {
		checkCudaError(hipHostMalloc(&elems, size * sizeof(T), 0));
CuTimer timer;
timer.start();
		checkCudaError(hipMemcpy(elems, dm.elements, size * sizeof(T), hipMemcpyDeviceToHost));
//CuMatrix<T>::incDhCopy("util<T>::printDm-" + b_util::caller(),size,timer.stop() );
}
#else
elems = dm.elements;
#endif
if(!elems) {
printf("printDm nothing to see here\n");
return;
}
bool header = false;
if (checkDebug(debugVerbose) || (dm.m < CuMatrix<T>::getMaxRowsDisplayed() && dm.n < CuMatrix<T>::getMaxColsDisplayed())) {
for (uint i1 = 0; i1 < dm.m; i1++) {
if(!header) {
printf("-");
for (uint j1 = 0; j1 < dm.n; j1++) {
if(j1 % 10 == 0) {
printf(" %d", j1/10);
}else {
printf(" ");
}
printf(" ");
}
printf("\n");
header = true;
}
printf("[");
for (uint j1 = 0; j1 < dm.n; j1++) {
if(sizeof(T) == 4)
printf("% 2.10g", elems[i1 * dm.p + j1]); //get(i1,j1) );
else
printf("% 2.16g", elems[i1 * dm.p + j1]); // get(i1,j1) );
//);
if (j1 < dm.n - 1) {
printf(" ");
}
}
printf("] ");
if(i1 % 10 == 0) {
printf("%d", i1);
}
printf("\n");
}
if(header) {
printf("+");
for (uint j1 = 0; j1 < dm.n; j1++) {
if(j1 % 10 == 0) {
printf(" %d",j1/10);
}else {
printf(" ");
}
printf(" ");
}
printf("\n");
header = false;
}
} else {
for (uint i2 = 0; i2 < CuMatrix<T>::getMaxRowsDisplayed() + 1 && i2 < dm.m; i2++) {
if (i2 == CuMatrix<T>::getMaxRowsDisplayed()) {
printf(".\n.\n.\n");
continue;
}
for (uint j2 = 0; j2 < CuMatrix<T>::getMaxColsDisplayed() + 1 && j2 < dm.n; j2++) {
if (j2 == CuMatrix<T>::getMaxColsDisplayed()) {
printf("...");
continue;
}
if(sizeof(T) == 4)
printf("% 2.10g", elems[i2 * dm.p + j2]); //get(i2,j2));
else
printf("% 2.16g", elems[i2 * dm.p + j2]); //get(i2,j2));
//elements[i2 * p + j2]);
if (j2 < dm.n - 1) {
printf(" ");
}
}
printf("\n");
}
if (dm.m > CuMatrix<T>::getMaxRowsDisplayed()) {
for (uint i3 =dm.m - CuMatrix<T>::getMaxRowsDisplayed(); i3 < dm.m; i3++) {
if (dm.n > CuMatrix<T>::getMaxColsDisplayed()) {
for (uint j3 = dm.n - CuMatrix<T>::getMaxColsDisplayed(); j3 < dm.n; j3++) {
if (j3 == dm.n - CuMatrix<T>::getMaxColsDisplayed()) {
printf("...");
continue;
}
if(sizeof(T) == 4)
printf("% 2.10g", elems[i3 * dm.p + j3]);//get(i3, j3));
else
printf("% 2.16g", elems[i3 * dm.p + j3]); //get(i3,j3));
//elements[i3 * p + j3]);
if (j3 < dm.n - 1) {
printf(" ");
}
}
} else {
for (uint j4 = 0; j4 < dm.n; j4++) {
if(sizeof(T) == 4)
printf("% 2.10g", elems[i3 * dm.p + j4]); // get(i3,j4));
else
printf("% 2.16g", elems[i3 * dm.p + j4]); //get(i3,j4));
//elements[i3 * p + j4]);
if (j4 < dm.n - 1) {
printf(" ");
}
}
}
printf("\n");
}
} else { //if(dm.m > 10) -> dm.n > 10
for (uint i5 = 0; i5 < CuMatrix<T>::getMaxRowsDisplayed() + 1 && i5 < dm.m; i5++) {
if (dm.n > CuMatrix<T>::getMaxColsDisplayed()) {
for (uint j5 = dm.n - CuMatrix<T>::getMaxColsDisplayed(); j5 < dm.n; j5++) {
if (j5 == dm.n - CuMatrix<T>::getMaxColsDisplayed()) {
printf("...");
continue;
}
T t = elems[i5 * dm.p + j5];
if(sizeof(T) == 4)
printf("% 2.10g", t);
else
printf("% 2.16g", t);
if (j5 < dm.n - 1) {
printf(" ");
}
}
} else {
for (uint j4 = 0; j4 < dm.n; j4++) {
if(sizeof(T) == 4)
printf("% 2.10g", elems[i5 * dm.p + j4]); //get(i5,j4));
else
printf("% 2.16g", elems[i5 * dm.p + j4]); //get(i5,j4));
if (j4 < dm.n - 1) {
printf(" ");
}
}
}
printf("\n");
}
}
}
#ifndef __CUDA_ARCH__
if(elems) {
flprintf("freeing host elems %p\n", elems);
checkCudaErrors(hipHostFree(elems));
}
#endif
}
template void util<float>::printDm(DMatrix<float> const&,const char*);
template void util<double>::printDm(DMatrix<double> const&,const char*);
template void util<ulong>::printDm(DMatrix<ulong> const&,const char*);
template void util<uint>::printDm(DMatrix<uint> const&,const char*);
template void util<int>::printDm(DMatrix<int> const&,const char*);
template<typename T> __host__ __device__ void util<T>::printRow(const DMatrix<T>& dm, int row) {
prlocf("printRow\n");
if(!dm.elements) {
printf("row %d: null elements\n", row);
return;
}
ulong idx = row * dm.p;
T* elems;
#ifndef __CUDA_ARCH__
uint rowSize = dm.n*sizeof(T);
checkCudaError(hipHostMalloc(&elems, rowSize,0));
	checkCudaError(hipMemcpy(elems, dm.elements + idx, rowSize, hipMemcpyDeviceToHost));
#else
elems = dm.elements;
#endif
printf("row %d: ", row);
for(int c = 0; c < dm.n; c++) {
printf("%5.2f ", elems[idx + c]);
}
printf("\n");
#ifndef __CUDA_ARCH__
flprintf("freeing host elems %p\n", elems);
checkCudaError(hipHostFree(elems));
#endif
}
template void util<float>::printRow(DMatrix<float> const&,int);
template void util<double>::printRow(DMatrix<double> const&,int);
template void util<long>::printRow(DMatrix<long> const&,int);
template void util<ulong>::printRow(DMatrix<ulong> const&,int);
template void util<int>::printRow(DMatrix<int> const&,int);
template void util<uint>::printRow(DMatrix<uint> const&,int);
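// Builds a 1-D launch configuration (dBlocks x dThreads) for count elements;
// when a kernel pointer is supplied the block is first clamped via validLaunch
// and the block count recomputed.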
__host__ __device__ void b_util::vectorExecContext(int threads, int count, dim3& dBlocks,
dim3& dThreads, const void* kernel) {
if (threads % WARP_SIZE != 0) {
printf("WARN: %d is not a multiple of the warp size (32)\n",threads);
}
int blocks = DIV_UP(count, threads);
dThreads.y = dThreads.z = 1;
dThreads.x = threads;
if(kernel) {
validLaunch(dThreads,kernel);
if(dThreads.x != threads) {
blocks = DIV_UP(count, dThreads.x);
}
}
dBlocks.y = dBlocks.z = 1;
dBlocks.x = blocks;
#ifndef __CUDA_ARCH__
totalThreads += dBlocks.x * dThreads.x;
totalElements += count;
#endif
if (checkDebug(debugExec))
printf(
"contxt of %d blks of %d threads for count of %d\n", dBlocks.x, dThreads.x,count);
}
template<typename T> __host__ __device__ void util<T>::pDarry(const T* arry, int cnt) {
for(int i = 0; i < cnt; i++) {
printf("%f ", arry[i]);
}
printf("\n");
}
template<> __host__ __device__ void util<uint>::pDarry(const uint* arry, int cnt) {
uint* ptr = (uint*)arry;
#ifndef __CUDA_ARCH__
checkCudaError(hipHostMalloc(&ptr, cnt * sizeof(uint)));
CuTimer timer;
timer.start();
checkCudaError(hipMemcpy(ptr, arry, cnt*sizeof(uint), hipMemcpyDeviceToHost));
//CuMatrix<uint>::incDhCopy("util<uint>::pDarry-" + b_util::caller() , cnt*sizeof(uint),timer.stop());
#endif
for(int i = 0; i < cnt; i++) {
printf("%.3f ", (double) ptr[i]);
}
printf("\n");
#ifndef __CUDA_ARCH__
flprintf("freeing host ptr %p\n", ptr);
checkCudaError(hipHostFree(ptr));
#endif
}
template __host__ __device__ void util<float>::pDarry(const float*, int);
template __host__ __device__ void util<double>::pDarry(const double*, int);
template __host__ __device__ void util<ulong>::pDarry(const ulong*, int);
template __host__ __device__ void util<int>::pDarry(const int*, int);
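// Fills n device elements with val via fillKernel; when debugCheckValid is
// set, a few addresses across the range are probed first to catch bad
// allocations early.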
template <typename T> __host__ __device__ void util<T>::fillNDev(T* trg, T val, long n) {
int threads = 512;
dim3 dBlocks, dThreads;
b_util::vectorExecContext(threads, n, dBlocks, dThreads);
if(checkDebug(debugCheckValid)) {
flprintf("currdev %d -- trg %p val %.5f N = %d\n", ExecCaps::currDev(), trg,val,n);
MemMgr<T>::checkValid(trg, "fillNDev start");
MemMgr<T>::checkValid(trg + n/2, "fillNDev half/N");
MemMgr<T>::checkValid(trg + 3* n/4, "fillNDev 3N/4");
MemMgr<T>::checkValid(trg + n-1, "fillNDev N-1");
//MemMgr<T>::checkRange(trg, n-1, "fillNDev range of N");
}
hipLaunchKernelGGL(( fillKernel), dim3(dBlocks),dim3(dThreads), 0, 0, trg, val, n);
cherr(hipDeviceSynchronize());
}
template void util<float>::fillNDev(float*, float, long);
template void util<double>::fillNDev(double*, double, long);
template void util<ulong>::fillNDev(ulong*, ulong, long);
template void util<long>::fillNDev(long*, long, long);
template void util<uint>::fillNDev(uint*, uint, long);
template void util<int>::fillNDev(int*, int, long);
//////////////////////////
//
// IndexArray
//
//////////////////////////
//
__host__ __device__ IndexArray::IndexArray() :
indices(null), count(0), owner(true) {
}
__host__ __device__ IndexArray::IndexArray(const IndexArray & o) :
indices(o.indices), count(o.count), owner(false) {
}
__host__ __device__ IndexArray::IndexArray(uint* _indices, uint _count, bool _owner) :
indices(_indices), count(_count), owner(_owner) {
}
__host__ __device__ IndexArray::IndexArray(uint idx1, uint idx2) :
count(2), owner(true) {
indices = new uint[2];
indices[0] = idx1;
indices[1] = idx2;
}
/*
intPair IndexArray::toPair() const {
assert(count == 2);
return intPair(indices[0], indices[1]);
}
*/
__host__ __device__ IndexArray::~IndexArray() {
if (indices != null && owner) {
delete[] indices;
}
}
string IndexArray::toString(bool lf) const {
stringstream ssout;
ssout << "IdxArr" << (owner ? "+" : "-") << "(" << count << ")[ \n";
for (uint i = 0; i < count; i++) {
ssout << indices[i];
if (i < count - 1) {
ssout << (lf ? ( i % 50 == 0 ? "----\n" : "\n") : ", ");
}
}
ssout << " ]";
return ssout.str();
}
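// Human-readable size formatting: expNotation uses the decimal Kilo/Mega/Giga
// constants, expNotationMem the Kilob/Megab/Gigab/Terab memory constants.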
__host__ __device__ void b_util::expNotation(char* buff, long val) {
#ifndef __CUDA_ARCH__
double factor = 1.;
if (val >= Giga) {
factor = 1. / Giga;
sprintf(buff, "%2.3gGb", (double) val * factor);
} else if (val >= Mega) {
factor = 1. / Mega;
sprintf(buff, "%2.3gMb", (double)val * factor);
} else if (val >= Kilo) {
factor = 1. / Kilo;
sprintf(buff, "%2.3gKb", (double)val * factor);
} else {
sprintf(buff, "%2.3gb", (double)val * factor);
}
#endif
}
__host__ void b_util::expNotationMem(char* buff, long val) {
double factor = 1.;
if (val >= Terab) {
factor = 1. / Terab;
sprintf(buff, "%2.3gTb", (double) val * factor);
} else if (val >= Gigab) {
factor = 1. / Gigab;
sprintf(buff, "%2.3gGb", (double) val * factor);
} else if (val >= Megab) {
factor = 1. / Megab;
sprintf(buff, "%2.3gMb", (double)val * factor);
} else if (val >= Kilob) {
factor = 1. / Kilob;
sprintf(buff, "%2.3gKb", (double)val * factor);
} else {
sprintf(buff, "%2.3gb", (double)val * factor);
}
}
__host__ string b_util::expNotationMemStr( long val) {
char buff[256];
expNotationMem(buff, val);
stringstream ss;
ss << buff;
return ss.str();
}
__host__ CUDART_DEVICE double b_util::currMemRatio( ) {
size_t freeMemory =1, totalMemory =1;
cherr( hipMemGetInfo(&freeMemory, &totalMemory));
return 100 * (1 - freeMemory * 1. / totalMemory);
}
__host__ CUDART_DEVICE double b_util::usedMemRatio(bool allDevices) {
//outln("b_util::usedMemRatio("<< tOrF(allDevices) << ") ent");
//b_util::dumpStack();
size_t freeMemory =1, totalMemory =1;
if(allDevices) {
#ifndef __CUDA_ARCH__
ExecCaps::allGpuMem(&freeMemory, &totalMemory);
#endif
}
else {
hipMemGetInfo(&freeMemory, &totalMemory);
}
return 100 * (1 - freeMemory * 1. / totalMemory);
}
__host__ CUDART_DEVICE void b_util::usedDmem(bool allDevices) {
//outln("b_util::usedDmem("<< tOrF(allDevices) << ") ent");
#ifndef __CUDA_ARCH__
flprintf("Memory %.3f%% used\n", usedMemRatio(allDevices));
#else
flprintf("Memory %.3f%% used\n", usedMemRatio(false));
#endif
}
__host__ __device__ void b_util::_checkCudaError(const char* file, int line, hipError_t val) {
if (val != hipSuccess) {
printf("CuMatrixException (%d) %s at %s : %d\n",val ,__cudaGetErrorEnum(val),file, line);
#ifndef __CUDA_ARCH__
// cout << print_stacktrace() << endl;
#endif
assert(false);
}
}
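// Precision-matched sqrt wrappers so templated code can call sqrt_p<T>
// uniformly; the integer variants round toward zero through sqrtf.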
template <> __host__ __device__ float sqrt_p(float val) {
return sqrtf(val);
}
template <> __host__ __device__ double sqrt_p(double val) {
return sqrt(val);
}
template <> __host__ __device__ long sqrt_p(long val) {
return (long)sqrtf(val);
}
template <> __host__ __device__ ulong sqrt_p(ulong val) {
return (ulong)sqrtf(val);
}
template <> __host__ __device__ int sqrt_p(int val) {
return (int)sqrtf((float)val);
}
template <> __host__ __device__ uint sqrt_p(uint val) {
return (uint)sqrtf((float)val);
}
template<typename T> __host__ __device__ void prry(T* ry) {
}
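// Packeur/Packeur2/Packeur3 pack successive scalars into CUDA vector types
// (double3/double4, float4, int3, uint3, ulong4, ...): packNext fills
// .x/.y/.z/.w in order and render() assembles the vector, flagging
// outOfBoundsEx when more components are supplied than the type holds.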
template <> template <> __host__ __device__ void Packeur<double4>::packNext<double>(
double h) {
switch (currIdx) {
case 0:
target.x = h;
break;
case 1:
target.y = h;
break;
case 2:
target.z = h;
break;
case 3:
target.w = h;
break;
default:
setLastError(outOfBoundsEx);
return;
}
currIdx++;
}
template<> template <> __host__ __device__ void Packeur<double3>::packNext<double>(
double h) {
switch (currIdx) {
case 0:
target.x = h;
break;
case 1:
target.y = h;
break;
case 2:
target.z = h;
break;
default:
setLastError(outOfBoundsEx);
return;
}
currIdx++;
}
template<> template <> __host__ __device__ void Packeur<int3>::packNext<int>(
int h) {
switch (currIdx) {
case 0:
target.x = h;
break;
case 1:
target.y = h;
break;
case 2:
target.z = h;
break;
default:
setLastError(outOfBoundsEx);
return;
}
currIdx++;
}
template<> template <> __host__ __device__ void Packeur<uint3>::packNext<uint>(
uint h) {
switch (currIdx) {
case 0:
target.x = h;
break;
case 1:
target.y = h;
break;
case 2:
target.z = h;
break;
default:
setLastError(outOfBoundsEx);
return;
}
currIdx++;
}
template<> __host__ __device__ void Packeur2::render<double4>(double4& target) {
switch (currIdx) {
case 1:
target.x = vals[0];
break;
case 2:
target.x = vals[0];
target.y = vals[1];
break;
case 3:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
break;
case 4:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
target.w = vals[3];
break;
default:
setLastError(outOfBoundsEx);
return;
}
}
template<typename T> template <typename PackType> __host__ __device__ PackType Packeur3<T>::render() {
if(sizeof(T) == 4) {
switch (currIdx) {
case 1:
return vals[0];
case 2:
return render<float2>();
case 3:
return render<float3>();
case 4:
return render<float4>();
default:
setLastError(outOfBoundsEx);
}
}else {
switch (currIdx) {
case 1:
return vals[0];
case 2:
return render<double2>();
case 3:
return render<double3>();
case 4:
return render<double4>();
default:
setLastError(outOfBoundsEx);
}
}
}
template<> template<> __host__ __device__ double4 Packeur3<double>::render() {
double4 target;
switch (currIdx) {
case 1:
target.x = vals[0];
break;
case 2:
target.x = vals[0];
target.y = vals[1];
break;
case 3:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
break;
case 4:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
target.w = vals[3];
break;
default:
setLastError(outOfBoundsEx);
}
return target;
}
template<> template<> __host__ __device__ float4 Packeur3<float>::render() {
float4 target;
switch (currIdx) {
case 1:
target.x = vals[0];
break;
case 2:
target.x = vals[0];
target.y = vals[1];
break;
case 3:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
break;
case 4:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
target.w = vals[3];
break;
default:
setLastError(outOfBoundsEx);
}
return target;
}
template<> template<> __host__ __device__ ulong4 Packeur3<ulong>::render() {
ulong4 target;
switch (currIdx) {
case 1:
target.x = vals[0];
break;
case 2:
target.x = vals[0];
target.y = vals[1];
break;
case 3:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
break;
case 4:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
target.w = vals[3];
break;
default:
setLastError(outOfBoundsEx);
}
return target;
}
| 7219d60c534cec8be55b3d4448fecdf4f27f483a.cu | #include "util.h"
#include "caps.h"
#include <string>
#include <sstream>
#include "DMatrix.h"
#include "CuMatrix.h"
#include <float.h>
#include <limits>
#include "Maths.h"
#include "Kernels.h"
using std::numeric_limits;
template <typename T> __host__ __device__ void printColoArray(const T* array, int n, int direction) {
#ifndef __CUDA_ARCH__
printf("caller %s\n", b_util::caller().c_str());
#endif
flprintf("array %p[0::%d] ", array, n);
for(int i =0; i < n; i++) {
printf("%f", (float) array[i]);
if(i < n -1) printf(", ");
}
printf("\n");
}
template __host__ __device__ void printColoArray<float>(const float*,int,int);
template __host__ __device__ void printColoArray<double>(const double*,int,int);
template __host__ __device__ void printColoArray<int>(const int*,int,int);
template __host__ __device__ void printColoArray<uint>(const uint*,int,int);
template __host__ __device__ void printColoArray<long>(const long*, int,int);
template __host__ __device__ void printColoArray<ulong>(const ulong*, int,int);
template <typename T> __host__ __device__ void prtColoArrayDiag(
const T* array,const char*msg,int line, int pitch,int n, int direction, T notEq) {
flprintf("%s:%d h arraydiag %p p:%d[0::%d] notEq %f\n", msg, line, array, pitch, n, notEq);
int neqCnt = 0;
int idxFb= -1;
const T* firstBad = nullptr;
for(int i =0; i < n; i++) {
if(!notEq || array[i * (pitch + 1)] != notEq) {
printf("%p + %d (%p) = %f != %f", array, direction*i, array + direction * i * (pitch + 1), (float) array[i * (pitch + 1)], notEq);
if(i < n -1) printf(", ");
if(notEq) { idxFb = i; firstBad = array + i * (pitch + 1); neqCnt++; }
}
}
if(!neqCnt)
flprintf("\nfound none != %f\n",notEq);
else {
flprintf("found %d unexpected values starting at %p idx %d\n", neqCnt, firstBad,idxFb);
}
assert(neqCnt == 0);
}
template __host__ __device__ void prtColoArrayDiag<float>(const float*,const char*msg,int line,int,int,int,float);
template __host__ __device__ void prtColoArrayDiag<double>(const double*,const char*msg,int line,int,int,int,double);
template __host__ __device__ void prtColoArrayDiag<int>(const int*,const char*msg,int line,int,int,int,int);
template __host__ __device__ void prtColoArrayDiag<uint>(const uint*,const char*msg,int line,int,int,int,uint);
template __host__ __device__ void prtColoArrayDiag<long>(const long*,const char*msg,int line,int,int,int,long);
template __host__ __device__ void prtColoArrayDiag<ulong>(const ulong*,const char*msg,int line,int,int,int,ulong);
template <typename T> __host__ __device__ void cntColoArrayDiag(
const T* array,const char*msg,int line, int pitch,int n, int direction, T test) {
flprintf("%s:%d h arraydiag %p p:%d[0::%d] test %f\n", msg, line, array, pitch, n, test);
int neqCnt = 0;
int idxNeq= -1;
int eqCnt = 0;
int idxEq= -1;
const T* firstNeq = nullptr,* firstEq = nullptr;
for(int i =0; i < n; i++) {
if(!test || array[i * (pitch + 1)] != test) {
//printf("%p + %d (%p) = %f != %f", array, direction*i, array + direction * i * (pitch + 1), (float) array[i * (pitch + 1)], notEq);
if(test && firstNeq == nullptr) { idxNeq = i; firstNeq= array + i * (pitch + 1); }
neqCnt++;
}else {
if(test&& firstEq == nullptr) { idxEq = i; firstEq = array + i * (pitch + 1); }
eqCnt++;
}
}
flprintf("\nfound %d neq %f, %d eq out of %d; first neq @ %p idx %d first eq %p idx %d\n", neqCnt, test, eqCnt, n, firstNeq,idxNeq,firstEq,idxEq);
}
template __host__ __device__ void cntColoArrayDiag<float>(const float*,const char*msg,int line,int,int,int,float);
template __host__ __device__ void cntColoArrayDiag<double>(const double*,const char*msg,int line,int,int,int,double);
template __host__ __device__ void cntColoArrayDiag<int>(const int*,const char*msg,int line,int,int,int,int);
template __host__ __device__ void cntColoArrayDiag<uint>(const uint*,const char*msg,int line,int,int,int,uint);
template __host__ __device__ void cntColoArrayDiag<long>(const long*,const char*msg,int line,int,int,int,long);
template __host__ __device__ void cntColoArrayDiag<ulong>(const ulong*,const char*msg,int line,int,int,int,ulong);
template <typename T> __host__ __device__ void prtColoArrayInterval(const T* array,
const char* msg, long n, int sampleElemCount, int sampleCount) {
int step = n / sampleCount;
printf("%s array %p n %ld selems %d samCnt %d step %d\n", msg, array, n, sampleElemCount, sampleCount, step);
printf("array %p[0::%d]\n", array, n);
for(int s = 0; s < n -sampleElemCount; s += step) {
printf(" %d::%d --> ", s, s+ sampleElemCount);
for(int i =0; i < sampleElemCount; i++) {
printf(" %f", (float) array[s + i ]);
if(i < sampleElemCount -1) printf(", ");
}
printf("\n");
}
printf("\n");
}
template __host__ __device__ void prtColoArrayInterval<float>(const float*, const char*,long,int,int);
template __host__ __device__ void prtColoArrayInterval<double>(const double*, const char*,long,int,int);
template __host__ __device__ void prtColoArrayInterval<int>(const int*, const char*,long,int,int);
template __host__ __device__ void prtColoArrayInterval<uint>(const uint*, const char*,long,int,int);
template __host__ __device__ void prtColoArrayInterval<long>(const long*, const char*, long,int,int);
template __host__ __device__ void prtColoArrayInterval<ulong>(const ulong*, const char*, long,int,int);
__host__ void b_util::warmupL() {
outln("warminup");
warmup<<<1,1>>>();
outln("blokin");
checkCudaError(cudaDeviceSynchronize());
}
__host__ __device__ const char* b_util::tileDir(TileDirection tileD) {
switch(tileD) {
case tdNeither:
return "tdNeither";
case tdRows:
return "tdRows";
case tdCols:
return "tdCols";
case tdBoth:
return "tdBoth";
default:
return "???";
}
}
__host__ __device__ bool b_util::isPow2(uint x) {
return ((x&(x-1))==0);
}
__host__ __device__ int b_util::threadIdx1D() {
#ifdef __CUDA_ARCH__
return blockIdx.x * blockDim.x * 2 + threadIdx.x;
#else
return false;
#endif
}
__host__ __device__ int b_util::threadIdx1Dblock(uint blocksize) {
#ifdef __CUDA_ARCH__
return blockIdx.x * blocksize * 2 + threadIdx.x;
#else
return false;
#endif
}
__host__ __device__ const char* tdStr(TileDirection td) {
switch (td ){
case tdNeither:
return "tdNeither";
case tdRows:
return "tdRows";
case tdCols:
return "tdCols";
}
return "unknown";
}
__host__ __device__ bool b_util::adjustExpectations(dim3& grid, dim3& block, const cudaFuncAttributes& atts) {
uint curBlk = block.x * block.y;
flprintf("curBlk %d\n",curBlk);
float factor = 1.0 * curBlk / atts.maxThreadsPerBlock;
flprintf("factor %f\n",factor);
if(factor > 1) {
flprintf("was block(%d,%d)\n", block.x, block.y);
flprintf("factor %d\n", factor);
if(block.x > block.y) {
block.x /= factor;
grid.x *= factor;
} else {
block.y /= factor;
grid.y *= factor;
}
flprintf("now block(%d,%d)\n", block.x, block.y);
return true;
}
return false;
}
__host__ __device__ bool b_util::onGpuQ() {
#ifdef __CUDA_ARCH__
return true;
#else
return false;
#endif
}
__global__ void devFree(void * mem) {
#ifdef CuMatrix_Enable_Cdp
FirstThread {
cherr(cudaFree(mem));
}
#endif
}
__host__ void b_util::freeOnDevice(void * mem) {
devFree<<<1,1>>>(mem);
}
__device__ __host__ uint b_util::nextPowerOf2(uint x) {
if (x < 2) {
return 2;
}
x = maskShifts(--x);
return ++x;
}
__device__ __host__ uint b_util::prevPowerOf2(uint x) {
x = maskShifts(x);
return x - (x >> 1);
}
template<> __host__ __device__ float util<float>::epsilon() {
return 1e-6;
}
template<> __host__ __device__ double util<double>::epsilon() {
return 1e-10;
}
template<> __host__ __device__ long util<long>::epsilon() {
return 0;
}
template<> __host__ __device__ ulong util<ulong>::epsilon() {
return 0;
}
template<> __host__ __device__ uint util<uint>::epsilon() {
return 0;
}
template<> __host__ __device__ int util<int>::epsilon() {
return 0;
}
template<typename T> __host__ __device__ T util<T>::minValue() {
#ifndef __CUDA_ARCH__
return numeric_limits<T>::min();
#else
setLastError(notImplementedEx);
return 0;
#endif
}
template <> __host__ __device__ uint util<uint>::minValue() {
return 0;
}
template<typename T> __host__ __device__ bool util<T>::almostEquals(T t1, T t2, T epsilon) {
if(checkDebug(debugVerbose))flprintf("t2 %f - t1 %f < epsilon %f ::abs(t2 - t1) %f (%d)\n",
(double)t2, (double) t1, (double) epsilon,(double) ::fabs(t2 - t1), ::fabs(t2 - t1) < epsilon);
return ::fabs(t2 - t1) < epsilon;
}
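// e.g. util<double>::almostEquals(1.0, 1.0 + 1e-12, util<double>::epsilon()) is true,
// since fabs(1e-12) is below the double epsilon of 1e-10 defined above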
template __host__ __device__ bool util<long>::almostEquals(long, long, long);
template __host__ __device__ bool util<unsigned long>::almostEquals(unsigned long, unsigned long, unsigned long);
template __host__ __device__ bool util<double>::almostEquals(double, double, double);
template __host__ __device__ bool util<int>::almostEquals(int, int, int);
template __host__ __device__ bool util<float>::almostEquals(float, float, float);
template __host__ __device__ bool util<unsigned int>::almostEquals(unsigned int, unsigned int, unsigned int);
template<typename T> __host__ __device__ bool util<T>::almostEqualsNormalized(T t1, T t2, T epsilon) {
if(checkDebug(debugVerbose))flprintf("t2 %f - t1 %f < epsilon %f ::abs(t2 - t1) %f (%d)\n",
(double)t2, (double) t1, (double) epsilon,(double) ::fabs(t2 - t1), ::fabs(t2 - t1) < epsilon);
return (::fabs(t2 - t1))/t1 < epsilon;
}
template __host__ __device__ bool util<long>::almostEqualsNormalized(long, long, long);
template __host__ __device__ bool util<unsigned long>::almostEqualsNormalized(unsigned long, unsigned long, unsigned long);
template __host__ __device__ bool util<double>::almostEqualsNormalized(double, double, double);
template __host__ __device__ bool util<int>::almostEqualsNormalized(int, int, int);
template __host__ __device__ bool util<float>::almostEqualsNormalized(float, float, float);
template __host__ __device__ bool util<unsigned int>::almostEqualsNormalized(unsigned int, unsigned int, unsigned int);
template<typename T>__global__ void vectorAddPitch(T *c, const T *a, const T *b, int n, int p) {
uint idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < n) {
int tidx = idx * p;
tidx[c] = tidx[a] + tidx[b]; // fun with [] syntax
}
}
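// Illustrative launch of the pitched add above (a sketch; d_c, d_a, d_b, n and the element
// pitch p stand in for caller-provided device buffers and sizes):
// vectorAddPitch<float><<<DIV_UP(n, 256), 256>>>(d_c, d_a, d_b, n, p);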
template<typename T> __host__ float util<T>::vAddGflops(int device){
int orgDev;
cherr(cudaPeekAtLastError());
cherr(cudaGetDevice(&orgDev));
if(orgDev != device) {
ExecCaps_visitDevice(device);
}
flprintf("util<T>::vAddGflops(device = %d, %s) set device\n", device, gpuNames[device].c_str());
outln("checking fer dev " << device);
//usedDevMem();
int n = 1000000;
//outln("before mc");
//usedDevMem();
CuMatrix<T> mc = CuMatrix<T>::ones(n,1);
outln("after mc " << mc.toShortString());
T mcsum = mc.sum();
outln("mcsum " << mcsum);
assert(mcsum == (T)n);
//outln("checking fer dev " << device);
//usedDevMem();
//outln("made mc\n " << mc.syncBuffers());
//outln("after mc syncbuffes \n ");
//printColoArrayInterval(mc.elements, n, 10, 40);
//usedDevMem();
CuMatrix<T> m2 = CuMatrix<T>::fill( n,1,(T)2);
//outln("made m2\n " << m2.syncBuffers());
//usedDevMem();
CuMatrix<T> m3 = CuMatrix<T>::zeros( n,1);
outln("made mc " << mc.toShortString() << ", " << m2.toShortString() << ", m3 " << m3.toShortString());
//m3.syncBuffers();
DMatrix<T> dc = mc.asDmatrix();
DMatrix<T> d2 = m2.asDmatrix();
DMatrix<T> d3 = m3.asDmatrix();
outln("after d1-d3");
//usedDevMem();
//flprintf("util<T>::vAddGflops dc.el %p d2.el %p d3.el %p\n", dc.elements, d2.elements, d3.elements);
uint blockSize = 1024;
CuTimer timer;
timer.start();
cherr(cudaPeekAtLastError());
MemMgr<T>::checkValid( dc.elements);
MemMgr<T>::checkValid( d2.elements);
MemMgr<T>::checkValid( d3.elements);
outln("all valid d1-d3");
vectorAddPitch<T><<<DIV_UP(n,blockSize), blockSize>>>(d3.elements, dc.elements, d2.elements, n, mc._tileP);
cherr(cudaDeviceSynchronize());
outln("cudaDeviceSynchronize after vectorAddPitch");
m3.invalidateHost();
float addTimeMs = timer.stop();
timer.start();
m3.syncBuffers();
//outln("m3 " << m3 );
T m3sum = m3.sum();
outln("m3sum " << m3sum );
assert(m3sum == 3 * n);
float memTimeMs = timer.stop();
// b_util::usedDmem(1);
//printColoArrayInterval(m3.elements, n, 10, 40);
//flprintf("n %u adds took exeTimeMs %f millis (%f s)\n", n, exeTimeMs, exeTimeMs/Kilo);
addTimeMs /= Kilo;
flprintf("n/addTimeMs %f\n", n/addTimeMs);
float nExe = n/addTimeMs;
flprintf("n/extTimeS %f\n", nExe);
flprintf("nExe/Giga %f\n", nExe/Giga);
// one add per result element, so n adds per invocation
if(orgDev != device) {
ExecCaps_restoreDevice(orgDev);
}
return nExe / Giga; // matches the nExe/Giga figure printed above
}
template __host__ float util<float>::vAddGflops(int device);
template __host__ float util<double>::vAddGflops(int device);
template __host__ float util<ulong>::vAddGflops(int device);
template<> __host__ __device__ float util<float>::minValue() {
return FLT_MIN;
}
template<> __host__ __device__ double util<double>::minValue() {
return DBL_MIN;
}
template<> __host__ __device__ long util<long>::minValue() {
return 0;
}
template<> __host__ __device__ ulong util<ulong>::minValue() {
return 0;
}
template<> __host__ __device__ int util<int>::minValue() {
return INT_MIN;
}
template<typename T> __host__ __device__ T util<T>::maxValue() {
#ifndef __CUDA_ARCH__
return numeric_limits<T>::max();
#else
setLastError(notImplementedEx);
return 0;
#endif
}
template<> __host__ __device__ float util<float>::maxValue() {
return FLT_MAX;
}
template<> __host__ __device__ double util<double>::maxValue() {
return DBL_MAX;
}
template<> __host__ __device__ long util<long>::maxValue() {
return LONG_MAX;
}
template<> __host__ __device__ ulong util<ulong>::maxValue() {
return ULONG_MAX;
}
template<> __host__ __device__ int util<int>::maxValue() {
return INT_MAX;
}
template<> __host__ __device__ uint util<uint>::maxValue() {
return UINT_MAX;
}
/*
namespace mods {
static const char * host = "host";
static const char * device = "device";
static const char * synced = "synced";
static const char * neither = "neither";
}
*/
#define mods_host "host"
#define mods_device "device"
#define mods_synced "synced"
#define mods_neither "neither"
__host__ __device__ const char * b_util::modStr(Modification lastMod) {
switch (lastMod) {
case mod_host:
return "lstmd: " mods_host;
case mod_device:
return "lstmd: " mods_device;
case mod_synced:
return "lstmd: " mods_synced;
case mod_neither:
return "lstmd: " mods_neither;
default:
return "????";
}
}
int b_util::kernelOccupancy( void* kernel, int* maxBlocks, int blockSize) {
ExecCaps* curr = ExecCaps::currCaps();
//cudaOccupancyMaxActiveBlocksPerMultiprocessor(maxBlocks, kernel, blockSize,0);
int device = ExecCaps::currDev();
cudaDeviceProp prop;
int numBlocks;
cherr(cudaGetDeviceProperties(&prop, device))
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, kernel,
blockSize, 0);
int activeWarps = numBlocks * blockSize / prop.warpSize;
int maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize;
//int activeWarps = *maxBlocks * blockSize / curr->deviceProp.warpSize;
//int maxWarps = curr->deviceProp.maxThreadsPerMultiProcessor /curr->deviceProp.warpSize;;
outln("Occupancy " << (double) activeWarps/maxWarps * 100 << "%");
return 0;
}
template<typename T> __host__ __device__ void b_util::pPtrAtts(const T * ptr) {
#ifndef __CUDA_ARCH__
int ptrDev = b_util::getDevice((void*)ptr);
int orgDev = ExecCaps::currDev();
outln("b_util::pPtrAtts( " << ptr << ")");
cudaPointerAttributes ptrAtts;
if(ptrDev != orgDev) {
ExecCaps_visitDevice(ptrDev);
}
chsuckor(cudaPointerGetAttributes(&ptrAtts, ptr), cudaErrorInvalidValue);
if(ptrDev != orgDev) {
ExecCaps_restoreDevice(orgDev);
}
stringstream ss;
flprintf("raw %p %s %p, %s %p gpu %d, managed %s, type %s\n",
ptr,
ptrAtts.devicePointer != nullptr ? "dev: " : "",
ptrAtts.devicePointer != nullptr ? ptrAtts.devicePointer : 0,
ptrAtts.hostPointer != nullptr ? "host: " : "",
ptrAtts.hostPointer != nullptr ? ptrAtts.hostPointer : 0,
ptrAtts.device,
tOrF(ptrAtts.isManaged),
ptrAtts.memoryType == cudaMemoryTypeHost ? "host"
: ptrAtts.memoryType == cudaMemoryTypeDevice ? "device" : "unknown");
#else
flprintf("b_util::pPtrAtts(..) can't call cudaPointerGetAttributes() from device, ptr %p\n", ptr );
#endif
}
template __host__ __device__ void b_util::pPtrAtts<UnaryOpIndexF<float,0> const>(UnaryOpIndexF<float,0> const*);
template __host__ __device__ void b_util::pPtrAtts<UnaryOpIndexF<double,0> const>(UnaryOpIndexF<double,0> const*);
template __host__ __device__ void b_util::pPtrAtts<UnaryOpIndexF<unsigned long,0> const>(UnaryOpIndexF<unsigned long,0> const*);
template __host__ __device__ void b_util::pPtrAtts<float (*)(float) >(float (* const *)(float));
template __host__ __device__ void b_util::pPtrAtts<double (*)(double)>(double (* const *)(double));
template __host__ __device__ void b_util::pPtrAtts<unsigned long (*)(unsigned long)>(unsigned long (* const *)(unsigned long));
template __host__ __device__ void b_util::pPtrAtts<void>( const void*);
template __host__ __device__ void b_util::pPtrAtts<constFiller<float> >( const constFiller<float>*);
template __host__ __device__ void b_util::pPtrAtts<constFiller<double> >( const constFiller<double>*);
template __host__ __device__ void b_util::pPtrAtts<constFiller<unsigned long> >( const constFiller<unsigned long>*);
template __host__ __device__ void b_util::pPtrAtts<float const>(float const*);
template __host__ __device__ void b_util::pPtrAtts<ulong const>(ulong const*);
template __host__ __device__ void b_util::pPtrAtts<double const>(double const*);
template __host__ __device__ void b_util::pPtrAtts<float>( const float*);
template __host__ __device__ void b_util::pPtrAtts<double>( const double*);
template __host__ __device__ void b_util::pPtrAtts<int>( const int*);
template __host__ __device__ void b_util::pPtrAtts<uint>( const uint*);
template __host__ __device__ void b_util::pPtrAtts<long>( const long*);
template __host__ __device__ void b_util::pPtrAtts<ulong>( const ulong*);
__host__ __device__ void b_util::pFuncPtrAtts( const void * ptr) {
//#ifndef __CUDA_ARCH__
#ifdef CuMatrix_Enable_Cdp
struct cudaFuncAttributes fa;
cherr(cudaFuncGetAttributes(&fa, ptr));
flprintf("\n\tfa.binaryVersion %d\n\tfa.cacheModeCA %d\n\tfa.constSizeBytes %d\n\tfa.localSizeBytes %d\n\t,fa.maxThreadsPerBlock %d\n\tfa.numRegs %d\n\tptxVersion %d\n\tsharedSizeBytes %d\n",
fa.binaryVersion,fa.cacheModeCA,fa.constSizeBytes,fa.localSizeBytes,fa.maxThreadsPerBlock,fa.numRegs,fa.ptxVersion,fa.sharedSizeBytes);
#endif
/*
#else
flprintf("can't call cudaPointerGetAttributes() from device, ptr %p\n", ptr );
#endif
*/
}
template<typename T> __host__ __device__ enum cudaMemcpyKind b_util::copyKind( T * dst, T const * const src ) {
#ifndef __CUDA_ARCH__
int dstDevice = b_util::getDevice((void*)dst);
int srcDevice = b_util::getDevice((void*)src);
int orgDev = ExecCaps::currDev();
outln("b_util::copyKind( dst " << dst << ", src " << src << ") orgDev " << orgDev << ", dstDev " << dstDevice << ", srcDev " << srcDevice);
cudaPointerAttributes padst, pasrc;
if(dstDevice != orgDev) {
outln("b_util::copyKind() visiting dstDevice " << dstDevice);
ExecCaps_visitDevice(dstDevice);
}
chsuckor(cudaPointerGetAttributes(&padst, dst), cudaErrorInvalidValue);
if(dstDevice != orgDev) {
outln("b_util::copyKind() restoring orgDev " << orgDev);
ExecCaps_restoreDevice(orgDev);
}
if(srcDevice != orgDev) {
outln("b_util::copyKind() visiting srcDevice " << srcDevice);
ExecCaps_visitDevice(srcDevice);
}
chsuckor(cudaPointerGetAttributes(&pasrc, src), cudaErrorInvalidValue);
if(srcDevice != orgDev) {
outln("b_util::copyKind() restoring orgDev " << orgDev);
ExecCaps_restoreDevice(orgDev);
}
outln("b_util::copyKind() pasrc.memoryType " << pasrc.memoryType << "; padst.memoryType " << padst.memoryType);
if(pasrc.memoryType == cudaMemoryTypeHost) {
if(padst.memoryType == cudaMemoryTypeHost)
return cudaMemcpyHostToHost;
else
return cudaMemcpyHostToDevice;
}else {
if(padst.memoryType == cudaMemoryTypeHost)
return cudaMemcpyDeviceToHost;
else
return cudaMemcpyDeviceToDevice;
}
#else
flprintf("b_util<T>::copyType(..) can't call cudaPointerGetAttributes() from device, dst %p\n", dst );
return cudaMemcpyDefault; // kind inferred from pointer; for managed buffers
#endif
}
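// Typical use (a sketch; dst, src and count stand in for caller-owned buffers and an element count):
// cherr(cudaMemcpy(dst, src, count * sizeof(T), b_util::copyKind(dst, src)));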
template __host__ __device__ enum cudaMemcpyKind b_util::copyKind<float>(float*, float const*);
template __host__ __device__ enum cudaMemcpyKind b_util::copyKind<double>(double*, double const*);
template __host__ __device__ enum cudaMemcpyKind b_util::copyKind<unsigned long>(unsigned long*, unsigned long const*);
__host__ __device__ int b_util::maxBlockThreads(const void * ptr) {
struct cudaFuncAttributes fa;
cherr(cudaFuncGetAttributes(&fa, ptr));
return fa.maxThreadsPerBlock;
}
__host__ CUDART_DEVICE bool b_util::validLaunchQ( const void* pKernel, dim3 grid, dim3 block) {
cudaFuncAttributes fatts;
cherr(cudaFuncGetAttributes(&fatts, pKernel));
uint blockThreads = block.x * block.y * block.z;
flprintf("blockThreads %u\n", blockThreads);
if(blockThreads > fatts.maxThreadsPerBlock) {
flprintf("kernel @ %p unlaunchable: blockThreads %u > fatts.maxThreadsPerBlock %u\n", pKernel, blockThreads, fatts.maxThreadsPerBlock);
return false;
}
ExecCaps* pCaps = ExecCaps::currCaps();
uint gridVol = grid.x * grid.y * grid.z;
flprintf("gridVol %u\n", gridVol);
uint blockRegs = fatts.numRegs * blockThreads;
flprintf("blockRegs %u\n", blockRegs);
if(blockRegs > pCaps->regsPerBlock) {
flprintf("kernel @ %p unlaunchable: blockRegs %u > pCaps->regsPerBlock %u, gridVol %u\n", pKernel, blockRegs, pCaps->regsPerBlock, gridVol);
return false;
}
if(fatts.sharedSizeBytes > pCaps->memSharedPerBlock) {
flprintf("kernel @ %p unlaunchable: fatts.sharedSizeBytes %u > pCaps->memSharedPerBlock %u\n", pKernel, fatts.sharedSizeBytes, pCaps->memSharedPerBlock);
return false;
}
return true;
}
__host__ CUDART_DEVICE void b_util::validLaunch( dim3 & block, const void* pKernel) {
cudaFuncAttributes fatts;
uint minF = MIN( block.x, MIN(block.y, block.z));
uint maxF = MAX( block.x, MAX(block.y, block.z));
uint blocks = block.x * block.y * block.z;
uint otherF = blocks / (minF * maxF);
cudaFuncGetAttributes(&fatts, pKernel);
if(fatts.maxThreadsPerBlock < blocks) {
double factor = 1.0 * blocks/ fatts.maxThreadsPerBlock;
if(factor < maxF) {
maxF = (uint) maxF/ factor;
}
block.x = maxF; block.y = otherF; block.z = minF;
}
}
__host__ __device__ void b_util::prd3(const dim3& d3,const char* msg) {
if(msg)
printf("%s (%u,%u,%u)\n", msg, d3.x, d3.y, d3.z);
else
printf("(%u,%u,%u)\n", d3.x, d3.y, d3.z);
}
#ifdef CuMatrix_NVML
__host__ void b_util::enableGpuMode() {
//nvmlDeviceSetGpuOperationMode();
}
#endif
/* for rows of n < 65
each warp-sized sublock (should) reduce multiple spans of 64 columns
? for a given matrix dimension, how many warp-spanning rows will there be?
for x.n a factor of WARP_SIZE -> 0
for x.n < ws
for ws % x.n != 0,
for primeQ(x.n) there are x.n - 1 spanrows per ws * x.n threads
for !primeQ(x.n) there are x.n / largestCommonFactor(x.n, ws) - 1 spanrows
for every ws * x.n / largestCommonFactor(x.n, ws) warps
for m * n threads, there can be at most totalWarps - 1 spanrows (totalWarps = DIV_UP(threads, warpSize))
*/
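/* worked example, as a sketch (it assumes smallestFactor(n) returns the smallest prime factor of n):
countSpanrows(64, 5, 32): num = 32, den = 5, div = 6 and 6 * 5 != 32, so the early exit is skipped;
sf = 5, warps = DIV_UP(64 * 5, 32) = 10, factor = 5 (since sf == n), result = 10 * (5 - 1) / 5 = 8,
i.e. 4 of every 32 rows of a 5-column matrix straddle a 32-thread warp boundary. */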
__host__ CUDART_DEVICE int b_util::countSpanrows( int m, int n, uint warpSize ) {
uint num = MAX(n,warpSize), den = MIN(n,warpSize);
int div = num/den;
if( div* den == num) {
//flprintf("div %d * num %d (== %d)\n", div, num, (div * num));
return 0;
}
uint sf = smallestFactor(n);
uint warps = DIV_UP(m * n,warpSize);
//flprintf("sf %u n/sf %u warps %d\n", sf, n/sf, warps);
uint factor = sf == n ? n : n/sf;
return warps * (factor-1)/factor;
//flprintf("factor (%u) s.t. (uint) ( m * (1. * (factor-1)/(factor))) == %u\n", factor, sr);
}
__host__ __device__ bool b_util::spanrowQ( int row, int n, uint warpSize) {
#ifdef __CUDA_ARCH__
return ::spanrowQ(row, n);
#else
uint warpS = row * n / warpSize;
uint warpE = ((row + 1) * n - 1) / warpSize;
return warpS != warpE;
#endif
}
template<typename T> __host__ __device__ void util<T>::prdm(const char* msg, const DMatrix<T>& md) {
printf("%s d: %p (%u*%u*%u)", msg, md.elements, md.m,md.n,md.p);
}
template __host__ __device__ void util<float>::prdm(const char*,const DMatrix<float>& md);
template __host__ __device__ void util<double>::prdm(const char*,const DMatrix<double>& md);
template __host__ __device__ void util<long>::prdm(const char*,const DMatrix<long>& md);
template __host__ __device__ void util<ulong>::prdm(const char*,const DMatrix<ulong>& md);
template __host__ __device__ void util<uint>::prdm(const char*,const DMatrix<uint>& md);
template __host__ __device__ void util<int>::prdm(const char*,const DMatrix<int>& md);
template<typename T> __host__ __device__ void util<T>::prdmln(const char* msg, const DMatrix<T>& md) {
printf("%s d: %p (%u*%u*%u)\n", msg, md.elements, md.m,md.n,md.p);
}
template __host__ __device__ void util<float>::prdmln(const char*,const DMatrix<float>& md);
template __host__ __device__ void util<double>::prdmln(const char*,const DMatrix<double>& md);
template __host__ __device__ void util<long>::prdmln(const char*,const DMatrix<long>& md);
template __host__ __device__ void util<ulong>::prdmln(const char*,const DMatrix<ulong>& md);
template __host__ __device__ void util<uint>::prdmln(const char*,const DMatrix<uint>& md);
template __host__ __device__ void util<int>::prdmln(const char*,const DMatrix<int>& md);
template<typename T> __host__ __device__ void util<T>::printDm( const DMatrix<T>& dm , const char* msg) {
uint size = dm.m*dm.p;
printf("%s (%d*%d*%d) %d &dmatrix=%p elements %p\n",msg,dm.m,dm.n,dm.p,size, &dm,dm.elements);
T * elems = NULL;
#ifndef __CUDA_ARCH__
if(dm.elements) {
checkCudaError(cudaHostAlloc(&elems, size * sizeof(T), 0));
CuTimer timer;
timer.start();
checkCudaError(cudaMemcpy(elems, dm.elements, size * sizeof(T), cudaMemcpyDeviceToHost));
//CuMatrix<T>::incDhCopy("util<T>::printDm-" + b_util::caller(),size,timer.stop() );
}
#else
elems = dm.elements;
#endif
if(!elems) {
printf("printDm nothing to see here\n");
return;
}
bool header = false;
if (checkDebug(debugVerbose) || (dm.m < CuMatrix<T>::getMaxRowsDisplayed() && dm.n < CuMatrix<T>::getMaxColsDisplayed())) {
for (uint i1 = 0; i1 < dm.m; i1++) {
if(!header) {
printf("-");
for (uint j1 = 0; j1 < dm.n; j1++) {
if(j1 % 10 == 0) {
printf(" %d", j1/10);
}else {
printf(" ");
}
printf(" ");
}
printf("\n");
header = true;
}
printf("[");
for (uint j1 = 0; j1 < dm.n; j1++) {
if(sizeof(T) == 4)
printf("% 2.10g", elems[i1 * dm.p + j1]); //get(i1,j1) );
else
printf("% 2.16g", elems[i1 * dm.p + j1]); // get(i1,j1) );
//);
if (j1 < dm.n - 1) {
printf(" ");
}
}
printf("] ");
if(i1 % 10 == 0) {
printf("%d", i1);
}
printf("\n");
}
if(header) {
printf("+");
for (uint j1 = 0; j1 < dm.n; j1++) {
if(j1 % 10 == 0) {
printf(" %d",j1/10);
}else {
printf(" ");
}
printf(" ");
}
printf("\n");
header = false;
}
} else {
for (uint i2 = 0; i2 < CuMatrix<T>::getMaxRowsDisplayed() + 1 && i2 < dm.m; i2++) {
if (i2 == CuMatrix<T>::getMaxRowsDisplayed()) {
printf(".\n.\n.\n");
continue;
}
for (uint j2 = 0; j2 < CuMatrix<T>::getMaxColsDisplayed() + 1 && j2 < dm.n; j2++) {
if (j2 == CuMatrix<T>::getMaxColsDisplayed()) {
printf("...");
continue;
}
if(sizeof(T) == 4)
printf("% 2.10g", elems[i2 * dm.p + j2]); //get(i2,j2));
else
printf("% 2.16g", elems[i2 * dm.p + j2]); //get(i2,j2));
//elements[i2 * p + j2]);
if (j2 < dm.n - 1) {
printf(" ");
}
}
printf("\n");
}
if (dm.m > CuMatrix<T>::getMaxRowsDisplayed()) {
for (uint i3 =dm.m - CuMatrix<T>::getMaxRowsDisplayed(); i3 < dm.m; i3++) {
if (dm.n > CuMatrix<T>::getMaxColsDisplayed()) {
for (uint j3 = dm.n - CuMatrix<T>::getMaxColsDisplayed(); j3 < dm.n; j3++) {
if (j3 == dm.n - CuMatrix<T>::getMaxColsDisplayed()) {
printf("...");
continue;
}
if(sizeof(T) == 4)
printf("% 2.10g", elems[i3 * dm.p + j3]);//get(i3, j3));
else
printf("% 2.16g", elems[i3 * dm.p + j3]); //get(i3,j3));
//elements[i3 * p + j3]);
if (j3 < dm.n - 1) {
printf(" ");
}
}
} else {
for (uint j4 = 0; j4 < dm.n; j4++) {
if(sizeof(T) == 4)
printf("% 2.10g", elems[i3 * dm.p + j4]); // get(i3,j4));
else
printf("% 2.16g", elems[i3 * dm.p + j4]); //get(i3,j4));
//elements[i3 * p + j4]);
if (j4 < dm.n - 1) {
printf(" ");
}
}
}
printf("\n");
}
} else { //if(dm.m > 10) -> dm.n > 10
for (uint i5 = 0; i5 < CuMatrix<T>::getMaxRowsDisplayed() + 1 && i5 < dm.m; i5++) {
if (dm.n > CuMatrix<T>::getMaxColsDisplayed()) {
for (uint j5 = dm.n - CuMatrix<T>::getMaxColsDisplayed(); j5 < dm.n; j5++) {
if (j5 == dm.n - CuMatrix<T>::getMaxColsDisplayed()) {
printf("...");
continue;
}
T t = elems[i5 * dm.p + j5];
if(sizeof(T) == 4)
printf("% 2.10g", t);
else
printf("% 2.16g", t);
if (j5 < dm.n - 1) {
printf(" ");
}
}
} else {
for (uint j4 = 0; j4 < dm.n; j4++) {
if(sizeof(T) == 4)
printf("% 2.10g", elems[i5 * dm.p + j4]); //get(i5,j4));
else
printf("% 2.16g", elems[i5 * dm.p + j4]); //get(i5,j4));
if (j4 < dm.n - 1) {
printf(" ");
}
}
}
printf("\n");
}
}
}
#ifndef __CUDA_ARCH__
if(elems) {
flprintf("freeing host elems %p\n", elems);
checkCudaErrors(cudaFreeHost(elems));
}
#endif
}
template void util<float>::printDm(DMatrix<float> const&,const char*);
template void util<double>::printDm(DMatrix<double> const&,const char*);
template void util<ulong>::printDm(DMatrix<ulong> const&,const char*);
template void util<uint>::printDm(DMatrix<uint> const&,const char*);
template void util<int>::printDm(DMatrix<int> const&,const char*);
template<typename T> __host__ __device__ void util<T>::printRow(const DMatrix<T>& dm, int row) {
prlocf("printRow\n");
if(!dm.elements) {
printf("row %d: null elements\n", row);
return;
}
ulong idx = row * dm.p;
T* elems;
#ifndef __CUDA_ARCH__
uint rowSize = dm.n * sizeof(T);
// copy just this row from device memory into a pinned host buffer before printing
checkCudaError(cudaHostAlloc(&elems, rowSize, 0));
checkCudaError(cudaMemcpy(elems, dm.elements + idx, rowSize, cudaMemcpyDeviceToHost));
#else
elems = dm.elements + idx;
#endif
printf("row %d: ", row);
for(int c = 0; c < dm.n; c++) {
printf("%5.2f ", (double) elems[c]);
}
printf("\n");
#ifndef __CUDA_ARCH__
flprintf("freeing host elems %p\n", elems);
checkCudaError(cudaFreeHost(elems));
#endif
}
template void util<float>::printRow(DMatrix<float> const&,int);
template void util<double>::printRow(DMatrix<double> const&,int);
template void util<long>::printRow(DMatrix<long> const&,int);
template void util<ulong>::printRow(DMatrix<ulong> const&,int);
template void util<int>::printRow(DMatrix<int> const&,int);
template void util<uint>::printRow(DMatrix<uint> const&,int);
__host__ __device__ void b_util::vectorExecContext(int threads, int count, dim3& dBlocks,
dim3& dThreads, const void* kernel) {
if (threads % WARP_SIZE != 0) {
printf("WARN: %d is not a multiple of the warp size (32)\n",threads);
}
int blocks = DIV_UP(count, threads);
dThreads.y = dThreads.z = 1;
dThreads.x = threads;
if(kernel) {
validLaunch(dThreads,kernel);
if(dThreads.x != threads) {
blocks = DIV_UP(count, dThreads.x);
}
}
dBlocks.y = dBlocks.z = 1;
dBlocks.x = blocks;
#ifndef __CUDA_ARCH__
totalThreads += dBlocks.x * dThreads.x;
totalElements += count;
#endif
if (checkDebug(debugExec))
printf(
"contxt of %d blks of %d threads for count of %d\n", dBlocks.x, dThreads.x,count);
}
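// e.g. vectorExecContext(512, n, dBlocks, dThreads) yields dThreads.x = 512 and
// dBlocks.x = DIV_UP(n, 512); when a kernel pointer is supplied, validLaunch may shrink
// dThreads.x and the block count is recomputed from the smaller block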
template<typename T> __host__ __device__ void util<T>::pDarry(const T* arry, int cnt) {
for(int i = 0; i < cnt; i++) {
printf("%f ", arry[i]);
}
printf("\n");
}
template<> __host__ __device__ void util<uint>::pDarry(const uint* arry, int cnt) {
uint* ptr = (uint*)arry;
#ifndef __CUDA_ARCH__
checkCudaError(cudaMallocHost(&ptr, cnt * sizeof(uint)));
CuTimer timer;
timer.start();
checkCudaError(cudaMemcpy(ptr, arry, cnt*sizeof(uint), cudaMemcpyDeviceToHost));
//CuMatrix<uint>::incDhCopy("util<uint>::pDarry-" + b_util::caller() , cnt*sizeof(uint),timer.stop());
#endif
for(int i = 0; i < cnt; i++) {
printf("%.3f ", (double) ptr[i]);
}
printf("\n");
#ifndef __CUDA_ARCH__
flprintf("freeing host ptr %p\n", ptr);
checkCudaError(cudaFreeHost(ptr));
#endif
}
template __host__ __device__ void util<float>::pDarry(const float*, int);
template __host__ __device__ void util<double>::pDarry(const double*, int);
template __host__ __device__ void util<ulong>::pDarry(const ulong*, int);
template __host__ __device__ void util<int>::pDarry(const int*, int);
template <typename T> __host__ __device__ void util<T>::fillNDev(T* trg, T val, long n) {
int threads = 512;
dim3 dBlocks, dThreads;
b_util::vectorExecContext(threads, n, dBlocks, dThreads);
if(checkDebug(debugCheckValid)) {
flprintf("currdev %d -- trg %p val %.5f N = %d\n", ExecCaps::currDev(), trg,val,n);
MemMgr<T>::checkValid(trg, "fillNDev start");
MemMgr<T>::checkValid(trg + n/2, "fillNDev half/N");
MemMgr<T>::checkValid(trg + 3* n/4, "fillNDev 3N/4");
MemMgr<T>::checkValid(trg + n-1, "fillNDev N-1");
//MemMgr<T>::checkRange(trg, n-1, "fillNDev range of N");
}
fillKernel<<<dBlocks,dThreads>>>(trg, val, n);
cherr(cudaDeviceSynchronize());
}
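// Example use (a sketch; d_buf stands in for an n-element device allocation):
// util<float>::fillNDev(d_buf, 1.0f, n);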
template void util<float>::fillNDev(float*, float, long);
template void util<double>::fillNDev(double*, double, long);
template void util<ulong>::fillNDev(ulong*, ulong, long);
template void util<long>::fillNDev(long*, long, long);
template void util<uint>::fillNDev(uint*, uint, long);
template void util<int>::fillNDev(int*, int, long);
//////////////////////////
//
// IndexArray
//
//////////////////////////
//
__host__ __device__ IndexArray::IndexArray() :
indices(null), count(0), owner(true) {
}
__host__ __device__ IndexArray::IndexArray(const IndexArray & o) :
indices(o.indices), count(o.count), owner(false) {
}
__host__ __device__ IndexArray::IndexArray(uint* _indices, uint _count, bool _owner) :
indices(_indices), count(_count), owner(_owner) {
}
__host__ __device__ IndexArray::IndexArray(uint idx1, uint idx2) :
count(2), owner(true) {
indices = new uint[2];
indices[0] = idx1;
indices[1] = idx2;
}
/*
intPair IndexArray::toPair() const {
assert(count == 2);
return intPair(indices[0], indices[1]);
}
*/
__host__ __device__ IndexArray::~IndexArray() {
if (indices != null && owner) {
delete[] indices;
}
}
string IndexArray::toString(bool lf) const {
stringstream ssout;
ssout << "IdxArr" << (owner ? "+" : "-") << "(" << count << ")[ \n";
for (uint i = 0; i < count; i++) {
ssout << indices[i];
if (i < count - 1) {
ssout << (lf ? ( i % 50 == 0 ? "----\n" : "\n") : ", ");
}
}
ssout << " ]";
return ssout.str();
}
__host__ __device__ void b_util::expNotation(char* buff, long val) {
#ifndef __CUDA_ARCH__
double factor = 1.;
if (val >= Giga) {
factor = 1. / Giga;
sprintf(buff, "%2.3gGb", (double) val * factor);
} else if (val >= Mega) {
factor = 1. / Mega;
sprintf(buff, "%2.3gMb", (double)val * factor);
} else if (val >= Kilo) {
factor = 1. / Kilo;
sprintf(buff, "%2.3gKb", (double)val * factor);
} else {
sprintf(buff, "%2.3gb", (double)val * factor);
}
#endif
}
__host__ void b_util::expNotationMem(char* buff, long val) {
double factor = 1.;
if (val >= Terab) {
factor = 1. / Terab;
sprintf(buff, "%2.3gTb", (double) val * factor);
} else if (val >= Gigab) {
factor = 1. / Gigab;
sprintf(buff, "%2.3gGb", (double) val * factor);
} else if (val >= Megab) {
factor = 1. / Megab;
sprintf(buff, "%2.3gMb", (double)val * factor);
} else if (val >= Kilob) {
factor = 1. / Kilob;
sprintf(buff, "%2.3gKb", (double)val * factor);
} else {
sprintf(buff, "%2.3gb", (double)val * factor);
}
}
__host__ string b_util::expNotationMemStr( long val) {
char buff[256];
expNotationMem(buff, val);
stringstream ss;
ss << buff;
return ss.str();
}
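// e.g. expNotationMemStr(3 * Gigab) yields a string like "3Gb" (assuming Kilob/Megab/Gigab/Terab
// are the binary size constants defined in the headers included above)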
__host__ CUDART_DEVICE double b_util::currMemRatio( ) {
size_t freeMemory =1, totalMemory =1;
cherr( cudaMemGetInfo(&freeMemory, &totalMemory));
return 100 * (1 - freeMemory * 1. / totalMemory);
}
__host__ CUDART_DEVICE double b_util::usedMemRatio(bool allDevices) {
//outln("b_util::usedMemRatio("<< tOrF(allDevices) << ") ent");
//b_util::dumpStack();
size_t freeMemory =1, totalMemory =1;
if(allDevices) {
#ifndef __CUDA_ARCH__
ExecCaps::allGpuMem(&freeMemory, &totalMemory);
#endif
}
else {
cudaMemGetInfo(&freeMemory, &totalMemory);
}
return 100 * (1 - freeMemory * 1. / totalMemory);
}
__host__ CUDART_DEVICE void b_util::usedDmem(bool allDevices) {
//outln("b_util::usedDmem("<< tOrF(allDevices) << ") ent");
#ifndef __CUDA_ARCH__
flprintf("Memory %.3f%% used\n", usedMemRatio(allDevices));
#else
flprintf("Memory %.3f%% used\n", usedMemRatio(false));
#endif
}
__host__ __device__ void b_util::_checkCudaError(const char* file, int line, cudaError_t val) {
if (val != cudaSuccess) {
printf("CuMatrixException (%d) %s at %s : %d\n",val ,__cudaGetErrorEnum(val),file, line);
#ifndef __CUDA_ARCH__
// cout << print_stacktrace() << endl;
#endif
assert(false);
}
}
template <> __host__ __device__ float sqrt_p(float val) {
return sqrtf(val);
}
template <> __host__ __device__ double sqrt_p(double val) {
return sqrt(val);
}
template <> __host__ __device__ long sqrt_p(long val) {
return (long)sqrtf(val);
}
template <> __host__ __device__ ulong sqrt_p(ulong val) {
return (ulong)sqrtf(val);
}
template <> __host__ __device__ int sqrt_p(int val) {
return (int)sqrtf((float)val);
}
template <> __host__ __device__ uint sqrt_p(uint val) {
return (uint)sqrtf((float)val);
}
template<typename T> __host__ __device__ void prry(T* ry) {
}
template <> template <> __host__ __device__ void Packeur<double4>::packNext<double>(
double h) {
switch (currIdx) {
case 0:
target.x = h;
break;
case 1:
target.y = h;
break;
case 2:
target.z = h;
break;
case 3:
target.w = h;
break;
default:
setLastError(outOfBoundsEx);
return;
}
currIdx++;
}
template<> template <> __host__ __device__ void Packeur<double3>::packNext<double>(
double h) {
switch (currIdx) {
case 0:
target.x = h;
break;
case 1:
target.y = h;
break;
case 2:
target.z = h;
break;
default:
setLastError(outOfBoundsEx);
return;
}
currIdx++;
}
template<> template <> __host__ __device__ void Packeur<int3>::packNext<int>(
int h) {
switch (currIdx) {
case 0:
target.x = h;
break;
case 1:
target.y = h;
break;
case 2:
target.z = h;
break;
default:
setLastError(outOfBoundsEx);
return;
}
currIdx++;
}
template<> template <> __host__ __device__ void Packeur<uint3>::packNext<uint>(
uint h) {
switch (currIdx) {
case 0:
target.x = h;
break;
case 1:
target.y = h;
break;
case 2:
target.z = h;
break;
default:
setLastError(outOfBoundsEx);
return;
}
currIdx++;
}
template<> __host__ __device__ void Packeur2::render<double4>(double4& target) {
switch (currIdx) {
case 1:
target.x = vals[0];
break;
case 2:
target.x = vals[0];
target.y = vals[1];
break;
case 3:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
break;
case 4:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
target.w = vals[3];
break;
default:
setLastError(outOfBoundsEx);
return;
}
}
template<typename T> template <typename PackType> __host__ __device__ PackType Packeur3<T>::render() {
if(sizeof(T) == 4) {
switch (currIdx) {
case 1:
return vals[0];
case 2:
return render<float2>();
case 3:
return render<float3>();
case 4:
return render<float4>();
default:
setLastError(outOfBoundsEx);
}
}else {
switch (currIdx) {
case 1:
return vals[0];
case 2:
return render<double2>();
case 3:
return render<double3>();
case 4:
return render<double4>();
default:
setLastError(outOfBoundsEx);
}
}
return PackType(); // reached only when currIdx is out of range
}
template<> template<> __host__ __device__ double4 Packeur3<double>::render() {
double4 target;
switch (currIdx) {
case 1:
target.x = vals[0];
break;
case 2:
target.x = vals[0];
target.y = vals[1];
break;
case 3:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
break;
case 4:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
target.w = vals[3];
break;
default:
setLastError(outOfBoundsEx);
}
return target;
}
template<> template<> __host__ __device__ float4 Packeur3<float>::render() {
float4 target;
switch (currIdx) {
case 1:
target.x = vals[0];
break;
case 2:
target.x = vals[0];
target.y = vals[1];
break;
case 3:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
break;
case 4:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
target.w = vals[3];
break;
default:
setLastError(outOfBoundsEx);
}
return target;
}
template<> template<> __host__ __device__ ulong4 Packeur3<ulong>::render() {
ulong4 target;
switch (currIdx) {
case 1:
target.x = vals[0];
break;
case 2:
target.x = vals[0];
target.y = vals[1];
break;
case 3:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
break;
case 4:
target.x = vals[0];
target.y = vals[1];
target.z = vals[2];
target.w = vals[3];
break;
default:
setLastError(outOfBoundsEx);
}
return target;
}
|
0973b8622d2b5681ca14ba641371e205bd06f278.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorIndex.cu"
#else
void THCTensor_(indexCopy_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexCopy)(state, dst, dim, indices_, src);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THArgCheck(numIndices == src->size[dim], 4, "length of src.size[dim] is not equal to length of indices");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
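// Illustrative shapes (not taken from any call site): copying a (3 x 5) src into dst along dim 0
// with indices = {7, 2, 4} gives numIndices = 3, srcTotalSize = 15 and sliceSize = 5; each
// 5-element slice of src is written to dst row 7, 2 and 4 respectively.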
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstCopyDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = srcTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexCopyLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize);
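// Launch sizing heuristic: blocks of at most 128 threads; the grid is derived from sliceSize for
// the small-index kernels and from srcTotalSize for the large-index ones, in both cases capped at
// 8 blocks per multiprocessor.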
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLongTensor *index)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(THCTensor_(nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor_nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THCTensor_(resizeNd)(state, dst, index->nDimension, index->size, NULL);
dispatchTakePut<real, TensorTakeOp>(state, src, dst, index);
}
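// Sorts the flat destination indices in ascending order (thrust::sort_by_key) and applies the same
// permutation to src; used by the accumulating branch of THCTensor_(put) below.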
static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) {
THCThrustAllocator thrustAlloc(state);
auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index));
auto src_iter = thrust::device_ptr<real>(THCTensor_(data)(state, src));
auto numel = THCTensor_(numel)(state, src);
thrust::sort_by_key(
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
index_iter, index_iter + numel,
src_iter, ThrustLTOp<int64_t>());
}
void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
ptrdiff_t dstSize = THCTensor_(nElement)(state, dst);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index);
THArgCheck(THCTensor_(nElement)(state, src) == numIndices,
3, "src should have the same number of elements as index");
THArgCheck(THCTensor_(nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor_nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
int srcDims = THCTensor_(nDimension)(state, src);
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
if (accumulate) {
// wrap indices so to replace negative indices
THCudaLongTensor* sorted_index = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sorted_index, index);
THC_pointwiseApply2(state, sorted_index, index, WrapIndexOp(dstSize));
THCTensor* sorted_src = THCTensor_(newClone)(state, src);
THCTensor_(sort_indices)(state, sorted_index, sorted_src);
dispatchTakePut<real, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index);
THCTensor_(free)(state, sorted_src);
THCudaLongTensor_free(state, sorted_index);
} else {
dispatchTakePut<real, TensorPutOp>(state, dst, src, index);
}
}
void THCTensor_(indexAdd_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexAdd)(state, dst, dim, indices_, src);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THArgCheck(numIndices == src->size[dim], 4, "length of src.size[dim] is not equal to length of indices");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstAddDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = srcTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, sliceSize, dstAddDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, sliceSize, dstAddDimSize);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexFill_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, real val)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexFill)(state, dst, dim, indices_, val);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, real val)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, dst);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t dstFillDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = dstTotalSize / dstFillDimSize;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, indicesInfo, \
dstFillDim, sliceSize, dstFillDimSize, val);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, indicesInfo, \
dstFillDim, sliceSize, dstFillDimSize, val);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1);
}
} else {
if (dstInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexSelect_long)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THLongTensor *indices)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THArgCheck(indices->nDimension == 1, 3, "Index is supposed to be a vector");
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexSelect)(state, dst, src, dim, indices_);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THLongStorage *newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, dim, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t srcSelectDimSize = THCTensor_(size)(state, src, dim);
ptrdiff_t sliceSize = dstTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, sliceSize, srcSelectDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, dstTotalSize, sliceSize, srcSelectDimSize);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
#define MAX_ADVINDEX_CALC_DIMS 5
void THCTensor_(calculateAdvancedIndexingOffsets)(
THCState *state,
THCudaLongTensor *output,
THCTensor *indexed,
ptrdiff_t baseOffset,
THCudaLongTensor **indexers)
{
int ndim = THCTensor_(nDimension)(state, indexed);
THAssert(ndim <= MAX_ADVINDEX_CALC_DIMS);
// Assert all Tensors are on the same GPU, and that the indexing Tensors are
// contiguous
for (int i = 0; i < ndim; ++i) {
if (indexers[i] != NULL) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, output, indexers[i]));
THAssert(THCudaLongTensor_isContiguous(state, indexers[i]));
}
}
// Set grid, block dims
ptrdiff_t nElement = THCudaLongTensor_nElement(state, output);
const dim3 block = getApplyBlock();
dim3 grid;
THAssert(getApplyGrid(state, nElement, grid));
#define HANDLE_CASE(INDEX_TYPE, DIMS) \
{ \
LinearIndexCalcData<INDEX_TYPE, DIMS> data; \
for (int i = 0; i < DIMS; ++i) { \
data.baseSizes[i] = THCTensor_(size)(state, indexed, i); \
data.sizes[i] = indexers[i] != NULL ? \
THCudaLongTensor_nElement(state, indexers[i]) : \
THCTensor_(size)(state, indexed, i); \
data.strides[i] = THCTensor_(stride)(state, indexed, i); \
data.advIndexTensors[i] = indexers[i] != NULL ? \
THCudaLongTensor_data(state, indexers[i]) : NULL; \
} \
\
hipLaunchKernelGGL(( calculateLinearIndices<INDEX_TYPE, DIMS>) \
, dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), \
THCudaLongTensor_data(state, output), \
nElement, \
baseOffset, \
data \
); \
}
#define RUN_T(INDEX_TYPE) \
switch (ndim) { \
case 1: \
HANDLE_CASE(INDEX_TYPE, 1) \
break; \
case 2: \
HANDLE_CASE(INDEX_TYPE, 2) \
break; \
case 3: \
HANDLE_CASE(INDEX_TYPE, 3) \
break; \
case 4: \
HANDLE_CASE(INDEX_TYPE, 4) \
break; \
case 5: \
HANDLE_CASE(INDEX_TYPE, 5) \
break; \
default: \
THAssert(false); \
}
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, indexed)) {
RUN_T(unsigned int);
} else {
RUN_T(uint64_t);
}
#undef HANDLE_CASE
#undef RUN_T
THCudaCheck(hipGetLastError());
}
#undef MAX_ADVINDEX_CALC_DIMS
#endif
| 0973b8622d2b5681ca14ba641371e205bd06f278.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorIndex.cu"
#else
void THCTensor_(indexCopy_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexCopy)(state, dst, dim, indices_, src);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THArgCheck(numIndices == src->size[dim], 4, "length of src.size[dim] is not equal to length of indices");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstCopyDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = srcTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
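// Two launch configurations are prepared below: the "small index" kernel is
// launched over sliceSize elements and loops over the (few) indices, while
// the "large index" kernel is launched over the entire source tensor.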
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexCopyLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLongTensor *index)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(THCTensor_(nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor_nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THCTensor_(resizeNd)(state, dst, index->nDimension, index->size, NULL);
dispatchTakePut<real, TensorTakeOp>(state, src, dst, index);
}
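// helper used by the accumulate path of put(): sorts the flattened indices
// and reorders the corresponding src values with thrust::sort_by_key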
static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) {
THCThrustAllocator thrustAlloc(state);
auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index));
auto src_iter = thrust::device_ptr<real>(THCTensor_(data)(state, src));
auto numel = THCTensor_(numel)(state, src);
thrust::sort_by_key(
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
index_iter, index_iter + numel,
src_iter, ThrustLTOp<int64_t>());
}
void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
ptrdiff_t dstSize = THCTensor_(nElement)(state, dst);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index);
THArgCheck(THCTensor_(nElement)(state, src) == numIndices,
3, "src should have the same number of elements as index");
THArgCheck(THCTensor_(nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor_nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
int srcDims = THCTensor_(nDimension)(state, src);
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
if (accumulate) {
// wrap indices so to replace negative indices
THCudaLongTensor* sorted_index = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sorted_index, index);
THC_pointwiseApply2(state, sorted_index, index, WrapIndexOp(dstSize));
THCTensor* sorted_src = THCTensor_(newClone)(state, src);
THCTensor_(sort_indices)(state, sorted_index, sorted_src);
dispatchTakePut<real, TensorPutAccumulateOp>(state, dst, src, index);
THCTensor_(free)(state, sorted_src);
THCudaLongTensor_free(state, sorted_index);
} else {
dispatchTakePut<real, TensorPutOp>(state, dst, src, index);
}
}
void THCTensor_(indexAdd_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexAdd)(state, dst, dim, indices_, src);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THArgCheck(numIndices == src->size[dim], 4, "length of src.size[dim] is not equal to length of indices");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstAddDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = srcTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, sliceSize, dstAddDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexAddLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, sliceSize, dstAddDimSize);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexFill_long)(THCState *state, THCTensor *dst, int dim, THLongTensor *indices, real val)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexFill)(state, dst, dim, indices_, val);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, real val)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, dst);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `dst` tensor is partitioned into two parts:
// -the size of each slice we are filling, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t dstFillDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t sliceSize = dstTotalSize / dstFillDimSize;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, indicesInfo, \
dstFillDim, sliceSize, dstFillDimSize, val);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, indicesInfo, \
dstFillDim, sliceSize, dstFillDimSize, val);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1);
}
} else {
if (dstInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexSelect_long)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THLongTensor *indices)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THArgCheck(indices->nDimension == 1, 3, "Index is supposed to be a vector");
THCudaLongTensor *indices_ = THCudaLongTensor_newWithSize1d(state, indices->size[0]);
THCudaLongTensor_copyLong(state, indices_, indices);
THCTensor_(indexSelect)(state, dst, src, dim, indices_);
THCudaLongTensor_free(state, indices_);
}
void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices));
int dims = THCTensor_(nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor_nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(nDimension)(state, src);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor_nDimension(state, indices) == 1, 3,
"expecting vector of indices");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THLongStorage *newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, dim, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t srcSelectDimSize = THCTensor_(size)(state, src, dim);
ptrdiff_t sliceSize = dstTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, sliceSize, srcSelectDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexSelectLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, dstTotalSize, sliceSize, srcSelectDimSize);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128));
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, dst) &&
TensorUtils<THCTensor>::canUse32BitIndexMath(state, src) &&
TensorUtils<THCudaLongTensor>::canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<THCTensor, unsigned int>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<THCTensor, unsigned int>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
LARGE_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
LARGE_INDEX(real, unsigned int, 3, 3, -2);
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<THCTensor, uint64_t>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<THCTensor, uint64_t>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
#define MAX_ADVINDEX_CALC_DIMS 5
void THCTensor_(calculateAdvancedIndexingOffsets)(
THCState *state,
THCudaLongTensor *output,
THCTensor *indexed,
ptrdiff_t baseOffset,
THCudaLongTensor **indexers)
{
int ndim = THCTensor_(nDimension)(state, indexed);
THAssert(ndim <= MAX_ADVINDEX_CALC_DIMS);
// Assert all Tensors are on the same GPU, and that the indexing Tensors are
// contiguous
for (int i = 0; i < ndim; ++i) {
if (indexers[i] != NULL) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, output, indexers[i]));
THAssert(THCudaLongTensor_isContiguous(state, indexers[i]));
}
}
// Set grid, block dims
ptrdiff_t nElement = THCudaLongTensor_nElement(state, output);
const dim3 block = getApplyBlock();
dim3 grid;
THAssert(getApplyGrid(state, nElement, grid));
#define HANDLE_CASE(INDEX_TYPE, DIMS) \
{ \
LinearIndexCalcData<INDEX_TYPE, DIMS> data; \
for (int i = 0; i < DIMS; ++i) { \
data.baseSizes[i] = THCTensor_(size)(state, indexed, i); \
data.sizes[i] = indexers[i] != NULL ? \
THCudaLongTensor_nElement(state, indexers[i]) : \
THCTensor_(size)(state, indexed, i); \
data.strides[i] = THCTensor_(stride)(state, indexed, i); \
data.advIndexTensors[i] = indexers[i] != NULL ? \
THCudaLongTensor_data(state, indexers[i]) : NULL; \
} \
\
calculateLinearIndices<INDEX_TYPE, DIMS> \
<<<grid, block, 0, THCState_getCurrentStream(state)>>>( \
THCudaLongTensor_data(state, output), \
nElement, \
baseOffset, \
data \
); \
}
#define RUN_T(INDEX_TYPE) \
switch (ndim) { \
case 1: \
HANDLE_CASE(INDEX_TYPE, 1) \
break; \
case 2: \
HANDLE_CASE(INDEX_TYPE, 2) \
break; \
case 3: \
HANDLE_CASE(INDEX_TYPE, 3) \
break; \
case 4: \
HANDLE_CASE(INDEX_TYPE, 4) \
break; \
case 5: \
HANDLE_CASE(INDEX_TYPE, 5) \
break; \
default: \
THAssert(false); \
}
if (TensorUtils<THCTensor>::canUse32BitIndexMath(state, indexed)) {
RUN_T(unsigned int);
} else {
RUN_T(uint64_t);
}
#undef HANDLE_CASE
#undef RUN_T
THCudaCheck(cudaGetLastError());
}
#undef MAX_ADVINDEX_CALC_DIMS
#endif
|
e01b37bda677561b83ba8553c6647af1a03e23d0.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <string>
#include <omp.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "../../timertest/hiperformancetimer/highperformancetimer.h"
using namespace std;
#define USE_OMP
#if defined(_DEBUG)
#define GIGA (1 << 20)
#else
#define GIGA (1 << 30)
#endif
#define BMSIZE (GIGA / 8)
#define MAX_PATTERN_LENGTH 256
__constant__ char dev_pattern[MAX_PATTERN_LENGTH];
__constant__ int dev_pattern_size;
__device__ char * dev_buffer = nullptr;
__device__ unsigned char * dev_bitmap = nullptr;
__global__ void SearchGPU_V1(char * buffer, int buffer_size, unsigned char * bitmap, int bitmap_size)
{
//int cIndex = (blockIdx.x * blockDim.x + threadIdx.x) * (blockIdx.y * blockDim.y + threadIdx.y) + 1024;
// compute the global thread index (calculation taken from the NVIDIA developer forums)
int threadsperblock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int cIndex = (threadsperblock * blockNumInGrid) + threadNumInBlock;
// make sure we're in the buffer
if (cIndex < buffer_size)
{
int pIndex;
// check whether the pattern matches at this position
for (pIndex = 0; pIndex < dev_pattern_size; pIndex++)
{
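// fold ASCII upper-case characters to lower case in place (+32) before comparing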
if (*(buffer + cIndex + pIndex) <= 'Z' && *(buffer + cIndex + pIndex) >= 'A')
*(buffer + cIndex + pIndex) += 32;
// if it doesn't match the pattern, break out early
if (*(buffer + cIndex + pIndex) != *(dev_pattern + pIndex))
break;
}
// if a match is found
if (pIndex == dev_pattern_size)
{
int byte_number = cIndex >> 3;
if (byte_number < bitmap_size)
{
int bit_number = cIndex % 8;
{
*(bitmap + byte_number) |= (1 << bit_number);
}
}
}
}
}
int SearchCPU_V1(char * buffer, int buffer_size, char * pattern, int pattern_size, unsigned char * bitmap, int bitmap_size)
{
int rv = 0;
#if defined(USE_OMP)
#pragma omp parallel for
#endif
for (int cIndex = 0; cIndex < buffer_size; cIndex++)
{
int pIndex;
for (pIndex = 0; pIndex < pattern_size; pIndex++)
{
if (tolower(*(buffer + cIndex + pIndex)) != *(pattern + pIndex))
break;
}
if (pIndex == pattern_size)
{
int byte_number = cIndex >> 3;
if (byte_number < bitmap_size)
{
int bit_number = cIndex % 8;
#if defined(USE_OMP)
#pragma omp critical
#endif
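// serialize the read-modify-write on the shared bitmap and the match counter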
{
*(bitmap + byte_number) |= (1 << bit_number);
rv++;
}
}
}
}
return rv;
}
/* CStringToLower() - this function flattens a c string to all
lower case. It marches through memory until a null byte is
found. As such, some may consider this function unsafe.
By flattening the pattern, we can eliminate a tolower in
the search function - a potentially big win.
The original pointer is returned so that the function can be
used in an assignment statement.
*/
char * CStringToLower(char * s)
{
char * rv = s;
for (; *s != NULL; s++)
{
*s = tolower(*s);
}
return rv;
}
inline void CheckCudaAndThrow(hipError_t t, const string & message)
{
if (t != hipSuccess)
throw message;
}
int main(int argc, char * argv[])
{
cout.imbue(locale(""));
ifstream f("C:/Users/educ/Documents/enwiki-latest-abstract.xml");
HighPrecisionTime hpt;
char * hst_buffer = nullptr;
unsigned char * hst_bm = nullptr;
unsigned char * chk_bm = nullptr;
#if defined(USE_OMP)
cout << "OMP enabled on " << omp_get_max_threads() << " threads." << endl;
#endif
try
{
if (argc < 2)
throw string("First argument must be target string.");
char * pattern = CStringToLower(argv[1]);
int pattern_size = strlen(pattern);
if (!f.is_open())
throw string("File failed to open");
hst_buffer = new char[GIGA];
hst_bm = new unsigned char[BMSIZE]();
chk_bm = new unsigned char[BMSIZE];
hpt.TimeSinceLastCall();
f.read(hst_buffer, GIGA);
if (!f)
throw string("Failed to read full buffer.");
double read_time = hpt.TimeSinceLastCall();
cout << GIGA << " bytes read from disk in " << read_time << " seconds at " << GIGA / read_time / double(1 << 30) << " GB / second." << endl;
CheckCudaAndThrow(hipSetDevice(0), string("hipSetDevice(0) failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(hipMalloc(&dev_buffer, GIGA), string("hipMalloc failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(hipMalloc(&dev_bitmap, BMSIZE), string("hipMalloc failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(hipMemset(dev_bitmap, 0, BMSIZE), string("hipMemset failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(hipMemcpyToSymbol(dev_pattern, pattern, pattern_size, 0), string("hipMemcpyToSymbol failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(hipMemcpyToSymbol(dev_pattern_size, &pattern_size, sizeof(int), 0), string("hipMemcpyToSymbol failed on line ") + to_string(__LINE__));
hpt.TimeSinceLastCall();
CheckCudaAndThrow(hipMemcpy(dev_buffer, hst_buffer, GIGA, hipMemcpyHostToDevice), string("hipMemcpy failed on line ") + to_string(__LINE__));
double copy_time = hpt.TimeSinceLastCall();
cout << GIGA << " data bytes copied to GPU in " << copy_time << " seconds at " << GIGA / copy_time / double(1 << 30) << " GB / second." << endl;
hpt.TimeSinceLastCall();
int matches_found = SearchCPU_V1(hst_buffer, GIGA, pattern, pattern_size, hst_bm, BMSIZE);
double time_cpu = hpt.TimeSinceLastCall();
cout << "SearchCPU_V1 found " << matches_found << " matches in " << time_cpu << " seconds.";
cout << " Searched " << GIGA / time_cpu / double(1 << 30) << " GB / second." << endl;
int threads_per_block = 1024;
dim3 grid(1024, 1024);
hpt.TimeSinceLastCall();
SearchGPU_V1 << <grid, threads_per_block >> >(dev_buffer, GIGA, dev_bitmap, BMSIZE);
CheckCudaAndThrow(hipGetLastError(), string("kernel launch failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(hipDeviceSynchronize(), string("hipDeviceSynchronize() failed on line ") + to_string(__LINE__));
double time_gpu = hpt.TimeSinceLastCall();
CheckCudaAndThrow(hipMemcpy(chk_bm, dev_bitmap, BMSIZE, hipMemcpyDeviceToHost), string("hipMemcpy() failed on line ") + to_string(__LINE__));
unsigned int * bm_alias = (unsigned int *)chk_bm;
int match_count = 0;
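// count the set bits in the result bitmap, 32 bits at a time;
// v &= v - 1 clears the lowest set bit (Kernighan's population-count trick)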
for (int i = 0; i < BMSIZE / sizeof(int); i++)
{
unsigned int c = 0;
unsigned int v = *(bm_alias + i);
for (c = 0; v; c++)
{
v &= v - 1;
}
match_count += c;
}
cout << "SearchGPU_V1 found " << match_count << " matches in " << time_gpu << " seconds.";
cout << " Searched " << GIGA / time_gpu / double(1 << 30) << " GB / second." << endl;
cout << endl;
cout << "Ratio: " << time_cpu / time_gpu << " to 1" << endl;
}
catch (string s)
{
cout << s << endl;
}
if (dev_buffer != nullptr)
hipFree(dev_buffer);
if (dev_bitmap != nullptr)
hipFree(dev_bitmap);
if (hst_buffer != nullptr)
delete[] hst_buffer;
if (hst_bm != nullptr)
delete[] hst_bm;
if (chk_bm != nullptr)
delete[] chk_bm;
if (f.is_open())
f.close();
hipDeviceReset();
#if defined(WIN64) || defined(WIN32)
cout << endl;
system("pause");
#endif
return 0;
} | e01b37bda677561b83ba8553c6647af1a03e23d0.cu | #include <iostream>
#include <fstream>
#include <string>
#include <omp.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "../../timertest/hiperformancetimer/highperformancetimer.h"
using namespace std;
#define USE_OMP
#if defined(_DEBUG)
#define GIGA (1 << 20)
#else
#define GIGA (1 << 30)
#endif
#define BMSIZE (GIGA / 8)
#define MAX_PATTERN_LENGTH 256
__constant__ char dev_pattern[MAX_PATTERN_LENGTH];
__constant__ int dev_pattern_size;
__device__ char * dev_buffer = nullptr;
__device__ unsigned char * dev_bitmap = nullptr;
__global__ void SearchGPU_V1(char * buffer, int buffer_size, unsigned char * bitmap, int bitmap_size)
{
//int cIndex = (blockIdx.x * blockDim.x + threadIdx.x) * (blockIdx.y * blockDim.y + threadIdx.y) + 1024;
// compute the global thread index (calculation taken from the NVIDIA developer forums)
int threadsperblock = blockDim.x * blockDim.y;
int threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
int blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
int cIndex = (threadsperblock * blockNumInGrid) + threadNumInBlock;
// make sure we're in the buffer
if (cIndex < buffer_size)
{
int pIndex;
// check whether the pattern matches at this position
for (pIndex = 0; pIndex < dev_pattern_size; pIndex++)
{
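// fold ASCII upper-case characters to lower case in place (+32) before comparing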
if (*(buffer + cIndex + pIndex) <= 'Z' && *(buffer + cIndex + pIndex) >= 'A')
*(buffer + cIndex + pIndex) += 32;
// if it doesn't match the pattern, break out early
if (*(buffer + cIndex + pIndex) != *(dev_pattern + pIndex))
break;
}
// if a match is found
if (pIndex == dev_pattern_size)
{
int byte_number = cIndex >> 3;
if (byte_number < bitmap_size)
{
int bit_number = cIndex % 8;
{
*(bitmap + byte_number) |= (1 << bit_number);
}
}
}
}
}
int SearchCPU_V1(char * buffer, int buffer_size, char * pattern, int pattern_size, unsigned char * bitmap, int bitmap_size)
{
int rv = 0;
#if defined(USE_OMP)
#pragma omp parallel for
#endif
for (int cIndex = 0; cIndex < buffer_size; cIndex++)
{
int pIndex;
for (pIndex = 0; pIndex < pattern_size; pIndex++)
{
if (tolower(*(buffer + cIndex + pIndex)) != *(pattern + pIndex))
break;
}
if (pIndex == pattern_size)
{
int byte_number = cIndex >> 3;
if (byte_number < bitmap_size)
{
int bit_number = cIndex % 8;
#if defined(USE_OMP)
#pragma omp critical
#endif
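// serialize the read-modify-write on the shared bitmap and the match counter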
{
*(bitmap + byte_number) |= (1 << bit_number);
rv++;
}
}
}
}
return rv;
}
/* CStringToLower() - this function flattens a c string to all
lower case. It marches through memory until a null byte is
found. As such, some may consider this function unsafe.
By flattening the pattern, we can eliminate a tolower in
the search function - a potentially big win.
The original pointer is returned so that the function can be
used in an assignment statement.
*/
char * CStringToLower(char * s)
{
char * rv = s;
for (; *s != NULL; s++)
{
*s = tolower(*s);
}
return rv;
}
inline void CheckCudaAndThrow(cudaError_t t, const string & message)
{
if (t != cudaSuccess)
throw message;
}
int main(int argc, char * argv[])
{
cout.imbue(locale(""));
ifstream f("C:/Users/educ/Documents/enwiki-latest-abstract.xml");
HighPrecisionTime hpt;
char * hst_buffer = nullptr;
unsigned char * hst_bm = nullptr;
unsigned char * chk_bm = nullptr;
#if defined(USE_OMP)
cout << "OMP enabled on " << omp_get_max_threads() << " threads." << endl;
#endif
try
{
if (argc < 2)
throw string("First argument must be target string.");
char * pattern = CStringToLower(argv[1]);
int pattern_size = strlen(pattern);
if (!f.is_open())
throw string("File failed to open");
hst_buffer = new char[GIGA];
hst_bm = new unsigned char[BMSIZE]();
chk_bm = new unsigned char[BMSIZE];
hpt.TimeSinceLastCall();
f.read(hst_buffer, GIGA);
if (!f)
throw string("Failed to read full buffer.");
double read_time = hpt.TimeSinceLastCall();
cout << GIGA << " bytes read from disk in " << read_time << " seconds at " << GIGA / read_time / double(1 << 30) << " GB / second." << endl;
CheckCudaAndThrow(cudaSetDevice(0), string("cudaSetDevice(0) failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(cudaMalloc(&dev_buffer, GIGA), string("cudaMalloc failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(cudaMalloc(&dev_bitmap, BMSIZE), string("cudaMalloc failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(cudaMemset(dev_bitmap, 0, BMSIZE), string("cudaMemset failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(cudaMemcpyToSymbol(dev_pattern, pattern, pattern_size, 0), string("cudaMemcpyToSymbol failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(cudaMemcpyToSymbol(dev_pattern_size, &pattern_size, sizeof(int), 0), string("cudaMemcpyToSymbol failed on line ") + to_string(__LINE__));
hpt.TimeSinceLastCall();
CheckCudaAndThrow(cudaMemcpy(dev_buffer, hst_buffer, GIGA, cudaMemcpyHostToDevice), string("cudaMemcpy failed on line ") + to_string(__LINE__));
double copy_time = hpt.TimeSinceLastCall();
cout << GIGA << " data bytes copied to GPU in " << copy_time << " seconds at " << GIGA / copy_time / double(1 << 30) << " GB / second." << endl;
hpt.TimeSinceLastCall();
int matches_found = SearchCPU_V1(hst_buffer, GIGA, pattern, pattern_size, hst_bm, BMSIZE);
double time_cpu = hpt.TimeSinceLastCall();
cout << "SearchCPU_V1 found " << matches_found << " matches in " << time_cpu << " seconds.";
cout << " Searched " << GIGA / time_cpu / double(1 << 30) << " GB / second." << endl;
int threads_per_block = 1024;
dim3 grid(1024, 1024);
hpt.TimeSinceLastCall();
SearchGPU_V1 << <grid, threads_per_block >> >(dev_buffer, GIGA, dev_bitmap, BMSIZE);
CheckCudaAndThrow(cudaGetLastError(), string("kernel launch failed on line ") + to_string(__LINE__));
CheckCudaAndThrow(cudaDeviceSynchronize(), string("cudaDeviceSynchronize() failed on line ") + to_string(__LINE__));
double time_gpu = hpt.TimeSinceLastCall();
CheckCudaAndThrow(cudaMemcpy(chk_bm, dev_bitmap, BMSIZE, cudaMemcpyDeviceToHost), string("cudaMemcpy() failed on line ") + to_string(__LINE__));
unsigned int * bm_alias = (unsigned int *)chk_bm;
int match_count = 0;
for (int i = 0; i < BMSIZE / sizeof(int); i++)
{
unsigned int c = 0;
unsigned int v = *(bm_alias + i);
for (c = 0; v; c++)
{
v &= v - 1;
}
match_count += c;
}
cout << "SearchGPU_V1 found " << match_count << " matches in " << time_gpu << " seconds.";
cout << " Searched " << GIGA / time_gpu / double(1 << 30) << " GB / second." << endl;
cout << endl;
cout << "Ratio: " << time_cpu / time_gpu << " to 1" << endl;
}
catch (string s)
{
cout << s << endl;
}
if (dev_buffer != nullptr)
cudaFree(dev_buffer);
if (dev_bitmap != nullptr)
cudaFree(dev_bitmap);
if (hst_buffer != nullptr)
delete[] hst_buffer;
if (hst_bm != nullptr)
delete[] hst_bm;
if (chk_bm != nullptr)
delete[] chk_bm;
if (f.is_open())
f.close();
cudaDeviceReset();
#if defined(WIN64) || defined(WIN32)
cout << endl;
system("pause");
#endif
return 0;
} |
0bb5d37ebfe263b29cdc26613b6ed6bc76042ed4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
//#include <malloc.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
//#include "hip/hip_runtime.h"
//#include "device_launch_parameters.h"
#define N (1024*1024)
#define FULL_DATA_SIZE (N*20)
__global__ void kernel(int* a, int* b, int* c) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
int idx1 = (idx + 1) % 256;
int idx2 = (idx + 2) % 256;
float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
c[idx] = (as + bs) / 2;
}
}
__global__ void add_gpu(int* a, int *b, int* c) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid <= N/256)
c[tid] = a[tid] + b[tid];
}
int main()
{
hipDeviceProp_t prop;
int whichDevice;
// check whether the device supports overlapping computation with memory copy
// if it supports overlap, everything is fine
hipGetDevice(&whichDevice);
hipGetDeviceProperties(&prop, whichDevice);
if (!prop.deviceOverlap) {
printf("device will not handle\n");
return 0;
}
hipEvent_t start, stop;
float elepsedTime;
// create the timing events
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// create the stream
hipStream_t stream;
hipStreamCreate(&stream);
int* host_a, * host_b, * host_c;
int* dev_a, * dev_b, * dev_c;
hipMalloc((void**)&dev_a, N * sizeof(int));
hipMalloc((void**)&dev_b, N * sizeof(int));
hipMalloc((void**)&dev_c, N * sizeof(int));
// allocate pinned (page-locked) host memory:
hipHostMalloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault);
hipHostMalloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault);
hipHostMalloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault);
for (int i = 0; i < FULL_DATA_SIZE; i++) {
host_a[i] = rand();
host_b[i] = rand();
}
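// process the data in N-element chunks on a single stream: queue the async
// host-to-device copies, the kernel, and the async device-to-host copy for
// each chunk, then wait for the stream once after the loop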
for (int i = 0; i < FULL_DATA_SIZE; i += N) {
hipMemcpyAsync(dev_a, host_a + i, N * sizeof(int), hipMemcpyHostToDevice, stream);
hipMemcpyAsync(dev_b, host_b + i, N * sizeof(int), hipMemcpyHostToDevice, stream);
kernel << < N / 256, 256, 0, stream >> > (dev_a, dev_b, dev_c);
hipMemcpyAsync(host_c + i, dev_c, N * sizeof(int), hipMemcpyDeviceToHost, stream);
}
hipStreamSynchronize(stream);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elepsedTime, start, stop);
printf("Time taken: %3.1f ms\n", elepsedTime);
hipHostFree(host_a);
hipHostFree(host_b);
hipHostFree(host_c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
hipStreamDestroy(stream);
//
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//
hipStream_t stream0, stream1;
hipStreamCreate(&stream0);
hipStreamCreate(&stream1);
int* dev_a0, * dev_b0, * dev_c0;// first GPU input buffers, for stream0, to be filled with random numbers
int* dev_a1, * dev_b1, * dev_c1;// second GPU input buffers, for stream1, to be filled with random numbers
// allocate memory on the GPU
hipMalloc((void**)&dev_a0, N * sizeof(int));
hipMalloc((void**)&dev_b0, N * sizeof(int));
hipMalloc((void**)&dev_c0, N * sizeof(int));
// allocate memory on the GPU
hipMalloc((void**)&dev_a1, N * sizeof(int));
hipMalloc((void**)&dev_b1, N * sizeof(int));
hipMalloc((void**)&dev_c1, N * sizeof(int));
// allocate the page-locked memory used by the streams
hipHostMalloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault);
hipHostMalloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault);
hipHostMalloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault);
for (int i = 0; i < FULL_DATA_SIZE; i++) {
host_a[i] = i;//rand();
host_b[i] = i;// rand();
}
// now iterate over the whole data set in chunks
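// each iteration issues two chunks, one on stream0 and one on stream1, so
// that copies queued on one stream can overlap kernel execution on the other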
for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
// asynchronously copy pinned memory to the device
hipMemcpyAsync(dev_a0, host_a + i, N * sizeof(int), hipMemcpyHostToDevice, stream0);
hipMemcpyAsync(dev_b0, host_b + i, N * sizeof(int), hipMemcpyHostToDevice, stream0);
add_gpu << <N / 256, 256, 0, stream0 >> > (dev_a0, dev_b0, dev_c0);
// copy the result from the device back to pinned memory
hipMemcpyAsync(host_c + i, dev_c0, N * sizeof(int), hipMemcpyDeviceToHost, stream0);
// asynchronously copy pinned memory to the device
hipMemcpyAsync(dev_a1, host_a + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream1);
hipMemcpyAsync(dev_b1, host_b + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream1);
add_gpu << <N / 256, 256, 0, stream1 >> > (dev_a1, dev_b1, dev_c1);
// copy the result from the device back to pinned memory
hipMemcpyAsync(host_c + i + N, dev_c1, N * sizeof(int), hipMemcpyDeviceToHost, stream1);
}
// synchronize both streams
hipStreamSynchronize(stream0);
hipStreamSynchronize(stream1);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elepsedTime, start, stop);
printf("Time taken: %3.1f ms\n", elepsedTime);
/*for (int i = 0; i < N / 256; i++) {
printf("%d+%d=%d\n", host_a[i], host_b[i], host_c[i]);
}
*/
hipHostFree(host_a);
hipHostFree(host_b);
hipHostFree(host_c);
hipFree(dev_a0);
hipFree(dev_b0);
hipFree(dev_c0);
hipStreamDestroy(stream0);
hipFree(dev_a1);
hipFree(dev_b1);
hipFree(dev_c1);
hipStreamDestroy(stream1);
return 0;
}
| 0bb5d37ebfe263b29cdc26613b6ed6bc76042ed4.cu | #include <stdio.h>
//#include <malloc.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#define N (1024*1024)
#define FULL_DATA_SIZE (N*20)
__global__ void kernel(int* a, int* b, int* c) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
int idx1 = (idx + 1) % 256;
int idx2 = (idx + 2) % 256;
float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
c[idx] = (as + bs) / 2;
}
}
__global__ void add_gpu(int* a, int *b, int* c) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid <= N/256)
c[tid] = a[tid] + b[tid];
}
int main()
{
cudaDeviceProp prop;
int whichDevice;
// check whether the device supports overlapping computation with memory copy
// if it supports overlap, everything is fine
cudaGetDevice(&whichDevice);
cudaGetDeviceProperties(&prop, whichDevice);
if (!prop.deviceOverlap) {
printf("device will not handle\n");
return 0;
}
cudaEvent_t start, stop;
float elepsedTime;
// create the timing events
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// create the stream
cudaStream_t stream;
cudaStreamCreate(&stream);
int* host_a, * host_b, * host_c;
int* dev_a, * dev_b, * dev_c;
cudaMalloc((void**)&dev_a, N * sizeof(int));
cudaMalloc((void**)&dev_b, N * sizeof(int));
cudaMalloc((void**)&dev_c, N * sizeof(int));
// allocate pinned (page-locked) host memory:
cudaHostAlloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
for (int i = 0; i < FULL_DATA_SIZE; i++) {
host_a[i] = rand();
host_b[i] = rand();
}
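// process the data in N-element chunks on a single stream: queue the async
// host-to-device copies, the kernel, and the async device-to-host copy for
// each chunk, then wait for the stream once after the loop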
for (int i = 0; i < FULL_DATA_SIZE; i += N) {
cudaMemcpyAsync(dev_a, host_a + i, N * sizeof(int), cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(dev_b, host_b + i, N * sizeof(int), cudaMemcpyHostToDevice, stream);
kernel << < N / 256, 256, 0, stream >> > (dev_a, dev_b, dev_c);
cudaMemcpyAsync(host_c + i, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost, stream);
}
cudaStreamSynchronize(stream);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elepsedTime, start, stop);
printf("Time taken: %3.1f ms\n", elepsedTime);
cudaFreeHost(host_a);
cudaFreeHost(host_b);
cudaFreeHost(host_c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaStreamDestroy(stream);
// create the timing events
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// create the two streams
cudaStream_t stream0, stream1;
cudaStreamCreate(&stream0);
cudaStreamCreate(&stream1);
int* dev_a0, * dev_b0, * dev_c0;// first GPU input buffers, for stream0, to be filled with random numbers
int* dev_a1, * dev_b1, * dev_c1;// second GPU input buffers, for stream1, to be filled with random numbers
// allocate memory on the GPU
cudaMalloc((void**)&dev_a0, N * sizeof(int));
cudaMalloc((void**)&dev_b0, N * sizeof(int));
cudaMalloc((void**)&dev_c0, N * sizeof(int));
// allocate memory on the GPU
cudaMalloc((void**)&dev_a1, N * sizeof(int));
cudaMalloc((void**)&dev_b1, N * sizeof(int));
cudaMalloc((void**)&dev_c1, N * sizeof(int));
// allocate the page-locked memory used by the streams
cudaHostAlloc((void**)&host_a, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void**)&host_b, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void**)&host_c, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
for (int i = 0; i < FULL_DATA_SIZE; i++) {
host_a[i] = i;//rand();
host_b[i] = i;// rand();
}
// now iterate over the whole data set in chunks
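// each iteration issues two chunks, one on stream0 and one on stream1, so
// that copies queued on one stream can overlap kernel execution on the other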
for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
// asynchronously copy pinned memory to the device
cudaMemcpyAsync(dev_a0, host_a + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(dev_b0, host_b + i, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
add_gpu << <N / 256, 256, 0, stream0 >> > (dev_a0, dev_b0, dev_c0);
// copy the result from the device back to pinned memory
cudaMemcpyAsync(host_c + i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0);
// asynchronously copy pinned memory to the device
cudaMemcpyAsync(dev_a1, host_a + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1);
cudaMemcpyAsync(dev_b1, host_b + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream1);
add_gpu << <N / 256, 256, 0, stream1 >> > (dev_a1, dev_b1, dev_c1);
// copy the result from the device back to pinned memory
cudaMemcpyAsync(host_c + i + N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream1);
}
// synchronize both streams
cudaStreamSynchronize(stream0);
cudaStreamSynchronize(stream1);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elepsedTime, start, stop);
printf("Time taken: %3.1f ms\n", elepsedTime);
/*for (int i = 0; i < N / 256; i++) {
printf("%d+%d=%d\n", host_a[i], host_b[i], host_c[i]);
}
*/
cudaFreeHost(host_a);
cudaFreeHost(host_b);
cudaFreeHost(host_c);
cudaFree(dev_a0);
cudaFree(dev_b0);
cudaFree(dev_c0);
cudaStreamDestroy(stream0);
cudaFree(dev_a1);
cudaFree(dev_b1);
cudaFree(dev_c1);
cudaStreamDestroy(stream1);
return 0;
}
|
f3ddeccbee997ae56c292879e6a0bcf536aa3f0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "../THCTensorMasked.cuh"
#include "THHTensor.hpp"
#include "THHStream.hpp"
#include "../generic/THCTensorMasked.cu"
#include "../THCGenerateLongType.h"
| f3ddeccbee997ae56c292879e6a0bcf536aa3f0a.cu | #include "../THCTensorMasked.cuh"
#include "THCTensor.hpp"
#include "THCStream.hpp"
#include "../generic/THCTensorMasked.cu"
#include "../THCGenerateLongType.h"
|
2a0acc5ebeedbe76f9bda00513390e024440391e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/filling.hpp>
#include <cudf/types.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy_range.cuh>
#include <cudf/detail/fill.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/mr/device_memory_resource.hpp>
#include <hip/hip_runtime.h>
#include <memory>
namespace {
struct inplace_fill_range_dispatch {
cudf::scalar const* p_value = nullptr;
cudf::mutable_column_view& target;
template <typename T>
std::enable_if_t<cudf::is_fixed_width<T>(), void>
operator()(cudf::size_type source_begin, cudf::size_type source_end,
cudf::size_type target_begin, hipStream_t stream = 0) {
using ScalarType = cudf::experimental::scalar_type_t<T>;
#if 1
// TODO: temporary till the const issue in cudf::scalar's value() is fixed.
auto p_scalar =
const_cast<ScalarType*>(static_cast<ScalarType const*>(this->p_value));
#else
auto p_scalar = static_cast<ScalarType const*>(this->p_value);
#endif
T value = p_scalar->value(stream);
bool is_valid = p_scalar->is_valid();
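// fill the target range with a constant value/validity pair by passing
// constant iterators to the generic copy_range implementation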
cudf::experimental::detail::copy_range(
thrust::make_constant_iterator(value),
thrust::make_constant_iterator(is_valid),
target, target_begin, target_begin + (source_end - source_begin),
stream);
}
template <typename T>
std::enable_if_t<not cudf::is_fixed_width<T>(), void>
operator()(cudf::size_type source_begin, cudf::size_type source_end,
cudf::size_type target_begin, hipStream_t stream = 0) {
CUDF_FAIL("in-place fill does not work for variable width types.");
}
};
} // namespace
namespace cudf {
namespace experimental {
namespace detail {
void fill(mutable_column_view& destination,
size_type begin,
size_type end,
scalar const& value,
hipStream_t stream) {
CUDF_EXPECTS(cudf::is_fixed_width(destination.type()) == true,
"In-place fill does not support variable-sized types.");
CUDF_EXPECTS((begin >= 0) &&
(begin <= end) &&
(begin < destination.size()) &&
(end <= destination.size()),
"Range is out of bounds.");
CUDF_EXPECTS((destination.nullable() == true) || (value.is_valid() == true),
"destination should be nullable or value should be non-null.");
CUDF_EXPECTS(destination.type() == value.type(), "Data type mismatch.");
if (end != begin) { // otherwise no-op
cudf::experimental::type_dispatcher(
destination.type(),
inplace_fill_range_dispatch{&value, destination},
0, end - begin, begin, stream);
}
return;
}
std::unique_ptr<column> fill(column_view const& input,
size_type begin,
size_type end,
scalar const& value,
rmm::mr::device_memory_resource* mr,
hipStream_t stream) {
CUDF_EXPECTS(cudf::is_fixed_width(input.type()) == true,
"Variable-sized types are not supported yet.");
CUDF_EXPECTS((begin >= 0) &&
(begin <= end) &&
(begin < input.size()) &&
(end <= input.size()),
"Range is out of bounds.");
CUDF_EXPECTS(input.type() == value.type(), "Data type mismatch.");
auto p_ret = std::make_unique<column>(input, stream, mr);
if (!p_ret->nullable() && !value.is_valid()) {
p_ret->set_null_mask(
create_null_mask(p_ret->size(), ALL_VALID, stream, mr), 0);
}
if (end != begin) { // otherwise no fill
auto destination = p_ret->mutable_view();
fill(destination, begin, end, value, stream);
}
return p_ret;
}
} // namespace detail
void fill(mutable_column_view& destination,
size_type begin,
size_type end,
scalar const& value) {
return detail::fill(destination, begin, end, value, 0);
}
std::unique_ptr<column> fill(column_view const& input,
size_type begin,
size_type end,
scalar const& value,
rmm::mr::device_memory_resource* mr) {
return detail::fill(input, begin, end, value, mr, 0);
}
} // namespace experimental
} // namespace cudf
| 2a0acc5ebeedbe76f9bda00513390e024440391e.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/filling.hpp>
#include <cudf/types.hpp>
#include <cudf/column/column.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/copy_range.cuh>
#include <cudf/detail/fill.hpp>
#include <cudf/scalar/scalar.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/mr/device_memory_resource.hpp>
#include <cuda_runtime.h>
#include <memory>
namespace {
struct inplace_fill_range_dispatch {
cudf::scalar const* p_value = nullptr;
cudf::mutable_column_view& target;
template <typename T>
std::enable_if_t<cudf::is_fixed_width<T>(), void>
operator()(cudf::size_type source_begin, cudf::size_type source_end,
cudf::size_type target_begin, cudaStream_t stream = 0) {
using ScalarType = cudf::experimental::scalar_type_t<T>;
#if 1
// TODO: temporary till the const issue in cudf::scalar's value() is fixed.
auto p_scalar =
const_cast<ScalarType*>(static_cast<ScalarType const*>(this->p_value));
#else
auto p_scalar = static_cast<ScalarType const*>(this->p_value);
#endif
T value = p_scalar->value(stream);
bool is_valid = p_scalar->is_valid();
cudf::experimental::detail::copy_range(
thrust::make_constant_iterator(value),
thrust::make_constant_iterator(is_valid),
target, target_begin, target_begin + (source_end - source_begin),
stream);
}
template <typename T>
std::enable_if_t<not cudf::is_fixed_width<T>(), void>
operator()(cudf::size_type source_begin, cudf::size_type source_end,
cudf::size_type target_begin, cudaStream_t stream = 0) {
CUDF_FAIL("in-place fill does not work for variable width types.");
}
};
} // namespace
namespace cudf {
namespace experimental {
namespace detail {
void fill(mutable_column_view& destination,
size_type begin,
size_type end,
scalar const& value,
cudaStream_t stream) {
CUDF_EXPECTS(cudf::is_fixed_width(destination.type()) == true,
"In-place fill does not support variable-sized types.");
CUDF_EXPECTS((begin >= 0) &&
(begin <= end) &&
(begin < destination.size()) &&
(end <= destination.size()),
"Range is out of bounds.");
CUDF_EXPECTS((destination.nullable() == true) || (value.is_valid() == true),
"destination should be nullable or value should be non-null.");
CUDF_EXPECTS(destination.type() == value.type(), "Data type mismatch.");
if (end != begin) { // otherwise no-op
cudf::experimental::type_dispatcher(
destination.type(),
inplace_fill_range_dispatch{&value, destination},
0, end - begin, begin, stream);
}
return;
}
std::unique_ptr<column> fill(column_view const& input,
size_type begin,
size_type end,
scalar const& value,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream) {
CUDF_EXPECTS(cudf::is_fixed_width(input.type()) == true,
"Variable-sized types are not supported yet.");
CUDF_EXPECTS((begin >= 0) &&
(begin <= end) &&
(begin < input.size()) &&
(end <= input.size()),
"Range is out of bounds.");
CUDF_EXPECTS(input.type() == value.type(), "Data type mismatch.");
auto p_ret = std::make_unique<column>(input, stream, mr);
if (!p_ret->nullable() && !value.is_valid()) {
p_ret->set_null_mask(
create_null_mask(p_ret->size(), ALL_VALID, stream, mr), 0);
}
if (end != begin) { // otherwise no fill
auto destination = p_ret->mutable_view();
fill(destination, begin, end, value, stream);
}
return p_ret;
}
} // namespace detail
void fill(mutable_column_view& destination,
size_type begin,
size_type end,
scalar const& value) {
return detail::fill(destination, begin, end, value, 0);
}
std::unique_ptr<column> fill(column_view const& input,
size_type begin,
size_type end,
scalar const& value,
rmm::mr::device_memory_resource* mr) {
return detail::fill(input, begin, end, value, mr, 0);
}
} // namespace experimental
} // namespace cudf
|
52d55199f787d8ce3657fa7973a3f6078306b7fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "timers.cuh"
#include <cstdio>
// RESEARCH QUESTION: how accurate is nanosleep
// [0...2n] is what we read
//
// QUESTIONS:
// what PTX does clock64() use (%globaltimer?)
// __nanosleep
// The sleep duration is approximated, but guaranteed to be in the
// interval [0, 2*t]. The implementation may reduce the sleep duration for
// individual threads within a warp such that all sleeping threads in the
// warp wake up together.
// %globaltimer (A predefined, 64-bit global nanosecond timer.)
// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#special-registers-globaltimer
//
//
// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#special-registers-warpid
// %warpid %nwarpid
// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#special-registers-smid
// %smid and %nsmid (may not be contiguous)
extern "C" static __device__ uint32_t get_dbi_bits()
{
// uint32_t w_id;
// uint32_t nw_id;
// asm("mov.u32 %0, %warpid;" : "=r"(w_id));
// asm("mov.u32 %0, %nwarpid;" : "=r"(nw_id));
// printf(" #%03d ==> @%04d (%d)\n", blockIdx.x, sm_id, nsm_id);
return ((uint32_t)blockIdx.x << 16) | (uint16_t)get_smid();
}
extern "C" __global__ void glob_get_nsmid(int *nsmid)
{
if (blockIdx.x == 0)
*nsmid = get_nsmid();
}
extern "C" __global__ void glob_block_dist(
int64_t *times, uint32_t *ids, long delay)
{
auto now = clock64();
if (delay > 0)
__nanosleep(delay);
ids[get_tid()] = get_dbi_bits();
times[get_tid()] = now;
}
// __device__ warp_event::warp_event(uint64_t st)
extern "C" __global__ void glob_block_dist_atomic(
warp_event *evt_stream, uint32_t *evt_stream_idx, long delay)
{
auto st = clock64();
if (delay > 0)
__nanosleep(delay);
write_warp_event(st, evt_stream, evt_stream_idx);
}
extern "C" __global__ void glob_get_times(tstamps *tss)
{
tss[get_tid()] = tstamps();
}
extern "C" __global__ void glob_globaltimer_cost(uint32_t *cost_samples)
{
uint32_t samples[GLOBAL_TIMER_COST_LOOP_TRIPS];
uint64_t sum = 0;
for (int i = 0; i < GLOBAL_TIMER_COST_LOOP_TRIPS; i++) {
const auto st = clock64();
sum += get_globaltimer();
const auto en = clock64();
samples[i] = (uint32_t)(en - st);
}
for (int i = 0; i < GLOBAL_TIMER_COST_LOOP_TRIPS; i++) {
cost_samples[GLOBAL_TIMER_COST_LOOP_TRIPS * get_tid() + i] = samples[i];
}
if (sum > 0x7FFFFFFFFFFFFFFFull) {
cost_samples[0] = 0;
}
}
// fetch N delta samples really fast and return a sample set
extern "C" __global__ void glob_globaltimer_resolution(
uint64_t *ticks, int timer_source)
{
auto getSample = [&]() -> uint64_t {
if (timer_source == 0) {
return get_globaltimer();
} else {
return clock64();
}
};
uint64_t samples[GLOBALTIMER_RES_SAMPLES];
auto prev = getSample();
for (int i = 0; i < GLOBALTIMER_RES_SAMPLES; i++) {
auto t = getSample();
samples[i] = t - prev;
prev = t;
}
for (int i = 0; i < GLOBALTIMER_RES_SAMPLES; i++) {
ticks[GLOBALTIMER_RES_SAMPLES * get_tid() + i] = samples[i];
}
}
extern "C" __global__ void glob_globaltimer_resolution2(
uint32_t *ticks_g, uint32_t *ticks_l)
{
uint32_t samples_g[GLOBALTIMER_RES_SAMPLES], samples_l[GLOBALTIMER_RES_SAMPLES];
uint64_t prev_g = get_globaltimer();
uint64_t prev_l = clock64();
for (int i = 0; i < GLOBALTIMER_RES_SAMPLES; i++) {
uint64_t curr_g = get_globaltimer();
samples_g[i] = (uint32_t)(curr_g - prev_g);
prev_g = curr_g;
//
uint64_t curr_l = clock64();
samples_l[i] = (uint32_t)(curr_l - prev_l);
prev_l = curr_l;
}
for (int i = 0; i < GLOBALTIMER_RES_SAMPLES; i++) {
ticks_g[GLOBALTIMER_RES_SAMPLES * get_tid() + i] = samples_g[i];
ticks_l[GLOBALTIMER_RES_SAMPLES * get_tid() + i] = samples_l[i];
}
}
/*
extern "C" static __device__ __noinline__ int function_nodelay(long delay)
{
int id = get_tid();
return id;
}
extern "C" static __device__ __noinline__ int function_delay(long delay)
{
int id = get_tid();
__nanosleep(delay);
return id;
}
extern "C" __global__ void glob_init(int64_t *times, uint32_t *ids, long delay)
{
auto st = clock64();
auto id = function_nodelay(delay);
auto en = clock64();
times[id] = (int)(en - st);
ids[id] = get_phys_id();
}
extern "C" __global__ void glob_nanosleep(int64_t *times, uint32_t *ids, long delay)
{
auto st = clock64();
auto id = function_delay(delay);
auto en = clock64();
times[id] = (int)(en - st);
ids[id] = get_phys_id();
}
extern "C" __global__ void glob_clock_value(int64_t *times, uint32_t *ids)
{
auto clock_value = clock64();
auto id = get_tid();
times[id] = clock_value;
ids[id] = get_phys_id();
}
*/ | 52d55199f787d8ce3657fa7973a3f6078306b7fd.cu | #include "timers.cuh"
#include <cstdio>
// RESEARCH QUESTION: how accurate is nanosleep
// [0...2n] is what we read
//
// QUESTIONS:
// what PTX does clock64() use (%globaltimer?)
// __nanosleep
// The sleep duration is approximated, but guaranteed to be in the
// interval [0, 2*t]. The implementation may reduce the sleep duration for
// individual threads within a warp such that all sleeping threads in the
// warp wake up together.
// %globaltimer (A predefined, 64-bit global nanosecond timer.)
// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#special-registers-globaltimer
//
//
// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#special-registers-warpid
// %warpid %nwarpid
// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#special-registers-smid
// %smid and %nsmid (may not be contiguous)
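// --- illustrative sketch, not part of timers.cuh ------------------------------
// timers.cuh (which defines get_globaltimer()/get_smid()/get_tid() used below)
// is not shown here; the helpers that follow are an assumed minimal version of
// how such readers are typically written with the PTX special registers listed
// above. The names are hypothetical so they cannot clash with the real ones.
static __device__ __forceinline__ uint64_t example_read_globaltimer()
{
uint64_t ns;
asm volatile("mov.u64 %0, %%globaltimer;" : "=l"(ns)); // 64-bit nanosecond timer
return ns;
}
static __device__ __forceinline__ uint32_t example_read_smid()
{
uint32_t sm;
asm volatile("mov.u32 %0, %%smid;" : "=r"(sm)); // physical SM id (may be non-contiguous)
return sm;
}
// -------------------------------------------------------------------------------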
extern "C" static __device__ uint32_t get_dbi_bits()
{
// uint32_t w_id;
// uint32_t nw_id;
// asm("mov.u32 %0, %warpid;" : "=r"(w_id));
// asm("mov.u32 %0, %nwarpid;" : "=r"(nw_id));
// printf(" #%03d ==> @%04d (%d)\n", blockIdx.x, sm_id, nsm_id);
return ((uint32_t)blockIdx.x << 16) | (uint16_t)get_smid();
}
extern "C" __global__ void glob_get_nsmid(int *nsmid)
{
if (blockIdx.x == 0)
*nsmid = get_nsmid();
}
extern "C" __global__ void glob_block_dist(
int64_t *times, uint32_t *ids, long delay)
{
auto now = clock64();
if (delay > 0)
__nanosleep(delay);
ids[get_tid()] = get_dbi_bits();
times[get_tid()] = now;
}
// __device__ warp_event::warp_event(uint64_t st)
extern "C" __global__ void glob_block_dist_atomic(
warp_event *evt_stream, uint32_t *evt_stream_idx, long delay)
{
auto st = clock64();
if (delay > 0)
__nanosleep(delay);
write_warp_event(st, evt_stream, evt_stream_idx);
}
extern "C" __global__ void glob_get_times(tstamps *tss)
{
tss[get_tid()] = tstamps();
}
extern "C" __global__ void glob_globaltimer_cost(uint32_t *cost_samples)
{
uint32_t samples[GLOBAL_TIMER_COST_LOOP_TRIPS];
uint64_t sum = 0;
for (int i = 0; i < GLOBAL_TIMER_COST_LOOP_TRIPS; i++) {
const auto st = clock64();
sum += get_globaltimer();
const auto en = clock64();
samples[i] = (uint32_t)(en - st);
}
for (int i = 0; i < GLOBAL_TIMER_COST_LOOP_TRIPS; i++) {
cost_samples[GLOBAL_TIMER_COST_LOOP_TRIPS * get_tid() + i] = samples[i];
}
if (sum > 0x7FFFFFFFFFFFFFFFull) {
cost_samples[0] = 0;
}
}
// fetch N delta samples really fast and return a sample set
extern "C" __global__ void glob_globaltimer_resolution(
uint64_t *ticks, int timer_source)
{
auto getSample = [&]() -> uint64_t {
if (timer_source == 0) {
return get_globaltimer();
} else {
return clock64();
}
};
uint64_t samples[GLOBALTIMER_RES_SAMPLES];
auto prev = getSample();
for (int i = 0; i < GLOBALTIMER_RES_SAMPLES; i++) {
auto t = getSample();
samples[i] = t - prev;
prev = t;
}
for (int i = 0; i < GLOBALTIMER_RES_SAMPLES; i++) {
ticks[GLOBALTIMER_RES_SAMPLES * get_tid() + i] = samples[i];
}
}
extern "C" __global__ void glob_globaltimer_resolution2(
uint32_t *ticks_g, uint32_t *ticks_l)
{
uint32_t samples_g[GLOBALTIMER_RES_SAMPLES], samples_l[GLOBALTIMER_RES_SAMPLES];
uint64_t prev_g = get_globaltimer();
uint64_t prev_l = clock64();
for (int i = 0; i < GLOBALTIMER_RES_SAMPLES; i++) {
uint64_t curr_g = get_globaltimer();
samples_g[i] = (uint32_t)(curr_g - prev_g);
prev_g = curr_g;
//
uint64_t curr_l = clock64();
samples_l[i] = (uint32_t)(curr_l - prev_l);
prev_l = curr_l;
}
for (int i = 0; i < GLOBALTIMER_RES_SAMPLES; i++) {
ticks_g[GLOBALTIMER_RES_SAMPLES * get_tid() + i] = samples_g[i];
ticks_l[GLOBALTIMER_RES_SAMPLES * get_tid() + i] = samples_l[i];
}
}
/*
extern "C" static __device__ __noinline__ int function_nodelay(long delay)
{
int id = get_tid();
return id;
}
extern "C" static __device__ __noinline__ int function_delay(long delay)
{
int id = get_tid();
__nanosleep(delay);
return id;
}
extern "C" __global__ void glob_init(int64_t *times, uint32_t *ids, long delay)
{
auto st = clock64();
auto id = function_nodelay(delay);
auto en = clock64();
times[id] = (int)(en - st);
ids[id] = get_phys_id();
}
extern "C" __global__ void glob_nanosleep(int64_t *times, uint32_t *ids, long delay)
{
auto st = clock64();
auto id = function_delay(delay);
auto en = clock64();
times[id] = (int)(en - st);
ids[id] = get_phys_id();
}
extern "C" __global__ void glob_clock_value(int64_t *times, uint32_t *ids)
{
auto clock_value = clock64();
auto id = get_tid();
times[id] = clock_value;
ids[id] = get_phys_id();
}
*/ |
c63511ed509acebaefc26bd68e5421243c92cc68.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <stdint.h> // uint32_t is an unsigned 4-byte int
#include <stdlib.h> // for dynamic memory management (malloc/free)
#include <hip/hip_runtime.h>
/* symbolic constants for the image width and height */
#define width 1024
#define heigth 1024
/* BMP structures */
#pragma pack(push,1)
typedef struct tagBITMAPFILEHEADER{ // BITMAPFILEHEADER comes first in the file; its size is 14 bytes
unsigned short bfType; // bfType marks the BMP format; holds "BM"
uint32_t bfSize; // bfSize is the total file size in bytes
unsigned short bfReserved1; // bfReserved1 and bfReserved2 are reserved; set to 0
unsigned short bfReserved2;
uint32_t bf0ffBits; // bf0ffBits is the byte offset from the start of the file to the pixel data
}BITMAPFILEHEADER;
#pragma pack(pop)
typedef struct tagBITMAPINFOHEADER{ // BITMAPINFOHEADER describes the BMP image; its size is 40 bytes
uint32_t biSize; // size of this header
uint32_t biWidth; // number of pixels horizontally
uint32_t biHeight; // number of pixels vertically
unsigned short biPlanes; // 1
unsigned short biBitCount; // bits per pixel; 8 here
uint32_t biCompression; // compression type; BMP here is uncompressed, so 0
uint32_t biSizeImage; // size of the BMP pixel array; basically 0 when biCompression=0
uint32_t biXPelsPerMeter; // biXPelsPerMeter and biYPelsPerMeter are normally 0
uint32_t biYPelsPerMeter;
uint32_t biCirUsed; //0
uint32_t biCirImportant; //0
}BITMAPINFOHEADER;
typedef struct tagRGBQUAD{
unsigned char rgbBlue;
unsigned char rgbGreen;
unsigned char rgbRed;
unsigned char rgbReserved;
}RGBQUAD;
/* kernel that evaluates the CGH (computer-generated hologram) equation */
__global__ void func_cgh_gpu(int *x_d, int *y_d, float *z_d, float *lumi_intensity_d){
int i, j, k;
j=blockDim.x*blockIdx.x+threadIdx.x; // replaces the loop over width
i=blockDim.y*blockIdx.y+threadIdx.y; // replaces the loop over heigth
// variables needed for the computation
float interval=10.5F; // pixel pitch
float wave_len=0.633F; // wavelength
float wave_num=2.0*M_PI/wave_len; // wave number
float cnst=interval*wave_num; // loop-invariant constant, computed up front
for(k=0; k<284; k++){
lumi_intensity_d[i*width+j]=lumi_intensity_d[i*width+j]+cosf(cnst*sqrt((j-x_d[k])*(j-x_d[k])+(i-y_d[k])*(i-y_d[k])+z_d[k]*z_d[k]));
}
}
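/* Worked form of the accumulation above (a sketch of the usual point-source CGH
model; coordinates are in pixel units, matching the kernel variables):
I(j,i) = sum over object points k of
cos( (2*pi*interval/wave_len) * sqrt( (j-x[k])^2 + (i-y[k])^2 + z[k]^2 ) )
where cnst = interval*wave_num folds the pixel pitch and the wave number
2*pi/wave_len into a single loop-invariant factor. */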
/* main function */
int main(){
BITMAPFILEHEADER bmpFh;
BITMAPINFOHEADER bmpIh;
RGBQUAD rgbQ[256];
/* host-side variables */
int i, j;
int points; // number of object points
float *lumi_intensity; // light-intensity buffer
float min, max, mid; // used for binarization
unsigned char *img; // BMP pixel buffer
FILE *fp, *fp1;
/*BITMAPFILEHEADER*/
bmpFh.bfType =19778; //'B'=0x42,'M'=0x4d,'BM'=0x4d42=19778
bmpFh.bfSize =14+40+1024+(width*heigth); // 1024 is the palette size: 256 entries of 4 bytes each
bmpFh.bfReserved1 =0;
bmpFh.bfReserved2 =0;
bmpFh.bf0ffBits =14+40+1024;
/*BITMAPINFOHEADER*/
bmpIh.biSize =40;
bmpIh.biWidth =width;
bmpIh.biHeight =heigth;
bmpIh.biPlanes =1;
bmpIh.biBitCount =8;
bmpIh.biCompression =0;
bmpIh.biSizeImage =0;
bmpIh.biXPelsPerMeter =0;
bmpIh.biYPelsPerMeter =0;
bmpIh.biCirUsed =0;
bmpIh.biCirImportant =0;
/*RGBQUAD*/
for(i=0; i<256; i++){
rgbQ[i].rgbBlue =i;
rgbQ[i].rgbGreen =i;
rgbQ[i].rgbRed =i;
rgbQ[i].rgbReserved =0;
}
/* load the 3D object file */
fp=fopen("cube284.3d","rb"); // open in binary read mode
fread(&points, sizeof(int), 1, fp); // arguments: data address, element size, count, file pointer
// arrays that receive the object points
int x[points]; // these arrays can only be sized once the point count has been read
int y[points];
float z[points];
int x_buf, y_buf, z_buf; // temporary buffers for reading
/* read each point and map it into the hologram coordinate system */
for(i=0; i<points; i++){
fread(&x_buf, sizeof(int), 1, fp);
fread(&y_buf, sizeof(int), 1, fp);
fread(&z_buf, sizeof(int), 1, fp);
x[i]=x_buf*40+width*0.5; // scale coordinates by 40 to spread the points out and add the center offset
y[i]=y_buf*40+heigth*0.5;
z[i]=((float)z_buf)*40+100000.0F;
}
fclose(fp);
lumi_intensity=(float *)malloc(sizeof(float)*width*heigth); // allocate dynamically with malloc
/* device-side variables */
int *x_d, *y_d;
float *z_d;
float *lumi_intensity_d;
// int *points_d;
dim3 block(32,32,1); // block size (threads per block)
dim3 grid(ceil(width/block.x),ceil(heigth/block.y),1); // grid size (number of blocks)
// dim3 grid((width+block.x-1)/block.x,(heigth+block.y-1)/block.y,1);
/* allocate device memory */
hipMalloc((void**)&x_d, points*sizeof(int));
hipMalloc((void**)&y_d, points*sizeof(int));
hipMalloc((void**)&z_d, points*sizeof(float));
hipMalloc((void**)&lumi_intensity_d, width*heigth*sizeof(float));
// hipMalloc((void**)&points_d, sizeof(int));
/* copy data from host to device */
hipMemcpy(x_d, x, points*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(y_d, y, points*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(z_d, z, points*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(lumi_intensity_d, lumi_intensity, width*heigth*sizeof(float), hipMemcpyHostToDevice);
// hipMemcpy(points_d, &points, sizeof(int), hipMemcpyHostToDevice);
/* launch the kernel */
hipLaunchKernelGGL(( func_cgh_gpu), dim3(grid), dim3(block) , 0, 0, x_d, y_d, z_d, lumi_intensity_d);
/* copy the result back from device to host */
hipMemcpy(lumi_intensity, lumi_intensity_d, width*heigth*sizeof(float), hipMemcpyDeviceToHost);
/* free device memory */
hipFree(x_d);
hipFree(y_d);
hipFree(z_d);
hipFree(lumi_intensity_d);
// hipFree(points_d);
// seed the min/max trackers with lumi_intensity[0] so they can be compared
min=lumi_intensity[0];
max=lumi_intensity[0];
/* find the minimum and maximum values */
for(i=0; i<heigth; i++){
for(j=0; j<width; j++){
if(min>lumi_intensity[i*width+j]){
min=lumi_intensity[i*width+j];
}
if(max<lumi_intensity[i*width+j]){
max=lumi_intensity[i*width+j];
}
}
}
mid=(min+max)*0.5F; // midpoint used as the binarization threshold
printf("min=%lf, max=%lf, mid=%lf\n", min, max, mid);
/* allocate the image buffer with malloc */
img=(unsigned char *)malloc(sizeof(unsigned char)*width*heigth);
/* binarize: compare each intensity against the threshold */
for(i=0; i<width*heigth; i++){
if(lumi_intensity[i]<mid){
img[i]=0;
}
if(lumi_intensity[i]>mid){
img[i]=255;
}
}
/* open the output file for binary (b) writing (w) */
fp1=fopen("root-gpu.bmp","wb");
/* write arguments: data address, element size, count, file pointer */
fwrite(&bmpFh, sizeof(bmpFh), 1, fp1); // fields could also be written individually, e.g. (&bmpFh.bfType, sizeof(bmpFh.bfType), 1, fp);
fwrite(&bmpIh, sizeof(bmpIh), 1, fp1);
fwrite(&rgbQ[0], sizeof(rgbQ[0]), 256, fp1);
fwrite(img, sizeof(unsigned char), width*heigth, fp1); // write the BMP pixel data
printf("'root-gpu.bmp' was saved.\n\n");
/* free the host memory allocated with malloc */
free(lumi_intensity);
free(img);
fclose(fp1);
return 0;
}
| c63511ed509acebaefc26bd68e5421243c92cc68.cu | #include <stdio.h>
#include <math.h>
#include <stdint.h> // uint32_t is an unsigned 4-byte int
#include <stdlib.h> // for dynamic memory management (malloc/free)
#include <cuda.h>
/* symbolic constants for the image width and height */
#define width 1024
#define heigth 1024
/* BMP structures */
#pragma pack(push,1)
typedef struct tagBITMAPFILEHEADER{ // BITMAPFILEHEADER comes first in the file; its size is 14 bytes
unsigned short bfType; // bfType marks the BMP format; holds "BM"
uint32_t bfSize; // bfSize is the total file size in bytes
unsigned short bfReserved1; // bfReserved1 and bfReserved2 are reserved; set to 0
unsigned short bfReserved2;
uint32_t bf0ffBits; // bf0ffBits is the byte offset from the start of the file to the pixel data
}BITMAPFILEHEADER;
#pragma pack(pop)
typedef struct tagBITMAPINFOHEADER{ // BITMAPINFOHEADER describes the BMP image; its size is 40 bytes
uint32_t biSize; // size of this header
uint32_t biWidth; // number of pixels horizontally
uint32_t biHeight; // number of pixels vertically
unsigned short biPlanes; // 1
unsigned short biBitCount; // bits per pixel; 8 here
uint32_t biCompression; // compression type; BMP here is uncompressed, so 0
uint32_t biSizeImage; // size of the BMP pixel array; basically 0 when biCompression=0
uint32_t biXPelsPerMeter; // biXPelsPerMeter and biYPelsPerMeter are normally 0
uint32_t biYPelsPerMeter;
uint32_t biCirUsed; //0
uint32_t biCirImportant; //0
}BITMAPINFOHEADER;
typedef struct tagRGBQUAD{
unsigned char rgbBlue;
unsigned char rgbGreen;
unsigned char rgbRed;
unsigned char rgbReserved;
}RGBQUAD;
/* kernel that evaluates the CGH (computer-generated hologram) equation */
__global__ void func_cgh_gpu(int *x_d, int *y_d, float *z_d, float *lumi_intensity_d){
int i, j, k;
j=blockDim.x*blockIdx.x+threadIdx.x; // replaces the loop over width
i=blockDim.y*blockIdx.y+threadIdx.y; // replaces the loop over heigth
// variables needed for the computation
float interval=10.5F; // pixel pitch
float wave_len=0.633F; // wavelength
float wave_num=2.0*M_PI/wave_len; // wave number
float cnst=interval*wave_num; // loop-invariant constant, computed up front
for(k=0; k<284; k++){
lumi_intensity_d[i*width+j]=lumi_intensity_d[i*width+j]+cosf(cnst*sqrt((j-x_d[k])*(j-x_d[k])+(i-y_d[k])*(i-y_d[k])+z_d[k]*z_d[k]));
}
}
/* main function */
int main(){
BITMAPFILEHEADER bmpFh;
BITMAPINFOHEADER bmpIh;
RGBQUAD rgbQ[256];
/* host-side variables */
int i, j;
int points; // number of object points
float *lumi_intensity; // light-intensity buffer
float min, max, mid; // used for binarization
unsigned char *img; // BMP pixel buffer
FILE *fp, *fp1;
/* fill in the BITMAPFILEHEADER */
bmpFh.bfType =19778; //'B'=0x42,'M'=0x4d,'BM'=0x4d42=19778
bmpFh.bfSize =14+40+1024+(width*heigth); // 1024 is the palette size: 256 entries of 4 bytes each
bmpFh.bfReserved1 =0;
bmpFh.bfReserved2 =0;
bmpFh.bf0ffBits =14+40+1024;
/* fill in the BITMAPINFOHEADER */
bmpIh.biSize =40;
bmpIh.biWidth =width;
bmpIh.biHeight =heigth;
bmpIh.biPlanes =1;
bmpIh.biBitCount =8;
bmpIh.biCompression =0;
bmpIh.biSizeImage =0;
bmpIh.biXPelsPerMeter =0;
bmpIh.biYPelsPerMeter =0;
bmpIh.biCirUsed =0;
bmpIh.biCirImportant =0;
/* fill in the RGBQUAD palette (grayscale) */
for(i=0; i<256; i++){
rgbQ[i].rgbBlue =i;
rgbQ[i].rgbGreen =i;
rgbQ[i].rgbRed =i;
rgbQ[i].rgbReserved =0;
}
/* load the 3D object file */
fp=fopen("cube284.3d","rb"); // open in binary read mode
fread(&points, sizeof(int), 1, fp); // arguments: data address, element size, count, file pointer
// arrays that receive the object points
int x[points]; // these arrays can only be sized once the point count has been read
int y[points];
float z[points];
int x_buf, y_buf, z_buf; // temporary buffers for reading
/* read each point into the buffers and store coordinates adjusted for the hologram plane and object positions */
for(i=0; i<points; i++){
fread(&x_buf, sizeof(int), 1, fp);
fread(&y_buf, sizeof(int), 1, fp);
fread(&z_buf, sizeof(int), 1, fp);
x[i]=x_buf*40+width*0.5; // scale coordinates by 40 to spread the points out and add the center offset
y[i]=y_buf*40+heigth*0.5;
z[i]=((float)z_buf)*40+100000.0F;
}
fclose(fp);
lumi_intensity=(float *)malloc(sizeof(float)*width*heigth); // allocate dynamically with malloc
/* device-side variables */
int *x_d, *y_d;
float *z_d;
float *lumi_intensity_d;
// int *points_d;
dim3 block(32,32,1); // block size (threads per block)
dim3 grid(ceil(width/block.x),ceil(heigth/block.y),1); // grid size (number of blocks)
// dim3 grid((width+block.x-1)/block.x,(heigth+block.y-1)/block.y,1);
/* allocate device memory */
cudaMalloc((void**)&x_d, points*sizeof(int));
cudaMalloc((void**)&y_d, points*sizeof(int));
cudaMalloc((void**)&z_d, points*sizeof(float));
cudaMalloc((void**)&lumi_intensity_d, width*heigth*sizeof(float));
// cudaMalloc((void**)&points_d, sizeof(int));
/* copy data from host to device */
cudaMemcpy(x_d, x, points*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(y_d, y, points*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(z_d, z, points*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(lumi_intensity_d, lumi_intensity, width*heigth*sizeof(float), cudaMemcpyHostToDevice);
// cudaMemcpy(points_d, &points, sizeof(int), cudaMemcpyHostToDevice);
/* launch the kernel */
func_cgh_gpu<<< grid, block >>>(x_d, y_d, z_d, lumi_intensity_d);
/* copy the result back from device to host */
cudaMemcpy(lumi_intensity, lumi_intensity_d, width*heigth*sizeof(float), cudaMemcpyDeviceToHost);
/* free device memory */
cudaFree(x_d);
cudaFree(y_d);
cudaFree(z_d);
cudaFree(lumi_intensity_d);
// cudaFree(points_d);
// seed the min/max trackers with lumi_intensity[0] so they can be compared
min=lumi_intensity[0];
max=lumi_intensity[0];
/* find the maximum and minimum values */
for(i=0; i<heigth; i++){
for(j=0; j<width; j++){
if(min>lumi_intensity[i*width+j]){
min=lumi_intensity[i*width+j];
}
if(max<lumi_intensity[i*width+j]){
max=lumi_intensity[i*width+j];
}
}
}
mid=(min+max)*0.5F; // midpoint used as the binarization threshold
printf("min=%lf, max=%lf, mid=%lf\n", min, max, mid);
/* allocate the image buffer with malloc */
img=(unsigned char *)malloc(sizeof(unsigned char)*width*heigth);
/* binarize: compare each intensity against the threshold */
for(i=0; i<width*heigth; i++){
if(lumi_intensity[i]<mid){
img[i]=0;
}
if(lumi_intensity[i]>mid){
img[i]=255;
}
}
/* open the output file for binary (b) writing (w) */
fp1=fopen("root-gpu.bmp","wb");
/* write arguments: data address, element size, count, file pointer */
fwrite(&bmpFh, sizeof(bmpFh), 1, fp1); // fields could also be written individually, e.g. (&bmpFh.bfType, sizeof(bmpFh.bfType), 1, fp);
fwrite(&bmpIh, sizeof(bmpIh), 1, fp1);
fwrite(&rgbQ[0], sizeof(rgbQ[0]), 256, fp1);
fwrite(img, sizeof(unsigned char), width*heigth, fp1); // write the BMP pixel data
printf("'root-gpu.bmp' was saved.\n\n");
/* free the host memory allocated with malloc */
free(lumi_intensity);
free(img);
fclose(fp1);
return 0;
}
|
4dedd9844db20f37d891adf06cf28025f2cff51d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/tensor/gather_elements_impl.h"
#include "core/providers/cuda/tensor/scatter_elements_impl.h"
#ifdef ENABLE_TRAINING_OPS
#include "orttraining/training_ops/cuda/tensor/gather_elements_grad_impl.h"
#endif
#include "core/providers/cuda/atomic/common.cuh"
#include "core/providers/cuda/cu_inc/common.cuh"
namespace onnxruntime {
namespace cuda {
namespace {
constexpr int kThreadsPerBlock = GridDim::maxThreadsPerBlock;
constexpr int kThreadWorkSize = 4;
// General case to compute the input(for Gather)/output(for Scatter) and indices data offset given the thread ID
// using strides and fast_divmods. The offsets are returned in a 2-element TArray.
template <bool IsStridedIndices>
struct OffsetCalculator {
OffsetCalculator(const int rank, const TArray<int64_t> masked_input_strides, const TArray<fast_divmod> indices_fdms,
const TArray<int64_t> indices_strides)
: rank_(rank), indices_fdms_(indices_fdms) {
masked_input_strides_.SetSize(rank);
if (IsStridedIndices) indices_strides_.SetSize(rank);
for (int dim = 0; dim < rank; ++dim) {
masked_input_strides_[dim] = static_cast<CUDA_LONG>(masked_input_strides[dim]);
if (IsStridedIndices) indices_strides_[dim] = static_cast<CUDA_LONG>(indices_strides[dim]);
}
}
__device__ __forceinline__ TArray<CUDA_LONG, 2> get(CUDA_LONG linear_idx) const {
TArray<CUDA_LONG, 2> offsets;
offsets[0] = 0;
offsets[1] = IsStridedIndices ? 0 : linear_idx;
CUDA_LONG q, r = linear_idx;
#pragma unroll
for (int dim = 0; dim < indices_fdms_.Capacity(); ++dim) {
if (dim == rank_) break;
indices_fdms_[dim].divmod(r, q, r);
offsets[0] += masked_input_strides_[dim] * q;
if (IsStridedIndices) offsets[1] += indices_strides_[dim] * q;
}
return offsets;
}
int rank_;
TArray<fast_divmod> indices_fdms_;
TArray<CUDA_LONG> masked_input_strides_;
TArray<CUDA_LONG> indices_strides_;
};
// Optimization for 2D case to compute the input(for Gather)/output(for Scatter) and indices data offset
// given the thread ID so we don't need FOR loop for fast_divmod computes.
// The offsets are returned in a 2-element TArray.
template <bool IsOuterAxis, bool IsStridedIndices>
struct OffsetCalculatorFor2D {
OffsetCalculatorFor2D(const fast_divmod indices_row_size_fdm, const int64_t input_row_size,
const TArray<int64_t> indices_strides)
: indices_row_size_fdm_(indices_row_size_fdm), input_row_size_(static_cast<CUDA_LONG>(input_row_size)) {
if (IsStridedIndices) {
indices_strides_.SetSize(2);
indices_strides_[0] = static_cast<CUDA_LONG>(indices_strides[0]);
indices_strides_[1] = static_cast<CUDA_LONG>(indices_strides[1]);
}
}
__device__ __forceinline__ TArray<CUDA_LONG, 2> get(CUDA_LONG linear_idx) const {
TArray<CUDA_LONG, 2> offsets;
if (IsStridedIndices) {
CUDA_LONG q, r = linear_idx;
indices_row_size_fdm_.divmod(r, q, r);
offsets[0] = IsOuterAxis ? r : q * input_row_size_;
offsets[1] = q * indices_strides_[0] + r * indices_strides_[1];
} else {
offsets[0] =
IsOuterAxis ? indices_row_size_fdm_.mod(linear_idx) : indices_row_size_fdm_.div(linear_idx) * input_row_size_;
offsets[1] = linear_idx;
}
return offsets;
}
fast_divmod indices_row_size_fdm_;
CUDA_LONG input_row_size_;
TArray<CUDA_LONG> indices_strides_;
};
} // namespace
template <class T>
struct FuncAssignment {
__device__ __inline__ void operator()(T* a, const T* b) const { *a = *b; }
};
template <typename T, typename TIndex, bool IsGather, typename OffsetCalcT, typename TFunc>
__global__ void _GatherScatterElementsKernel(const T* src_data, const TIndex* indices_data, T* output_data,
const int64_t input_dim_along_axis, const int64_t input_stride_along_axis,
const OffsetCalcT offset_calc, const TFunc& func, CUDA_LONG N) {
CUDA_LONG start = kThreadsPerBlock * kThreadWorkSize * blockIdx.x + threadIdx.x;
CUDA_LONG id;
T value[kThreadWorkSize];
if (!IsGather) {
id = start;
#pragma unroll
for (int work = 0; work < kThreadWorkSize; ++work) {
if (id < N) {
value[work] = src_data[id];
id += kThreadsPerBlock;
}
}
}
id = start;
#pragma unroll
for (int work = 0; work < kThreadWorkSize; ++work) {
if (id < N) {
TArray<CUDA_LONG, 2> offsets = offset_calc.get(id);
int64_t input_offset_along_axis = static_cast<int64_t>(indices_data[offsets[1]]);
if (input_offset_along_axis >= -input_dim_along_axis && input_offset_along_axis < input_dim_along_axis) {
if (input_offset_along_axis < 0) input_offset_along_axis += input_dim_along_axis;
CUDA_LONG input_offset = offsets[0] + static_cast<CUDA_LONG>(input_offset_along_axis * input_stride_along_axis);
if (IsGather) {
func(value + work, src_data + input_offset);
} else {
func(output_data + input_offset, value + work);
}
}
id += kThreadsPerBlock;
}
}
if (IsGather) {
id = start;
#pragma unroll
for (int work = 0; work < kThreadWorkSize; ++work) {
if (id < N) {
output_data[id] = value[work];
id += kThreadsPerBlock;
}
}
}
}
#define LAUNCH_GATHER_SCATTER_ELEMENTS_2D_KERNEL(src_data, is_outer_axis, is_strided_indices, is_gather) \
auto offset_calc = OffsetCalculatorFor2D<is_outer_axis, is_strided_indices>(args.indices_fdms[0], input_row_size, \
args.indices_strides); \
hipLaunchKernelGGL(( _GatherScatterElementsKernel<T, TIndex, is_gather, decltype(offset_calc), decltype(func)>) \
, dim3(blocksPerGrid), dim3(kThreadsPerBlock), 0, stream, src_data, indices_data, output_data, args.input_dim_along_axis, \
args.input_stride_along_axis, offset_calc, func, N)
#define LAUNCH_GATHER_SCATTER_ELEMENTS_KERNEL(src_data, is_strided_indices, is_gather) \
auto offset_calc = \
OffsetCalculator<is_strided_indices>(rank, args.masked_input_strides, args.indices_fdms, args.indices_strides); \
hipLaunchKernelGGL(( _GatherScatterElementsKernel<T, TIndex, is_gather, decltype(offset_calc), decltype(func)>) \
, dim3(blocksPerGrid), dim3(kThreadsPerBlock), 0, stream, src_data, indices_data, output_data, args.input_dim_along_axis, \
args.input_stride_along_axis, offset_calc, func, N)
#define HANDLE_GATHER_SCATTER_ELEMENTS_2D_IS_STRIDED_INDICES(src_data, is_outer_axis, is_gather) \
if (args.indices_strides.Size() > 0) { \
LAUNCH_GATHER_SCATTER_ELEMENTS_2D_KERNEL(src_data, is_outer_axis, true, is_gather); \
} else { \
LAUNCH_GATHER_SCATTER_ELEMENTS_2D_KERNEL(src_data, is_outer_axis, false, is_gather); \
}
template <typename T, typename TIndex>
void GatherElementsImpl(hipStream_t stream, const T* input_data, const TIndex* indices_data, T* output_data,
const GatherScatterElementsArgs& args) {
CUDA_LONG N = static_cast<CUDA_LONG>(args.indices_size);
int blocksPerGrid = static_cast<int>(CeilDiv(N, kThreadsPerBlock * kThreadWorkSize));
auto func = FuncAssignment<T>();
if (args.rank == 2) {
int64_t input_row_size = args.masked_input_strides[0];
if (args.axis == 0) {
HANDLE_GATHER_SCATTER_ELEMENTS_2D_IS_STRIDED_INDICES(input_data, true, true);
} else {
HANDLE_GATHER_SCATTER_ELEMENTS_2D_IS_STRIDED_INDICES(input_data, false, true);
}
return;
}
int rank = static_cast<int>(args.rank);
if (args.indices_strides.Size() > 0) {
LAUNCH_GATHER_SCATTER_ELEMENTS_KERNEL(input_data, true, true);
} else {
// Save one divmod in kernel if axis is the last dim.
if (args.rank == args.axis + 1) rank -= 1;
LAUNCH_GATHER_SCATTER_ELEMENTS_KERNEL(input_data, false, true);
}
}
template <typename T, typename TIndex, typename TFunc>
Status ScatterElementsImplInternal(hipStream_t stream, const T* input_data, const TIndex* indices_data,
const T* updates_data, T* output_data, const GatherScatterElementsArgs& args,
const TFunc& func) {
if (input_data != output_data) {
CUDA_RETURN_IF_ERROR(
hipMemcpyAsync(output_data, input_data, args.input_size * sizeof(T), hipMemcpyDeviceToDevice, stream));
}
if (args.indices_size == 0) return Status::OK();
CUDA_LONG N = static_cast<CUDA_LONG>(args.indices_size);
int blocksPerGrid = static_cast<int>(CeilDiv(N, kThreadsPerBlock * kThreadWorkSize));
if (args.rank == 2) {
int64_t input_row_size = args.masked_input_strides[0];
if (args.axis == 0) {
HANDLE_GATHER_SCATTER_ELEMENTS_2D_IS_STRIDED_INDICES(updates_data, true, false);
} else {
HANDLE_GATHER_SCATTER_ELEMENTS_2D_IS_STRIDED_INDICES(updates_data, false, false);
}
return Status::OK();
}
int rank = static_cast<int>(args.rank);
if (args.indices_strides.Size() > 0) {
LAUNCH_GATHER_SCATTER_ELEMENTS_KERNEL(updates_data, true, false);
} else {
// Save one divmod in kernel if axis is the last dim.
if (args.rank == args.axis + 1) rank -= 1;
LAUNCH_GATHER_SCATTER_ELEMENTS_KERNEL(updates_data, false, false);
}
return Status::OK();
}
#undef HANDLE_GATHER_SCATTER_ELEMENTS_2D_IS_STRIDED_INDICES
#undef LAUNCH_GATHER_SCATTER_ELEMENTS_KERNEL
#undef LAUNCH_GATHER_SCATTER_ELEMENTS_2D_KERNEL
template <typename T, typename TIndex>
Status ScatterElementsImpl(hipStream_t stream, const T* input_data, const TIndex* indices_data, const T* updates_data,
T* output_data, const GatherScatterElementsArgs& args) {
return ScatterElementsImplInternal(stream, input_data, indices_data, updates_data, output_data, args,
FuncAssignment<T>());
}
#define GATHER_SCATTER_ELEMENTS_SPECIALIZED_TINDEX_IMPL(T, TIndex) \
template void GatherElementsImpl<T, TIndex>(hipStream_t stream, const T* input_data, const TIndex* indices_data, \
T* output_data, const GatherScatterElementsArgs& args); \
template Status ScatterElementsImpl<T, TIndex>(hipStream_t stream, const T* input_data, const TIndex* indices_data, \
const T* updates_data, T* output_data, \
const GatherScatterElementsArgs& args);
#define GATHER_SCATTER_ELEMENTS_SPECIALIZED_IMPL(T) \
GATHER_SCATTER_ELEMENTS_SPECIALIZED_TINDEX_IMPL(T, int32_t) \
GATHER_SCATTER_ELEMENTS_SPECIALIZED_TINDEX_IMPL(T, int64_t)
// GatherElementsGrad needs atomic_add which supports float types only, so use half, float and double for 16, 32, and 64
// bits data respectively.
GATHER_SCATTER_ELEMENTS_SPECIALIZED_IMPL(int8_t)
GATHER_SCATTER_ELEMENTS_SPECIALIZED_IMPL(half)
GATHER_SCATTER_ELEMENTS_SPECIALIZED_IMPL(float)
GATHER_SCATTER_ELEMENTS_SPECIALIZED_IMPL(double)
#undef GATHER_SCATTER_ELEMENTS_SPECIALIZED_IMPL
#undef GATHER_SCATTER_ELEMENTS_SPECIALIZED_TINDEX_IMPL
#ifdef ENABLE_TRAINING_OPS
template <class T>
struct FuncAtomicAdd {
__device__ __inline__ void operator()(T* a, const T* b) const { atomic_add(a, *b); }
};
template <typename T, typename TIndex>
Status GatherElementsGradImpl(hipStream_t stream, const TIndex* indices_data, const T* updates_data, T* output_data,
const GatherScatterElementsArgs& args) {
// Give output_data as the input_data parameter by intention,
// to skip input_data copy, which is not applicable for GatherElementsGrad.
return ScatterElementsImplInternal(stream, output_data, indices_data, updates_data, output_data, args,
FuncAtomicAdd<T>());
}
#define GATHER_ELEMENTS_GRAD_SPECIALIZED_TINDEX_IMPL(T, TIndex) \
template Status GatherElementsGradImpl<T, TIndex>(hipStream_t stream, const TIndex* indices_data, \
const T* updates_data, T* output_data, \
const GatherScatterElementsArgs& args);
#define GATHER_ELEMENTS_GRAD_SPECIALIZED_SCATTER_ADD_IMPL(T) \
GATHER_ELEMENTS_GRAD_SPECIALIZED_TINDEX_IMPL(T, int32_t) \
GATHER_ELEMENTS_GRAD_SPECIALIZED_TINDEX_IMPL(T, int64_t)
GATHER_ELEMENTS_GRAD_SPECIALIZED_SCATTER_ADD_IMPL(half)
GATHER_ELEMENTS_GRAD_SPECIALIZED_SCATTER_ADD_IMPL(float)
GATHER_ELEMENTS_GRAD_SPECIALIZED_SCATTER_ADD_IMPL(double)
#undef GATHER_ELEMENTS_GRAD_SPECIALIZED_SCATTER_ADD_IMPL
#undef GATHER_ELEMENTS_GRAD_SPECIALIZED_TINDEX_IMPL
#endif
} // namespace cuda
} // namespace onnxruntime
| 4dedd9844db20f37d891adf06cf28025f2cff51d.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/tensor/gather_elements_impl.h"
#include "core/providers/cuda/tensor/scatter_elements_impl.h"
#ifdef ENABLE_TRAINING_OPS
#include "orttraining/training_ops/cuda/tensor/gather_elements_grad_impl.h"
#endif
#include "core/providers/cuda/atomic/common.cuh"
#include "core/providers/cuda/cu_inc/common.cuh"
namespace onnxruntime {
namespace cuda {
namespace {
constexpr int kThreadsPerBlock = GridDim::maxThreadsPerBlock;
constexpr int kThreadWorkSize = 4;
// General case to compute the input(for Gather)/output(for Scatter) and indices data offset given the thread ID
// using strides and fast_divmods. The offsets are returned in a 2-element TArray.
template <bool IsStridedIndices>
struct OffsetCalculator {
OffsetCalculator(const int rank, const TArray<int64_t> masked_input_strides, const TArray<fast_divmod> indices_fdms,
const TArray<int64_t> indices_strides)
: rank_(rank), indices_fdms_(indices_fdms) {
masked_input_strides_.SetSize(rank);
if (IsStridedIndices) indices_strides_.SetSize(rank);
for (int dim = 0; dim < rank; ++dim) {
masked_input_strides_[dim] = static_cast<CUDA_LONG>(masked_input_strides[dim]);
if (IsStridedIndices) indices_strides_[dim] = static_cast<CUDA_LONG>(indices_strides[dim]);
}
}
__device__ __forceinline__ TArray<CUDA_LONG, 2> get(CUDA_LONG linear_idx) const {
TArray<CUDA_LONG, 2> offsets;
offsets[0] = 0;
offsets[1] = IsStridedIndices ? 0 : linear_idx;
CUDA_LONG q, r = linear_idx;
#pragma unroll
for (int dim = 0; dim < indices_fdms_.Capacity(); ++dim) {
if (dim == rank_) break;
indices_fdms_[dim].divmod(r, q, r);
offsets[0] += masked_input_strides_[dim] * q;
if (IsStridedIndices) offsets[1] += indices_strides_[dim] * q;
}
return offsets;
}
int rank_;
TArray<fast_divmod> indices_fdms_;
TArray<CUDA_LONG> masked_input_strides_;
TArray<CUDA_LONG> indices_strides_;
};
// Optimization for 2D case to compute the input(for Gather)/output(for Scatter) and indices data offset
// given the thread ID so we don't need FOR loop for fast_divmod computes.
// The offsets are returned in a 2-element TArray.
template <bool IsOuterAxis, bool IsStridedIndices>
struct OffsetCalculatorFor2D {
OffsetCalculatorFor2D(const fast_divmod indices_row_size_fdm, const int64_t input_row_size,
const TArray<int64_t> indices_strides)
: indices_row_size_fdm_(indices_row_size_fdm), input_row_size_(static_cast<CUDA_LONG>(input_row_size)) {
if (IsStridedIndices) {
indices_strides_.SetSize(2);
indices_strides_[0] = static_cast<CUDA_LONG>(indices_strides[0]);
indices_strides_[1] = static_cast<CUDA_LONG>(indices_strides[1]);
}
}
__device__ __forceinline__ TArray<CUDA_LONG, 2> get(CUDA_LONG linear_idx) const {
TArray<CUDA_LONG, 2> offsets;
if (IsStridedIndices) {
CUDA_LONG q, r = linear_idx;
indices_row_size_fdm_.divmod(r, q, r);
offsets[0] = IsOuterAxis ? r : q * input_row_size_;
offsets[1] = q * indices_strides_[0] + r * indices_strides_[1];
} else {
offsets[0] =
IsOuterAxis ? indices_row_size_fdm_.mod(linear_idx) : indices_row_size_fdm_.div(linear_idx) * input_row_size_;
offsets[1] = linear_idx;
}
return offsets;
}
fast_divmod indices_row_size_fdm_;
CUDA_LONG input_row_size_;
TArray<CUDA_LONG> indices_strides_;
};
} // namespace
template <class T>
struct FuncAssignment {
__device__ __inline__ void operator()(T* a, const T* b) const { *a = *b; }
};
template <typename T, typename TIndex, bool IsGather, typename OffsetCalcT, typename TFunc>
__global__ void _GatherScatterElementsKernel(const T* src_data, const TIndex* indices_data, T* output_data,
const int64_t input_dim_along_axis, const int64_t input_stride_along_axis,
const OffsetCalcT offset_calc, const TFunc& func, CUDA_LONG N) {
CUDA_LONG start = kThreadsPerBlock * kThreadWorkSize * blockIdx.x + threadIdx.x;
CUDA_LONG id;
T value[kThreadWorkSize];
if (!IsGather) {
id = start;
#pragma unroll
for (int work = 0; work < kThreadWorkSize; ++work) {
if (id < N) {
value[work] = src_data[id];
id += kThreadsPerBlock;
}
}
}
id = start;
#pragma unroll
for (int work = 0; work < kThreadWorkSize; ++work) {
if (id < N) {
TArray<CUDA_LONG, 2> offsets = offset_calc.get(id);
int64_t input_offset_along_axis = static_cast<int64_t>(indices_data[offsets[1]]);
if (input_offset_along_axis >= -input_dim_along_axis && input_offset_along_axis < input_dim_along_axis) {
if (input_offset_along_axis < 0) input_offset_along_axis += input_dim_along_axis;
CUDA_LONG input_offset = offsets[0] + static_cast<CUDA_LONG>(input_offset_along_axis * input_stride_along_axis);
if (IsGather) {
func(value + work, src_data + input_offset);
} else {
func(output_data + input_offset, value + work);
}
}
id += kThreadsPerBlock;
}
}
if (IsGather) {
id = start;
#pragma unroll
for (int work = 0; work < kThreadWorkSize; ++work) {
if (id < N) {
output_data[id] = value[work];
id += kThreadsPerBlock;
}
}
}
}
#define LAUNCH_GATHER_SCATTER_ELEMENTS_2D_KERNEL(src_data, is_outer_axis, is_strided_indices, is_gather) \
auto offset_calc = OffsetCalculatorFor2D<is_outer_axis, is_strided_indices>(args.indices_fdms[0], input_row_size, \
args.indices_strides); \
_GatherScatterElementsKernel<T, TIndex, is_gather, decltype(offset_calc), decltype(func)> \
<<<blocksPerGrid, kThreadsPerBlock, 0, stream>>>(src_data, indices_data, output_data, args.input_dim_along_axis, \
args.input_stride_along_axis, offset_calc, func, N)
#define LAUNCH_GATHER_SCATTER_ELEMENTS_KERNEL(src_data, is_strided_indices, is_gather) \
auto offset_calc = \
OffsetCalculator<is_strided_indices>(rank, args.masked_input_strides, args.indices_fdms, args.indices_strides); \
_GatherScatterElementsKernel<T, TIndex, is_gather, decltype(offset_calc), decltype(func)> \
<<<blocksPerGrid, kThreadsPerBlock, 0, stream>>>(src_data, indices_data, output_data, args.input_dim_along_axis, \
args.input_stride_along_axis, offset_calc, func, N)
#define HANDLE_GATHER_SCATTER_ELEMENTS_2D_IS_STRIDED_INDICES(src_data, is_outer_axis, is_gather) \
if (args.indices_strides.Size() > 0) { \
LAUNCH_GATHER_SCATTER_ELEMENTS_2D_KERNEL(src_data, is_outer_axis, true, is_gather); \
} else { \
LAUNCH_GATHER_SCATTER_ELEMENTS_2D_KERNEL(src_data, is_outer_axis, false, is_gather); \
}
template <typename T, typename TIndex>
void GatherElementsImpl(cudaStream_t stream, const T* input_data, const TIndex* indices_data, T* output_data,
const GatherScatterElementsArgs& args) {
CUDA_LONG N = static_cast<CUDA_LONG>(args.indices_size);
int blocksPerGrid = static_cast<int>(CeilDiv(N, kThreadsPerBlock * kThreadWorkSize));
auto func = FuncAssignment<T>();
if (args.rank == 2) {
int64_t input_row_size = args.masked_input_strides[0];
if (args.axis == 0) {
HANDLE_GATHER_SCATTER_ELEMENTS_2D_IS_STRIDED_INDICES(input_data, true, true);
} else {
HANDLE_GATHER_SCATTER_ELEMENTS_2D_IS_STRIDED_INDICES(input_data, false, true);
}
return;
}
int rank = static_cast<int>(args.rank);
if (args.indices_strides.Size() > 0) {
LAUNCH_GATHER_SCATTER_ELEMENTS_KERNEL(input_data, true, true);
} else {
// Save one divmod in kernel if axis is the last dim.
if (args.rank == args.axis + 1) rank -= 1;
LAUNCH_GATHER_SCATTER_ELEMENTS_KERNEL(input_data, false, true);
}
}
template <typename T, typename TIndex, typename TFunc>
Status ScatterElementsImplInternal(cudaStream_t stream, const T* input_data, const TIndex* indices_data,
const T* updates_data, T* output_data, const GatherScatterElementsArgs& args,
const TFunc& func) {
if (input_data != output_data) {
CUDA_RETURN_IF_ERROR(
cudaMemcpyAsync(output_data, input_data, args.input_size * sizeof(T), cudaMemcpyDeviceToDevice, stream));
}
if (args.indices_size == 0) return Status::OK();
CUDA_LONG N = static_cast<CUDA_LONG>(args.indices_size);
int blocksPerGrid = static_cast<int>(CeilDiv(N, kThreadsPerBlock * kThreadWorkSize));
if (args.rank == 2) {
int64_t input_row_size = args.masked_input_strides[0];
if (args.axis == 0) {
HANDLE_GATHER_SCATTER_ELEMENTS_2D_IS_STRIDED_INDICES(updates_data, true, false);
} else {
HANDLE_GATHER_SCATTER_ELEMENTS_2D_IS_STRIDED_INDICES(updates_data, false, false);
}
return Status::OK();
}
int rank = static_cast<int>(args.rank);
if (args.indices_strides.Size() > 0) {
LAUNCH_GATHER_SCATTER_ELEMENTS_KERNEL(updates_data, true, false);
} else {
// Save one divmod in kernel if axis is the last dim.
if (args.rank == args.axis + 1) rank -= 1;
LAUNCH_GATHER_SCATTER_ELEMENTS_KERNEL(updates_data, false, false);
}
return Status::OK();
}
#undef HANDLE_GATHER_SCATTER_ELEMENTS_2D_IS_STRIDED_INDICES
#undef LAUNCH_GATHER_SCATTER_ELEMENTS_KERNEL
#undef LAUNCH_GATHER_SCATTER_ELEMENTS_2D_KERNEL
template <typename T, typename TIndex>
Status ScatterElementsImpl(cudaStream_t stream, const T* input_data, const TIndex* indices_data, const T* updates_data,
T* output_data, const GatherScatterElementsArgs& args) {
return ScatterElementsImplInternal(stream, input_data, indices_data, updates_data, output_data, args,
FuncAssignment<T>());
}
#define GATHER_SCATTER_ELEMENTS_SPECIALIZED_TINDEX_IMPL(T, TIndex) \
template void GatherElementsImpl<T, TIndex>(cudaStream_t stream, const T* input_data, const TIndex* indices_data, \
T* output_data, const GatherScatterElementsArgs& args); \
template Status ScatterElementsImpl<T, TIndex>(cudaStream_t stream, const T* input_data, const TIndex* indices_data, \
const T* updates_data, T* output_data, \
const GatherScatterElementsArgs& args);
#define GATHER_SCATTER_ELEMENTS_SPECIALIZED_IMPL(T) \
GATHER_SCATTER_ELEMENTS_SPECIALIZED_TINDEX_IMPL(T, int32_t) \
GATHER_SCATTER_ELEMENTS_SPECIALIZED_TINDEX_IMPL(T, int64_t)
// GatherElementsGrad needs atomic_add which supports float types only, so use half, float and double for 16, 32, and 64
// bits data respectively.
GATHER_SCATTER_ELEMENTS_SPECIALIZED_IMPL(int8_t)
GATHER_SCATTER_ELEMENTS_SPECIALIZED_IMPL(half)
GATHER_SCATTER_ELEMENTS_SPECIALIZED_IMPL(float)
GATHER_SCATTER_ELEMENTS_SPECIALIZED_IMPL(double)
#undef GATHER_SCATTER_ELEMENTS_SPECIALIZED_IMPL
#undef GATHER_SCATTER_ELEMENTS_SPECIALIZED_TINDEX_IMPL
#ifdef ENABLE_TRAINING_OPS
template <class T>
struct FuncAtomicAdd {
__device__ __inline__ void operator()(T* a, const T* b) const { atomic_add(a, *b); }
};
template <typename T, typename TIndex>
Status GatherElementsGradImpl(cudaStream_t stream, const TIndex* indices_data, const T* updates_data, T* output_data,
const GatherScatterElementsArgs& args) {
// Give output_data as the input_data parameter by intention,
// to skip input_data copy, which is not applicable for GatherElementsGrad.
return ScatterElementsImplInternal(stream, output_data, indices_data, updates_data, output_data, args,
FuncAtomicAdd<T>());
}
#define GATHER_ELEMENTS_GRAD_SPECIALIZED_TINDEX_IMPL(T, TIndex) \
template Status GatherElementsGradImpl<T, TIndex>(cudaStream_t stream, const TIndex* indices_data, \
const T* updates_data, T* output_data, \
const GatherScatterElementsArgs& args);
#define GATHER_ELEMENTS_GRAD_SPECIALIZED_SCATTER_ADD_IMPL(T) \
GATHER_ELEMENTS_GRAD_SPECIALIZED_TINDEX_IMPL(T, int32_t) \
GATHER_ELEMENTS_GRAD_SPECIALIZED_TINDEX_IMPL(T, int64_t)
GATHER_ELEMENTS_GRAD_SPECIALIZED_SCATTER_ADD_IMPL(half)
GATHER_ELEMENTS_GRAD_SPECIALIZED_SCATTER_ADD_IMPL(float)
GATHER_ELEMENTS_GRAD_SPECIALIZED_SCATTER_ADD_IMPL(double)
#undef GATHER_ELEMENTS_GRAD_SPECIALIZED_SCATTER_ADD_IMPL
#undef GATHER_ELEMENTS_GRAD_SPECIALIZED_TINDEX_IMPL
#endif
} // namespace cuda
} // namespace onnxruntime
|
fdd48bf38ac4237ad86147f71001afa0ef3b0871.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma, created on 16.04.2018
//
#include <ops/declarable/helpers/reverse.h>
#include <helpers/ShapeUtils.h>
#include <array/ResultSet.h>
#include <TAD.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static __global__ void reverseTadKernel(void* vinput, Nd4jLong *inputShape, void* voutput, Nd4jLong *outputShape, Nd4jLong *inputTadShape, Nd4jLong *inputTadOffsets, Nd4jLong *outputTadShape, Nd4jLong *outputTadOffsets, uint64_t limit, uint64_t numOfElemsToReverse, uint64_t numTads) {
auto input = reinterpret_cast<T*>(vinput);
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
// an odd length means an extra pass is needed to move the middle element
auto div = numOfElemsToReverse / 2;
auto odd = numOfElemsToReverse % 2 != 0;
auto rlimit = odd ? limit / 2 + 1 : limit / 2;
// all threads operate in the same input/output space
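// worked example (sketch): with numOfElemsToReverse = 5, div = 2, so each TAD
// gets two swaps -- elements 0 <-> 4 and 1 <-> 3 -- while the middle element
// (index 2) is copied separately by the odd-length block at the end of the kernel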
for (uint64_t e = tid; e < rlimit; e += step) {
// finding out the TAD we're going to process
auto tadId = e / div;
if (tadId >= numTads)
continue;
// now finding out element within tad
auto idx = e % div;
//printf("TID: %i; numTads: %lld; tadLength: %lld; tadId: %i, idx: %lld\n", tid, numTads, numOfElemsToReverse, tadId, idx);
auto tadInput = input + inputTadOffsets[tadId];
auto tadOutput = output + outputTadOffsets[tadId];
// we're calculating offsets within input TAD
auto fOffset = shape::getIndexOffset(idx, inputTadShape);
auto lOffset = shape::getIndexOffset(numOfElemsToReverse - idx - 1, inputTadShape);
// now we're storing input values
auto v1 = tadInput[fOffset];
auto v2 = tadInput[lOffset];
// now we're calculating offsets within output TAD
auto zfOffset = shape::getIndexOffset(idx, outputTadShape);
auto zlOffset = shape::getIndexOffset(numOfElemsToReverse - idx - 1, outputTadShape);
// and saving values to output arrays
tadOutput[zfOffset] = v2;
tadOutput[zlOffset] = v1;
}
// moving odd element in blocks
if (odd && threadIdx.x == 0) {
for (uint64_t e = blockIdx.x; e < numTads; e += gridDim.x) {
auto tadInput = input + inputTadOffsets[e];
auto tadOutput = output + outputTadOffsets[e];
auto xOffset = shape::getIndexOffset(numOfElemsToReverse / 2, inputTadShape);
auto zOffset = shape::getIndexOffset(numOfElemsToReverse / 2, outputTadShape);
tadOutput[zOffset] = tadInput[xOffset];
}
}
}
template <typename T>
static __global__ void reverseArrayKernel(void* input, Nd4jLong *inputShape, void* output, Nd4jLong *outputShape, Nd4jLong numOfElemsToReverse) {
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
__shared__ int linearStatus;
__shared__ T* inputArr;
__shared__ T* outputArr;
__shared__ char inputOrder, outputOrder;
if (threadIdx.x == 0) {
inputOrder = shape::order(inputShape);
outputOrder = shape::order(outputShape);
linearStatus = (shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape)) && (inputOrder == outputOrder) ? shape::elementWiseStride(inputShape) : 0;
inputArr = reinterpret_cast<T*>(input);
outputArr = reinterpret_cast<T*>(output);
}
__syncthreads();
auto odd = numOfElemsToReverse % 2 != 0;
auto limit = numOfElemsToReverse / 2;
for (uint64_t e = tid; e < limit; e += step) {
// we're calculating offsets within input array
auto fOffset = shape::getIndexOffset(e, inputShape);
auto lOffset = shape::getIndexOffset(numOfElemsToReverse - e - 1, inputShape);
// now we're storing input values
auto v1 = inputArr[fOffset];
auto v2 = inputArr[lOffset];
// now we're calculating offsets within output array
auto zfOffset = shape::getIndexOffset(e, outputShape);
auto zlOffset = shape::getIndexOffset(numOfElemsToReverse - e - 1, outputShape);
// and saving values to output arrays
outputArr[zfOffset] = v2;
outputArr[zlOffset] = v1;
}
// in case of odd array we'll have to move middle value
if (odd && tid == 0) {
auto xOffset = shape::getIndexOffset(limit, inputShape);
auto zOffset = shape::getIndexOffset(limit, outputShape);
outputArr[zOffset] = inputArr[xOffset];
}
}
template<typename T>
static void reverseTad(nd4j::LaunchContext * context, const NDArray* input, NDArray* output, Nd4jLong *inputTadShape, Nd4jLong *inputTadOffsets, Nd4jLong *outputTadShape, Nd4jLong *outputTadOffsets, uint64_t tadLength) {
auto stream = context->getCudaStream();
hipLaunchKernelGGL(( reverseTadKernel<T>), dim3(256), dim3(512), 8192, *stream, input->getSpecialBuffer(), input->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTadShape, inputTadOffsets, outputTadShape, outputTadOffsets, input->lengthOf(), tadLength, input->lengthOf() / tadLength);
}
template<typename T>
static void reverseArray(nd4j::LaunchContext * context, const NDArray* input, NDArray* output, Nd4jLong numOfElemsToReverse) {
auto stream = context->getCudaStream();
Nd4jLong numOfReverse = numOfElemsToReverse;
if (numOfElemsToReverse == 0)
numOfReverse = input->lengthOf();
hipLaunchKernelGGL(( reverseArrayKernel<T>), dim3(256), dim3(512), 8192, *stream, input->getSpecialBuffer(), input->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), numOfReverse);
}
///////////////////////////////////////////////////////////////////
template <typename T>
static void reverseSequence_(nd4j::LaunchContext * context, const NDArray* input, const NDArray* seqLengths, NDArray* output, int seqDim, const int batchDim){
int posOfNonUnityDim = -1;
seqLengths->syncToHost();
auto stream = context->getCudaStream();
if(input->isVector() || shape::isLikeVector(input->getShapeInfo(), posOfNonUnityDim) || seqLengths->lengthOf() == 1) {
int numOfElemsToReverse = seqLengths->e<int>(0);
if((seqDim == 0 && input->sizeAt(0) == 1) || (batchDim == posOfNonUnityDim))
output->assign(input);
else
hipLaunchKernelGGL(( reverseArrayKernel<T>), dim3(256), dim3(512), 8192, *stream, input->getSpecialBuffer(), input->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), numOfElemsToReverse);//helpers::reverseArray<T>(context, const_cast<NDArray*>(input), output, numOfElemsToReverse);
}
else {
if(seqDim > batchDim)
--seqDim;
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {batchDim});
auto inSubArrsSet = input->allTensorsAlongDimension(dimensions);
auto outSubArrsSet = output->allTensorsAlongDimension(dimensions);
for(int i = 0; i < inSubArrsSet->size(); ++i) {
int numOfElemsToReverse = seqLengths->e<int>(i);
if(numOfElemsToReverse == 0 || numOfElemsToReverse == 1) {
outSubArrsSet->at(i)->assign(inSubArrsSet->at(i));
}
else {
auto inInnerSet = inSubArrsSet->at(i)->allTensorsAlongDimension({seqDim});
auto outInnerSet = outSubArrsSet->at(i)->allTensorsAlongDimension({seqDim});
for(int j = 0; j < inInnerSet->size(); ++j)
reverseArray<T>(context, inInnerSet->at(j), outInnerSet->at(j), numOfElemsToReverse);
delete inInnerSet;
delete outInnerSet;
}
}
delete inSubArrsSet;
delete outSubArrsSet;
}
}
void reverseSequence(nd4j::LaunchContext * context, const NDArray* input, const NDArray* seqLengths, NDArray* output, int seqDim, const int batchDim) {
NDArray::prepareSpecialUse({output}, {input, seqLengths});
// if op isn't inplace - copy original data into output array
if (output->getSpecialBuffer() != input->getSpecialBuffer())
output->assign(input);
BUILD_SINGLE_SELECTOR(input->dataType(), reverseSequence_, (context, input, seqLengths, output, seqDim, batchDim), LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {input, seqLengths});
}
//////////////////////////////////////////////////////////////////////////
void reverse(nd4j::LaunchContext * context, const NDArray* input, NDArray* output, const std::vector<int>* intArgs, bool isBackProp) {
// we need to reverse axis only if that's new op
std::vector<int> dimensions = isBackProp ? ShapeUtils::evalDimsToExclude(input->rankOf(), *intArgs) : *intArgs;
std::vector<int> axis = ShapeUtils::evalDimsToExclude(input->rankOf(), dimensions);
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions);
NDArray::prepareSpecialUse({output}, {input});
if (packX.numberOfTads() == 1) {
BUILD_SINGLE_SELECTOR(input->dataType(), reverseArray, (context, input, output, 0), LIBND4J_TYPES);
} else {
BUILD_SINGLE_SELECTOR(input->dataType(), reverseTad, (context, input, output, packX.platformShapeInfo(), packX.platformOffsets(), packZ.platformShapeInfo(), packZ.platformOffsets(), (uint64_t) (input->lengthOf() / packX.numberOfTads())), LIBND4J_TYPES);
}
NDArray::registerSpecialUse({output}, {input});
}
BUILD_SINGLE_TEMPLATE(template void reverseArray, (nd4j::LaunchContext * context, const NDArray *inArr, NDArray *outArr, Nd4jLong numOfElemsToReverse), LIBND4J_TYPES);
}
}
}
| fdd48bf38ac4237ad86147f71001afa0ef3b0871.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma, created on 16.04.2018
//
#include <ops/declarable/helpers/reverse.h>
#include <helpers/ShapeUtils.h>
#include <array/ResultSet.h>
#include <TAD.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>
namespace nd4j {
namespace ops {
namespace helpers {
template <typename T>
static __global__ void reverseTadKernel(void* vinput, Nd4jLong *inputShape, void* voutput, Nd4jLong *outputShape, Nd4jLong *inputTadShape, Nd4jLong *inputTadOffsets, Nd4jLong *outputTadShape, Nd4jLong *outputTadOffsets, uint64_t limit, uint64_t numOfElemsToReverse, uint64_t numTads) {
auto input = reinterpret_cast<T*>(vinput);
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
// this means that we'll have additional cycle, to move middle element
auto div = numOfElemsToReverse / 2;
auto odd = numOfElemsToReverse % 2 != 0;
auto rlimit = odd ? limit / 2 + 1 : limit / 2;
// all threads operate in the same input/output space
for (uint64_t e = tid; e < rlimit; e += step) {
// finding out the TAD we're going to process
auto tadId = e / div;
if (tadId >= numTads)
continue;
// now finding out element within tad
auto idx = e % div;
//printf("TID: %i; numTads: %lld; tadLength: %lld; tadId: %i, idx: %lld\n", tid, numTads, numOfElemsToReverse, tadId, idx);
auto tadInput = input + inputTadOffsets[tadId];
auto tadOutput = output + outputTadOffsets[tadId];
// we're calculating offsets within input TAD
auto fOffset = shape::getIndexOffset(idx, inputTadShape);
auto lOffset = shape::getIndexOffset(numOfElemsToReverse - idx - 1, inputTadShape);
// now we're storing input values
auto v1 = tadInput[fOffset];
auto v2 = tadInput[lOffset];
// now we're calculating offsets within output TAD
auto zfOffset = shape::getIndexOffset(idx, outputTadShape);
auto zlOffset = shape::getIndexOffset(numOfElemsToReverse - idx - 1, outputTadShape);
// and saving values to output arrays
tadOutput[zfOffset] = v2;
tadOutput[zlOffset] = v1;
}
// moving odd element in blocks
if (odd && threadIdx.x == 0) {
for (uint64_t e = blockIdx.x; e < numTads; e += gridDim.x) {
auto tadInput = input + inputTadOffsets[e];
auto tadOutput = output + outputTadOffsets[e];
auto xOffset = shape::getIndexOffset(numOfElemsToReverse / 2, inputTadShape);
auto zOffset = shape::getIndexOffset(numOfElemsToReverse / 2, outputTadShape);
tadOutput[zOffset] = tadInput[xOffset];
}
}
}
template <typename T>
static __global__ void reverseArrayKernel(void* input, Nd4jLong *inputShape, void* output, Nd4jLong *outputShape, Nd4jLong numOfElemsToReverse) {
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
__shared__ int linearStatus;
__shared__ T* inputArr;
__shared__ T* outputArr;
__shared__ char inputOrder, outputOrder;
if (threadIdx.x == 0) {
// set the shared order flags before they are read in the element-wise-stride comparison
inputOrder = shape::order(inputShape);
outputOrder = shape::order(outputShape);
linearStatus = (shape::elementWiseStride(inputShape) == shape::elementWiseStride(outputShape)) && (inputOrder == outputOrder) ? shape::elementWiseStride(inputShape) : 0;
inputArr = reinterpret_cast<T*>(input);
outputArr = reinterpret_cast<T*>(output);
}
__syncthreads();
auto odd = numOfElemsToReverse % 2 != 0;
auto limit = numOfElemsToReverse / 2;
for (uint64_t e = tid; e < limit; e += step) {
// we're calculating offsets within input array
auto fOffset = shape::getIndexOffset(e, inputShape);
auto lOffset = shape::getIndexOffset(numOfElemsToReverse - e - 1, inputShape);
// now we're storing input values
auto v1 = inputArr[fOffset];
auto v2 = inputArr[lOffset];
// now we're calculating offsets within output array
auto zfOffset = shape::getIndexOffset(e, outputShape);
auto zlOffset = shape::getIndexOffset(numOfElemsToReverse - e - 1, outputShape);
// and saving values to output arrays
outputArr[zfOffset] = v2;
outputArr[zlOffset] = v1;
}
// in case of odd array we'll have to move middle value
if (odd && tid == 0) {
auto xOffset = shape::getIndexOffset(limit, inputShape);
auto zOffset = shape::getIndexOffset(limit, outputShape);
outputArr[zOffset] = inputArr[xOffset];
}
}
template<typename T>
static void reverseTad(nd4j::LaunchContext * context, const NDArray* input, NDArray* output, Nd4jLong *inputTadShape, Nd4jLong *inputTadOffsets, Nd4jLong *outputTadShape, Nd4jLong *outputTadOffsets, uint64_t tadLength) {
auto stream = context->getCudaStream();
reverseTadKernel<T><<<256, 512, 8192, *stream>>>(input->getSpecialBuffer(), input->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), inputTadShape, inputTadOffsets, outputTadShape, outputTadOffsets, input->lengthOf(), tadLength, input->lengthOf() / tadLength);
}
template<typename T>
static void reverseArray(nd4j::LaunchContext * context, const NDArray* input, NDArray* output, Nd4jLong numOfElemsToReverse) {
auto stream = context->getCudaStream();
Nd4jLong numOfReverse = numOfElemsToReverse;
if (numOfElemsToReverse == 0)
numOfReverse = input->lengthOf();
reverseArrayKernel<T><<<256, 512, 8192, *stream>>>(input->getSpecialBuffer(), input->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), numOfReverse);
}
///////////////////////////////////////////////////////////////////
template <typename T>
static void reverseSequence_(nd4j::LaunchContext * context, const NDArray* input, const NDArray* seqLengths, NDArray* output, int seqDim, const int batchDim){
int posOfNonUnityDim = -1;
seqLengths->syncToHost();
auto stream = context->getCudaStream();
if(input->isVector() || shape::isLikeVector(input->getShapeInfo(), posOfNonUnityDim) || seqLengths->lengthOf() == 1) {
int numOfElemsToReverse = seqLengths->e<int>(0);
if((seqDim == 0 && input->sizeAt(0) == 1) || (batchDim == posOfNonUnityDim))
output->assign(input);
else
reverseArrayKernel<T><<<256, 512, 8192, *stream>>>(input->getSpecialBuffer(), input->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(), numOfElemsToReverse);//helpers::reverseArray<T>(context, const_cast<NDArray*>(input), output, numOfElemsToReverse);
}
else {
if(seqDim > batchDim)
--seqDim;
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {batchDim});
auto inSubArrsSet = input->allTensorsAlongDimension(dimensions);
auto outSubArrsSet = output->allTensorsAlongDimension(dimensions);
for(int i = 0; i < inSubArrsSet->size(); ++i) {
int numOfElemsToReverse = seqLengths->e<int>(i);
if(numOfElemsToReverse == 0 || numOfElemsToReverse == 1) {
outSubArrsSet->at(i)->assign(inSubArrsSet->at(i));
}
else {
auto inInnerSet = inSubArrsSet->at(i)->allTensorsAlongDimension({seqDim});
auto outInnerSet = outSubArrsSet->at(i)->allTensorsAlongDimension({seqDim});
for(int j = 0; j < inInnerSet->size(); ++j)
reverseArray<T>(context, inInnerSet->at(j), outInnerSet->at(j), numOfElemsToReverse);
delete inInnerSet;
delete outInnerSet;
}
}
delete inSubArrsSet;
delete outSubArrsSet;
}
}
void reverseSequence(nd4j::LaunchContext * context, const NDArray* input, const NDArray* seqLengths, NDArray* output, int seqDim, const int batchDim) {
NDArray::prepareSpecialUse({output}, {input, seqLengths});
// if op isn't inplace - copy original data into output array
if (output->getSpecialBuffer() != input->getSpecialBuffer())
output->assign(input);
BUILD_SINGLE_SELECTOR(input->dataType(), reverseSequence_, (context, input, seqLengths, output, seqDim, batchDim), LIBND4J_TYPES);
NDArray::registerSpecialUse({output}, {input, seqLengths});
}
//////////////////////////////////////////////////////////////////////////
void reverse(nd4j::LaunchContext * context, const NDArray* input, NDArray* output, const std::vector<int>* intArgs, bool isBackProp) {
// we need to reverse axis only if that's new op
std::vector<int> dimensions = isBackProp ? ShapeUtils::evalDimsToExclude(input->rankOf(), *intArgs) : *intArgs;
std::vector<int> axis = ShapeUtils::evalDimsToExclude(input->rankOf(), dimensions);
auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);
auto packZ = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions);
NDArray::prepareSpecialUse({output}, {input});
if (packX.numberOfTads() == 1) {
BUILD_SINGLE_SELECTOR(input->dataType(), reverseArray, (context, input, output, 0), LIBND4J_TYPES);
} else {
BUILD_SINGLE_SELECTOR(input->dataType(), reverseTad, (context, input, output, packX.platformShapeInfo(), packX.platformOffsets(), packZ.platformShapeInfo(), packZ.platformOffsets(), (uint64_t) (input->lengthOf() / packX.numberOfTads())), LIBND4J_TYPES);
}
NDArray::registerSpecialUse({output}, {input});
}
BUILD_SINGLE_TEMPLATE(template void reverseArray, (nd4j::LaunchContext * context, const NDArray *inArr, NDArray *outArr, Nd4jLong numOfElemsToReverse), LIBND4J_TYPES);
}
}
}
|
004494aef4f2cfc33b08a038212f8c3dfdf6037f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/ATCollisionMethodGPU.cu
* \brief Defines GPU functions and kernels used by mpcd::ATCollisionMethodGPU
*/
#include "ATCollisionMethodGPU.cuh"
#include "ParticleDataUtilities.h"
#include "hoomd/RandomNumbers.h"
#include "hoomd/RNGIdentifiers.h"
namespace mpcd
{
namespace gpu
{
namespace kernel
{
__global__ void at_draw_velocity(Scalar4 *d_alt_vel,
Scalar4 *d_alt_vel_embed,
const unsigned int *d_tag,
const Scalar mpcd_mass,
const unsigned int *d_embed_idx,
const Scalar4 *d_vel_embed,
const unsigned int *d_tag_embed,
const unsigned int timestep,
const unsigned int seed,
const Scalar T,
const unsigned int N_mpcd,
const unsigned int N_tot)
{
// one thread per particle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N_tot)
return;
unsigned int pidx;
unsigned int tag; Scalar mass;
if (idx < N_mpcd)
{
pidx = idx;
mass = mpcd_mass;
tag = d_tag[idx];
}
else
{
pidx = d_embed_idx[idx-N_mpcd];
mass = d_vel_embed[pidx].w;
tag = d_tag_embed[pidx];
}
// draw random velocities from normal distribution
hoomd::RandomGenerator rng(hoomd::RNGIdentifier::ATCollisionMethod, seed, tag, timestep);
hoomd::NormalDistribution<Scalar> gen(fast::sqrt(T/mass), 0.0);
Scalar3 vel;
gen(vel.x, vel.y, rng);
vel.z = gen(rng);
// save out velocities
if (idx < N_mpcd)
{
d_alt_vel[pidx] = make_scalar4(vel.x, vel.y, vel.z, __int_as_scalar(mpcd::detail::NO_CELL));
}
else
{
d_alt_vel_embed[pidx] = make_scalar4(vel.x, vel.y, vel.z, mass);
}
}
__global__ void at_apply_velocity(Scalar4 *d_vel,
Scalar4 *d_vel_embed,
const Scalar4 *d_vel_alt,
const unsigned int *d_embed_idx,
const Scalar4 *d_vel_alt_embed,
const unsigned int *d_embed_cell_ids,
const double4 *d_cell_vel,
const double4 *d_rand_vel,
const unsigned int N_mpcd,
const unsigned int N_tot)
{
// one thread per particle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N_tot)
return;
unsigned int cell, pidx;
Scalar4 vel_rand;
if (idx < N_mpcd)
{
pidx = idx;
const Scalar4 vel_cell = d_vel[idx];
cell = __scalar_as_int(vel_cell.w);
vel_rand = d_vel_alt[idx];
}
else
{
pidx = d_embed_idx[idx-N_mpcd];
cell = d_embed_cell_ids[idx-N_mpcd];
vel_rand = d_vel_alt_embed[pidx];
}
// load cell data
const double4 v_c = d_cell_vel[cell];
const double4 vrand_c = d_rand_vel[cell];
// compute new velocity using the cell + the random draw
const Scalar3 vnew = make_scalar3(v_c.x - vrand_c.x + vel_rand.x,
v_c.y - vrand_c.y + vel_rand.y,
v_c.z - vrand_c.z + vel_rand.z);
if (idx < N_mpcd)
{
d_vel[pidx] = make_scalar4(vnew.x, vnew.y, vnew.z, __int_as_scalar(cell));
}
else
{
d_vel_embed[pidx] = make_scalar4(vnew.x, vnew.y, vnew.z, vel_rand.w);
}
}
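//! Sketch of the per-particle update applied in at_apply_velocity (illustration only, with a
//! hypothetical helper name; the cell mean velocity and the cell mean of the random draws are
//! assumed to have been reduced elsewhere): the cell mean velocity is kept and the thermal part
//! is replaced by the freshly drawn random velocity.
__host__ __device__ __forceinline__ Scalar3 at_velocity_update_sketch(const double4& cell_vel,
                                                                      const double4& cell_rand_vel,
                                                                      const Scalar4& rand_vel)
    {
    return make_scalar3(cell_vel.x - cell_rand_vel.x + rand_vel.x,
                        cell_vel.y - cell_rand_vel.y + rand_vel.y,
                        cell_vel.z - cell_rand_vel.z + rand_vel.z);
    }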
} // end namespace kernel
hipError_t at_draw_velocity(Scalar4 *d_alt_vel,
Scalar4 *d_alt_vel_embed,
const unsigned int *d_tag,
const Scalar mpcd_mass,
const unsigned int *d_embed_idx,
const Scalar4 *d_vel_embed,
const unsigned int *d_tag_embed,
const unsigned int timestep,
const unsigned int seed,
const Scalar T,
const unsigned int N_mpcd,
const unsigned int N_tot,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::at_draw_velocity);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_tot / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::at_draw_velocity), dim3(grid), dim3(run_block_size), 0, 0, d_alt_vel,
d_alt_vel_embed,
d_tag,
mpcd_mass,
d_embed_idx,
d_vel_embed,
d_tag_embed,
timestep,
seed,
T,
N_mpcd,
N_tot);
return hipSuccess;
}
hipError_t at_apply_velocity(Scalar4 *d_vel,
Scalar4 *d_vel_embed,
const Scalar4 *d_vel_alt,
const unsigned int *d_embed_idx,
const Scalar4 *d_vel_alt_embed,
const unsigned int *d_embed_cell_ids,
const double4 *d_cell_vel,
const double4 *d_rand_vel,
const unsigned int N_mpcd,
const unsigned int N_tot,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
hipFuncAttributes attr;
hipFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::at_apply_velocity);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_tot / run_block_size + 1);
hipLaunchKernelGGL(( mpcd::gpu::kernel::at_apply_velocity), dim3(grid), dim3(run_block_size), 0, 0, d_vel,
d_vel_embed,
d_vel_alt,
d_embed_idx,
d_vel_alt_embed,
d_embed_cell_ids,
d_cell_vel,
d_rand_vel,
N_mpcd,
N_tot);
return hipSuccess;
}
} // end namespace gpu
} // end namespace mpcd
| 004494aef4f2cfc33b08a038212f8c3dfdf6037f.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: mphoward
/*!
* \file mpcd/ATCollisionMethodGPU.cu
* \brief Defines GPU functions and kernels used by mpcd::ATCollisionMethodGPU
*/
#include "ATCollisionMethodGPU.cuh"
#include "ParticleDataUtilities.h"
#include "hoomd/RandomNumbers.h"
#include "hoomd/RNGIdentifiers.h"
namespace mpcd
{
namespace gpu
{
namespace kernel
{
__global__ void at_draw_velocity(Scalar4 *d_alt_vel,
Scalar4 *d_alt_vel_embed,
const unsigned int *d_tag,
const Scalar mpcd_mass,
const unsigned int *d_embed_idx,
const Scalar4 *d_vel_embed,
const unsigned int *d_tag_embed,
const unsigned int timestep,
const unsigned int seed,
const Scalar T,
const unsigned int N_mpcd,
const unsigned int N_tot)
{
// one thread per particle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N_tot)
return;
unsigned int pidx;
unsigned int tag; Scalar mass;
if (idx < N_mpcd)
{
pidx = idx;
mass = mpcd_mass;
tag = d_tag[idx];
}
else
{
pidx = d_embed_idx[idx-N_mpcd];
mass = d_vel_embed[pidx].w;
tag = d_tag_embed[pidx];
}
// draw random velocities from normal distribution
hoomd::RandomGenerator rng(hoomd::RNGIdentifier::ATCollisionMethod, seed, tag, timestep);
hoomd::NormalDistribution<Scalar> gen(fast::sqrt(T/mass), 0.0);
Scalar3 vel;
gen(vel.x, vel.y, rng);
vel.z = gen(rng);
// save out velocities
if (idx < N_mpcd)
{
d_alt_vel[pidx] = make_scalar4(vel.x, vel.y, vel.z, __int_as_scalar(mpcd::detail::NO_CELL));
}
else
{
d_alt_vel_embed[pidx] = make_scalar4(vel.x, vel.y, vel.z, mass);
}
}
__global__ void at_apply_velocity(Scalar4 *d_vel,
Scalar4 *d_vel_embed,
const Scalar4 *d_vel_alt,
const unsigned int *d_embed_idx,
const Scalar4 *d_vel_alt_embed,
const unsigned int *d_embed_cell_ids,
const double4 *d_cell_vel,
const double4 *d_rand_vel,
const unsigned int N_mpcd,
const unsigned int N_tot)
{
// one thread per particle
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N_tot)
return;
unsigned int cell, pidx;
Scalar4 vel_rand;
if (idx < N_mpcd)
{
pidx = idx;
const Scalar4 vel_cell = d_vel[idx];
cell = __scalar_as_int(vel_cell.w);
vel_rand = d_vel_alt[idx];
}
else
{
pidx = d_embed_idx[idx-N_mpcd];
cell = d_embed_cell_ids[idx-N_mpcd];
vel_rand = d_vel_alt_embed[pidx];
}
// load cell data
const double4 v_c = d_cell_vel[cell];
const double4 vrand_c = d_rand_vel[cell];
// compute new velocity using the cell + the random draw
const Scalar3 vnew = make_scalar3(v_c.x - vrand_c.x + vel_rand.x,
v_c.y - vrand_c.y + vel_rand.y,
v_c.z - vrand_c.z + vel_rand.z);
if (idx < N_mpcd)
{
d_vel[pidx] = make_scalar4(vnew.x, vnew.y, vnew.z, __int_as_scalar(cell));
}
else
{
d_vel_embed[pidx] = make_scalar4(vnew.x, vnew.y, vnew.z, vel_rand.w);
}
}
} // end namespace kernel
cudaError_t at_draw_velocity(Scalar4 *d_alt_vel,
Scalar4 *d_alt_vel_embed,
const unsigned int *d_tag,
const Scalar mpcd_mass,
const unsigned int *d_embed_idx,
const Scalar4 *d_vel_embed,
const unsigned int *d_tag_embed,
const unsigned int timestep,
const unsigned int seed,
const Scalar T,
const unsigned int N_mpcd,
const unsigned int N_tot,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::at_draw_velocity);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_tot / run_block_size + 1);
mpcd::gpu::kernel::at_draw_velocity<<<grid, run_block_size>>>(d_alt_vel,
d_alt_vel_embed,
d_tag,
mpcd_mass,
d_embed_idx,
d_vel_embed,
d_tag_embed,
timestep,
seed,
T,
N_mpcd,
N_tot);
return cudaSuccess;
}
cudaError_t at_apply_velocity(Scalar4 *d_vel,
Scalar4 *d_vel_embed,
const Scalar4 *d_vel_alt,
const unsigned int *d_embed_idx,
const Scalar4 *d_vel_alt_embed,
const unsigned int *d_embed_cell_ids,
const double4 *d_cell_vel,
const double4 *d_rand_vel,
const unsigned int N_mpcd,
const unsigned int N_tot,
const unsigned int block_size)
{
static unsigned int max_block_size = UINT_MAX;
if (max_block_size == UINT_MAX)
{
cudaFuncAttributes attr;
cudaFuncGetAttributes(&attr, (const void*)mpcd::gpu::kernel::at_apply_velocity);
max_block_size = attr.maxThreadsPerBlock;
}
unsigned int run_block_size = min(block_size, max_block_size);
dim3 grid(N_tot / run_block_size + 1);
mpcd::gpu::kernel::at_apply_velocity<<<grid, run_block_size>>>(d_vel,
d_vel_embed,
d_vel_alt,
d_embed_idx,
d_vel_alt_embed,
d_embed_cell_ids,
d_cell_vel,
d_rand_vel,
N_mpcd,
N_tot);
return cudaSuccess;
}
} // end namespace gpu
} // end namespace mpcd
|
ae573ded099d1ab61c3f69f570e46685b6c52d1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of DeviceSelect::If and DevicePartition::If utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <typeinfo>
#include <hipcub/hipcub.hpp>
#include <cub/device/device_partition.cuh>
#include <cub/iterator/counting_input_iterator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
#include <thrust/partition.h>
#include <thrust/iterator/reverse_iterator.h>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_timing_iterations = 0;
int g_repeat = 0;
float g_device_giga_bandwidth;
CachingDeviceAllocator g_allocator(true);
// Dispatch types
enum Backend
{
CUB, // CUB method
THRUST, // Thrust method
CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
};
// Selection functor type
template <typename T>
struct LessThan
{
T compare;
__host__ __device__ __forceinline__
LessThan(T compare) : compare(compare) {}
__host__ __device__ __forceinline__
bool operator()(const T &a) const {
return (a < compare);
}
};
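/**
 * Minimal sketch of the two-phase CUB calling convention exercised by the Dispatch overloads
 * below (illustration only; the helper name is hypothetical and the device buffers are assumed
 * to be valid allocations sized for num_items)
 */
template <typename T>
static void SelectIfSketch(T* d_in, T* d_out, int* d_num_selected_out, int num_items, T compare)
{
    LessThan<T> select_op(compare);
    // First call: d_temp_storage is NULL, so only the required temporary storage size is reported
    void* d_temp_storage = NULL;
    size_t temp_storage_bytes = 0;
    CubDebugExit(DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op));
    // Second call: temporary storage is provided, so the selection actually runs
    CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
    CubDebugExit(DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op));
    CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
}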
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceSelect entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to select if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<false> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
hipError_t error = hipSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to partition if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<true> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
hipError_t error = hipSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to select flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<false> partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
hipError_t error = hipSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to partition flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
hipError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<true> partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
hipError_t error = hipSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous);
}
return error;
}
//---------------------------------------------------------------------
// Dispatch to different Thrust entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to select if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
hipError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<false> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::device_ptr<OutputT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_out_wrapper, select_op);
}
OffsetT num_selected = d_out_wrapper_end - d_out_wrapper;
CubDebugExit(hipMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), hipMemcpyHostToDevice));
}
return hipSuccess;
}
/**
* Dispatch to partition if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
hipError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<true> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
typedef thrust::reverse_iterator<thrust::device_ptr<OutputT> > ReverseOutputIteratorT;
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::pair<thrust::device_ptr<OutputT>, ReverseOutputIteratorT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::partition_copy(
d_in_wrapper,
d_in_wrapper + num_items,
d_out_wrapper,
d_out_unselected,
select_op);
}
OffsetT num_selected = d_out_wrapper_end.first - d_out_wrapper;
CubDebugExit(hipMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), hipMemcpyHostToDevice));
}
return hipSuccess;
}
/**
* Dispatch to select flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
hipError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<false> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
// The flag type
typedef typename std::iterator_traits<FlagIteratorT>::value_type FlagT;
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::device_ptr<OutputT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
thrust::device_ptr<FlagT> d_flags_wrapper(d_flags);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_flags_wrapper, d_out_wrapper, Cast<bool>());
}
OffsetT num_selected = d_out_wrapper_end - d_out_wrapper;
CubDebugExit(hipMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), hipMemcpyHostToDevice));
}
return hipSuccess;
}
/**
* Dispatch to partition flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
hipError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<true> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
// The flag type
typedef typename std::iterator_traits<FlagIteratorT>::value_type FlagT;
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
typedef thrust::reverse_iterator<thrust::device_ptr<OutputT> > ReverseOutputIteratorT;
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::pair<thrust::device_ptr<OutputT>, ReverseOutputIteratorT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
thrust::device_ptr<FlagT> d_flags_wrapper(d_flags);
ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::partition_copy(
d_in_wrapper,
d_in_wrapper + num_items,
d_flags_wrapper,
d_out_wrapper,
d_out_unselected,
Cast<bool>());
}
OffsetT num_selected = d_out_wrapper_end.first - d_out_wrapper;
CubDebugExit(hipMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), hipMemcpyHostToDevice));
}
return hipSuccess;
}
//---------------------------------------------------------------------
// CUDA Nested Parallelism Test Kernel
//---------------------------------------------------------------------
/**
* Simple wrapper kernel to invoke DeviceSelect
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT, typename IsFlaggedTag, typename IsPartitionTag>
__global__ void CnpDispatchKernel(
IsFlaggedTag is_flagged,
IsPartitionTag is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
bool debug_synchronous)
{
#ifndef CUB_CDP
*d_cdp_error = hipErrorNotSupported;
#else
*d_cdp_error = Dispatch(Int2Type<CUB>(), is_flagged, is_partition, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, debug_synchronous);
*d_temp_storage_bytes = temp_storage_bytes;
#endif
}
/**
* Dispatch to CDP kernel
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT, typename IsFlaggedTag, typename IsPartitionTag>
hipError_t Dispatch(
Int2Type<CDP> dispatch_to,
IsFlaggedTag is_flagged,
IsPartitionTag is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
hipError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
hipStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to invoke device-side dispatch
hipLaunchKernelGGL(( CnpDispatchKernel), dim3(1),dim3(1), 0, 0, is_flagged, is_partition, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, debug_synchronous);
// Copy out temp_storage_bytes
CubDebugExit(hipMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, hipMemcpyDeviceToHost));
// Copy out error
hipError_t retval;
CubDebugExit(hipMemcpy(&retval, d_cdp_error, sizeof(hipError_t) * 1, hipMemcpyDeviceToHost));
return retval;
}
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
/**
* Initialize problem
*/
template <typename T>
void Initialize(
T* h_in,
int num_items)
{
for (int i = 0; i < num_items; ++i)
{
// Initialize each item to a randomly selected value from [0..126]
unsigned int value;
RandomBits(value, 0, 0, 7);
if (value == 127)
value = 126;
InitValue(INTEGER_SEED, h_in[i], value);
}
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/**
* Solve selection problem (and set corresponding flags)
*/
template <
typename InputIteratorT,
typename FlagIteratorT,
typename SelectOpT,
typename T>
int Solve(
InputIteratorT h_in,
SelectOpT select_op,
T* h_reference,
FlagIteratorT h_flags,
int num_items)
{
int num_selected = 0;
for (int i = 0; i < num_items; ++i)
{
if ((h_flags[i] = select_op(h_in[i])))
{
h_reference[num_selected] = h_in[i];
num_selected++;
}
else
{
h_reference[num_items - (i - num_selected) - 1] = h_in[i];
}
}
return num_selected;
}
/**
* Test DeviceSelect for a given problem input
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename DeviceInputIteratorT,
typename FlagT,
typename SelectOpT,
typename T>
void Test(
DeviceInputIteratorT d_in,
FlagT* h_flags,
SelectOpT select_op,
T* h_reference,
int num_selected,
int num_items)
{
// Allocate device flags, output, and num-selected
FlagT* d_flags = NULL;
T* d_out = NULL;
int* d_num_selected_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(FlagT) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int)));
// Allocate CDP device arrays
size_t* d_temp_storage_bytes = NULL;
hipError_t* d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(hipError_t) * 1));
// Allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), 1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, true));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Copy flags and clear device output array
CubDebugExit(hipMemcpy(d_flags, h_flags, sizeof(FlagT) * num_items, hipMemcpyHostToDevice));
CubDebugExit(hipMemset(d_out, 0, sizeof(T) * num_items));
CubDebugExit(hipMemset(d_num_selected_out, 0, sizeof(int)));
// Run warmup/correctness iteration
CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), 1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, true));
// Check for correctness (and display results, if specified)
int compare1 = (IS_PARTITION) ?
CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose) :
CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose);
printf("\t Data %s\n", compare1 ? "FAIL" : "PASS");
int compare2 = CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose);
printf("\t Count %s\n", compare2 ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, false));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
int num_output_items = (IS_PARTITION) ? num_items : num_selected;
int num_flag_items = (IS_FLAGGED) ? num_items : 0;
size_t num_bytes = sizeof(T) * (num_items + num_output_items) + sizeof(FlagT) * num_flag_items;
float giga_bandwidth = float(num_bytes) / avg_millis / 1000.0f / 1000.0f;
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0);
}
printf("\n\n");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Cleanup
if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out));
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, compare1 | compare2);
}
/**
* Test on pointer type
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename T>
void TestPointer(
int num_items,
float select_ratio)
{
typedef char FlagT;
// Allocate host arrays
T* h_in = new T[num_items];
FlagT* h_flags = new FlagT[num_items];
T* h_reference = new T[num_items];
// Initialize input
Initialize(h_in, num_items);
// Select a comparison value that is select_ratio through the space of [0,127]
T compare;
if (select_ratio <= 0.0)
InitValue(INTEGER_SEED, compare, 0); // select none
else if (select_ratio >= 1.0)
InitValue(INTEGER_SEED, compare, 127); // select all
else
InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio)));
LessThan<T> select_op(compare);
int num_selected = Solve(h_in, select_op, h_reference, h_flags, num_items);
if (g_verbose) std::cout << "\nComparison item: " << compare << "\n";
printf("\nPointer %s hipcub::%s::%s %d items, %d selected (select ratio %.3f), %s %d-byte elements\n",
(IS_PARTITION) ? "DevicePartition" : "DeviceSelect",
(IS_FLAGGED) ? "Flagged" : "If",
(BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB",
num_items, num_selected, float(num_selected) / num_items, typeid(T).name(), (int) sizeof(T));
fflush(stdout);
// Allocate problem device arrays
T *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items));
// Initialize device input
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * num_items, hipMemcpyHostToDevice));
// Run Test
Test<BACKEND, IS_FLAGGED, IS_PARTITION>(d_in, h_flags, select_op, h_reference, num_selected, num_items);
// Cleanup
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (h_flags) delete[] h_flags;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
}
/**
* Test on iterator type
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename T>
void TestIterator(
int num_items,
float select_ratio)
{
typedef char FlagT;
// Allocate host arrays
T* h_reference = new T[num_items];
FlagT* h_flags = new FlagT[num_items];
// Use counting iterator as the input
CountingInputIterator<T, int> h_in(0);
// Select a comparison value that is select_ratio through the space of [0,127]
T compare;
if (select_ratio <= 0.0)
InitValue(INTEGER_SEED, compare, 0); // select none
else if (select_ratio >= 1.0)
InitValue(INTEGER_SEED, compare, 127); // select all
else
InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio)));
LessThan<T> select_op(compare);
int num_selected = Solve(h_in, select_op, h_reference, h_flags, num_items);
if (g_verbose) std::cout << "\nComparison item: " << compare << "\n";
printf("\nIterator %s hipcub::%s::%s %d items, %d selected (select ratio %.3f), %s %d-byte elements\n",
(IS_PARTITION) ? "DevicePartition" : "DeviceSelect",
(IS_FLAGGED) ? "Flagged" : "If",
(BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB",
num_items, num_selected, float(num_selected) / num_items, typeid(T).name(), (int) sizeof(T));
fflush(stdout);
// Run Test
Test<BACKEND, IS_FLAGGED, IS_PARTITION>(h_in, h_flags, select_op, h_reference, num_selected, num_items);
// Cleanup
if (h_reference) delete[] h_reference;
if (h_flags) delete[] h_flags;
}
/**
* Test different selection ratios
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename T>
void Test(
int num_items)
{
for (float select_ratio = 0.0f; select_ratio <= 1.0f; select_ratio += 0.2f)
{
TestPointer<BACKEND, IS_FLAGGED, IS_PARTITION, T>(num_items, select_ratio);
}
}
/**
* Test (select vs. partition) and (flagged vs. functor)
*/
template <
Backend BACKEND,
typename T>
void TestMethod(
int num_items)
{
// Functor
Test<BACKEND, false, false, T>(num_items);
Test<BACKEND, false, true, T>(num_items);
// Flagged
Test<BACKEND, true, false, T>(num_items);
Test<BACKEND, true, true, T>(num_items);
}
/**
* Test different dispatch
*/
template <
typename T>
void TestOp(
int num_items)
{
TestMethod<CUB, T>(num_items);
#ifdef CUB_CDP
TestMethod<CDP, T>(num_items);
#endif
}
/**
* Test different input sizes
*/
template <typename T>
void Test(
int num_items)
{
if (num_items < 0)
{
TestOp<T>(0);
TestOp<T>(1);
TestOp<T>(100);
TestOp<T>(10000);
TestOp<T>(1000000);
}
else
{
TestOp<T>(num_items);
}
}
/**
* Test select/partition on pointer types
*/
template <typename T>
void ComparePointer(
int num_items,
float select_ratio)
{
printf("-- Select-if ----------------------------\n");
TestPointer<CUB, false, false, T>(num_items, select_ratio);
TestPointer<THRUST, false, false, T>(num_items, select_ratio);
printf("-- Partition-if ----------------------------\n");
TestPointer<CUB, false, true, T>(num_items, select_ratio);
TestPointer<THRUST, false, true, T>(num_items, select_ratio);
printf("-- Select-flagged ----------------------------\n");
TestPointer<CUB, true, false, T>(num_items, select_ratio);
TestPointer<THRUST, true, false, T>(num_items, select_ratio);
printf("-- Partition-flagged ----------------------------\n");
TestPointer<CUB, true, true, T>(num_items, select_ratio);
TestPointer<THRUST, true, true, T>(num_items, select_ratio);
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
int num_items = -1;
float select_ratio = 0.5;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("n", num_items);
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("repeat", g_repeat);
args.GetCmdLineArgument("ratio", select_ratio);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items> "
"[--i=<timing iterations> "
"[--device=<device-id>] "
"[--ratio=<selection ratio, default 0.5>] "
"[--repeat=<repetitions of entire test suite>] "
"[--v] "
"[--cdp] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
g_device_giga_bandwidth = args.device_giga_bandwidth;
printf("\n");
#ifdef QUICKER_TEST
// Compile/run basic CUB test
if (num_items < 0) num_items = 32000000;
printf("-- Select-if ----------------------------\n");
TestPointer<CUB, false, false, int>(num_items, select_ratio);
printf("-- Partition-if ----------------------------\n");
TestPointer<CUB, false, true, int>(num_items, select_ratio);
printf("-- Select-flagged ----------------------------\n");
TestPointer<CUB, true, false, int>(num_items, select_ratio);
printf("-- Partition-flagged ----------------------------\n");
TestPointer<CUB, true, true, int>(num_items, select_ratio);
#elif defined(QUICK_TEST)
// Get device ordinal
int device_ordinal;
CubDebugExit(hipGetDevice(&device_ordinal));
// Get device SM version
int sm_version;
CubDebugExit(SmVersion(sm_version, device_ordinal));
// Compile/run quick tests
if (num_items < 0) num_items = 32000000;
printf("-- Iterator ----------------------------\n");
TestIterator<CUB, false, false, int>(num_items, select_ratio);
ComparePointer<char>( num_items * ((sm_version <= 130) ? 1 : 4), select_ratio);
ComparePointer<short>( num_items * ((sm_version <= 130) ? 1 : 2), select_ratio);
ComparePointer<int>( num_items, select_ratio);
ComparePointer<long long>( num_items / 2, select_ratio);
ComparePointer<TestFoo>( num_items / 4, select_ratio);
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// Test different input types
Test<unsigned char>(num_items);
Test<unsigned short>(num_items);
Test<unsigned int>(num_items);
Test<unsigned long long>(num_items);
Test<uchar2>(num_items);
Test<ushort2>(num_items);
Test<uint2>(num_items);
Test<ulonglong2>(num_items);
Test<uchar4>(num_items);
Test<ushort4>(num_items);
Test<uint4>(num_items);
Test<ulonglong4>(num_items);
Test<TestFoo>(num_items);
Test<TestBar>(num_items);
}
#endif
return 0;
}
| ae573ded099d1ab61c3f69f570e46685b6c52d1d.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of DeviceSelect::If and DevicePartition::If utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <typeinfo>
#include <cub/util_allocator.cuh>
#include <cub/device/device_select.cuh>
#include <cub/device/device_partition.cuh>
#include <cub/iterator/counting_input_iterator.cuh>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
#include <thrust/partition.h>
#include <thrust/iterator/reverse_iterator.h>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_timing_iterations = 0;
int g_repeat = 0;
float g_device_giga_bandwidth;
CachingDeviceAllocator g_allocator(true);
// Dispatch types
enum Backend
{
CUB, // CUB method
THRUST, // Thrust method
CDP, // GPU-based (dynamic parallelism) dispatch to CUB method
};
// Selection functor type
template <typename T>
struct LessThan
{
T compare;
__host__ __device__ __forceinline__
LessThan(T compare) : compare(compare) {}
__host__ __device__ __forceinline__
bool operator()(const T &a) const {
return (a < compare);
}
};
//---------------------------------------------------------------------
// Dispatch to different CUB DeviceSelect entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to select if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<false> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceSelect::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to partition if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<true> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DevicePartition::If(d_temp_storage, temp_storage_bytes, d_in, d_out, d_num_selected_out, num_items, select_op, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to select flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<false> partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DeviceSelect::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous);
}
return error;
}
/**
* Dispatch to partition flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
CUB_RUNTIME_FUNCTION __forceinline__
cudaError_t Dispatch(
Int2Type<CUB> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<true> partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
cudaError_t error = cudaSuccess;
for (int i = 0; i < timing_timing_iterations; ++i)
{
error = DevicePartition::Flagged(d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, stream, debug_synchronous);
}
return error;
}
//---------------------------------------------------------------------
// Dispatch to different Thrust entrypoints
//---------------------------------------------------------------------
/**
* Dispatch to select if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
cudaError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<false> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
if (d_temp_storage == 0)
{
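        // Size-request pass: report a dummy 1-byte allocation, since Thrust manages its own temporaries internally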
temp_storage_bytes = 1;
}
else
{
thrust::device_ptr<OutputT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_out_wrapper, select_op);
}
OffsetT num_selected = d_out_wrapper_end - d_out_wrapper;
CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice));
}
return cudaSuccess;
}
/**
* Dispatch to partition if entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
cudaError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<false> is_flagged,
Int2Type<true> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
typedef thrust::reverse_iterator<thrust::device_ptr<OutputT> > ReverseOutputIteratorT;
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::pair<thrust::device_ptr<OutputT>, ReverseOutputIteratorT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::partition_copy(
d_in_wrapper,
d_in_wrapper + num_items,
d_out_wrapper,
d_out_unselected,
select_op);
}
OffsetT num_selected = d_out_wrapper_end.first - d_out_wrapper;
CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice));
}
return cudaSuccess;
}
/**
* Dispatch to select flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
cudaError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<false> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
// The flag type
typedef typename std::iterator_traits<FlagIteratorT>::value_type FlagT;
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::device_ptr<OutputT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
thrust::device_ptr<FlagT> d_flags_wrapper(d_flags);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::copy_if(d_in_wrapper, d_in_wrapper + num_items, d_flags_wrapper, d_out_wrapper, Cast<bool>());
}
OffsetT num_selected = d_out_wrapper_end - d_out_wrapper;
CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice));
}
return cudaSuccess;
}
/**
* Dispatch to partition flagged entrypoint
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT>
__host__ __forceinline__
cudaError_t Dispatch(
Int2Type<THRUST> dispatch_to,
Int2Type<true> is_flagged,
Int2Type<true> is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
// The flag type
typedef typename std::iterator_traits<FlagIteratorT>::value_type FlagT;
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
typedef thrust::reverse_iterator<thrust::device_ptr<OutputT> > ReverseOutputIteratorT;
if (d_temp_storage == 0)
{
temp_storage_bytes = 1;
}
else
{
thrust::pair<thrust::device_ptr<OutputT>, ReverseOutputIteratorT> d_out_wrapper_end;
thrust::device_ptr<InputT> d_in_wrapper(d_in);
thrust::device_ptr<OutputT> d_out_wrapper(d_out);
thrust::device_ptr<FlagT> d_flags_wrapper(d_flags);
ReverseOutputIteratorT d_out_unselected(d_out_wrapper + num_items);
for (int i = 0; i < timing_timing_iterations; ++i)
{
d_out_wrapper_end = thrust::partition_copy(
d_in_wrapper,
d_in_wrapper + num_items,
d_flags_wrapper,
d_out_wrapper,
d_out_unselected,
Cast<bool>());
}
OffsetT num_selected = d_out_wrapper_end.first - d_out_wrapper;
CubDebugExit(cudaMemcpy(d_num_selected_out, &num_selected, sizeof(OffsetT), cudaMemcpyHostToDevice));
}
return cudaSuccess;
}
//---------------------------------------------------------------------
// CUDA Nested Parallelism Test Kernel
//---------------------------------------------------------------------
/**
* Simple wrapper kernel to invoke DeviceSelect
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT, typename IsFlaggedTag, typename IsPartitionTag>
__global__ void CnpDispatchKernel(
IsFlaggedTag is_flagged,
IsPartitionTag is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
bool debug_synchronous)
{
#ifndef CUB_CDP
*d_cdp_error = cudaErrorNotSupported;
#else
*d_cdp_error = Dispatch(Int2Type<CUB>(), is_flagged, is_partition, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, debug_synchronous);
*d_temp_storage_bytes = temp_storage_bytes;
#endif
}
/**
* Dispatch to CDP kernel
*/
template <typename InputIteratorT, typename FlagIteratorT, typename SelectOpT, typename OutputIteratorT, typename NumSelectedIteratorT, typename OffsetT, typename IsFlaggedTag, typename IsPartitionTag>
cudaError_t Dispatch(
Int2Type<CDP> dispatch_to,
IsFlaggedTag is_flagged,
IsPartitionTag is_partition,
int timing_timing_iterations,
size_t* d_temp_storage_bytes,
cudaError_t* d_cdp_error,
void* d_temp_storage,
size_t& temp_storage_bytes,
InputIteratorT d_in,
FlagIteratorT d_flags,
OutputIteratorT d_out,
NumSelectedIteratorT d_num_selected_out,
OffsetT num_items,
SelectOpT select_op,
cudaStream_t stream,
bool debug_synchronous)
{
// Invoke kernel to invoke device-side dispatch
CnpDispatchKernel<<<1,1>>>(is_flagged, is_partition, timing_timing_iterations, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, debug_synchronous);
// Copy out temp_storage_bytes
CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost));
// Copy out error
cudaError_t retval;
CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost));
return retval;
}
//---------------------------------------------------------------------
// Test generation
//---------------------------------------------------------------------
/**
* Initialize problem
*/
template <typename T>
void Initialize(
T* h_in,
int num_items)
{
for (int i = 0; i < num_items; ++i)
{
// Initialize each item to a randomly selected value from [0..126]
unsigned int value;
RandomBits(value, 0, 0, 7);
if (value == 127)
value = 126;
InitValue(INTEGER_SEED, h_in[i], value);
}
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n\n");
}
}
/**
* Solve selection problem (and set corresponding flags)
*/
template <
typename InputIteratorT,
typename FlagIteratorT,
typename SelectOpT,
typename T>
int Solve(
InputIteratorT h_in,
SelectOpT select_op,
T* h_reference,
FlagIteratorT h_flags,
int num_items)
{
int num_selected = 0;
for (int i = 0; i < num_items; ++i)
{
if ((h_flags[i] = select_op(h_in[i])))
{
h_reference[num_selected] = h_in[i];
num_selected++;
}
else
{
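            // Rejected items fill the reference array from the back in reverse order of occurrence, matching DevicePartition's output layout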
h_reference[num_items - (i - num_selected) - 1] = h_in[i];
}
}
return num_selected;
}
/**
* Test DeviceSelect for a given problem input
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename DeviceInputIteratorT,
typename FlagT,
typename SelectOpT,
typename T>
void Test(
DeviceInputIteratorT d_in,
FlagT* h_flags,
SelectOpT select_op,
T* h_reference,
int num_selected,
int num_items)
{
// Allocate device flags, output, and num-selected
FlagT* d_flags = NULL;
T* d_out = NULL;
int* d_num_selected_out = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_flags, sizeof(FlagT) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_num_selected_out, sizeof(int)));
// Allocate CDP device arrays
size_t* d_temp_storage_bytes = NULL;
cudaError_t* d_cdp_error = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1));
// Allocate temporary storage
void *d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
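    // First dispatch with d_temp_storage == NULL only queries the required temporary storage size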
CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), 1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, true));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes));
// Copy flags and clear device output array
CubDebugExit(cudaMemcpy(d_flags, h_flags, sizeof(FlagT) * num_items, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * num_items));
CubDebugExit(cudaMemset(d_num_selected_out, 0, sizeof(int)));
// Run warmup/correctness iteration
CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), 1, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, true));
// Check for correctness (and display results, if specified)
int compare1 = (IS_PARTITION) ?
CompareDeviceResults(h_reference, d_out, num_items, true, g_verbose) :
CompareDeviceResults(h_reference, d_out, num_selected, true, g_verbose);
printf("\t Data %s\n", compare1 ? "FAIL" : "PASS");
int compare2 = CompareDeviceResults(&num_selected, d_num_selected_out, 1, true, g_verbose);
printf("\t Count %s\n", compare2 ? "FAIL" : "PASS");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Performance
GpuTimer gpu_timer;
gpu_timer.Start();
CubDebugExit(Dispatch(Int2Type<BACKEND>(), Int2Type<IS_FLAGGED>(), Int2Type<IS_PARTITION>(), g_timing_iterations, d_temp_storage_bytes, d_cdp_error,
d_temp_storage, temp_storage_bytes, d_in, d_flags, d_out, d_num_selected_out, num_items, select_op, 0, false));
gpu_timer.Stop();
float elapsed_millis = gpu_timer.ElapsedMillis();
// Display performance
if (g_timing_iterations > 0)
{
float avg_millis = elapsed_millis / g_timing_iterations;
float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f;
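        // Logical bytes moved: every input item is read, every output item is written, plus flag reads when flagged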
int num_output_items = (IS_PARTITION) ? num_items : num_selected;
int num_flag_items = (IS_FLAGGED) ? num_items : 0;
size_t num_bytes = sizeof(T) * (num_items + num_output_items) + sizeof(FlagT) * num_flag_items;
float giga_bandwidth = float(num_bytes) / avg_millis / 1000.0f / 1000.0f;
printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0);
}
printf("\n\n");
// Flush any stdout/stderr
fflush(stdout);
fflush(stderr);
// Cleanup
if (d_flags) CubDebugExit(g_allocator.DeviceFree(d_flags));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_num_selected_out) CubDebugExit(g_allocator.DeviceFree(d_num_selected_out));
if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes));
if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error));
if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage));
// Correctness asserts
AssertEquals(0, compare1 | compare2);
}
/**
* Test on pointer type
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename T>
void TestPointer(
int num_items,
float select_ratio)
{
typedef char FlagT;
// Allocate host arrays
T* h_in = new T[num_items];
FlagT* h_flags = new FlagT[num_items];
T* h_reference = new T[num_items];
// Initialize input
Initialize(h_in, num_items);
// Select a comparison value that is select_ratio through the space of [0,127]
T compare;
if (select_ratio <= 0.0)
InitValue(INTEGER_SEED, compare, 0); // select none
else if (select_ratio >= 1.0)
InitValue(INTEGER_SEED, compare, 127); // select all
else
InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio)));
LessThan<T> select_op(compare);
int num_selected = Solve(h_in, select_op, h_reference, h_flags, num_items);
if (g_verbose) std::cout << "\nComparison item: " << compare << "\n";
printf("\nPointer %s cub::%s::%s %d items, %d selected (select ratio %.3f), %s %d-byte elements\n",
(IS_PARTITION) ? "DevicePartition" : "DeviceSelect",
(IS_FLAGGED) ? "Flagged" : "If",
(BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB",
num_items, num_selected, float(num_selected) / num_items, typeid(T).name(), (int) sizeof(T));
fflush(stdout);
// Allocate problem device arrays
T *d_in = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items));
// Initialize device input
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * num_items, cudaMemcpyHostToDevice));
// Run Test
Test<BACKEND, IS_FLAGGED, IS_PARTITION>(d_in, h_flags, select_op, h_reference, num_selected, num_items);
// Cleanup
if (h_in) delete[] h_in;
if (h_reference) delete[] h_reference;
if (h_flags) delete[] h_flags;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
}
/**
* Test on iterator type
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename T>
void TestIterator(
int num_items,
float select_ratio)
{
typedef char FlagT;
// Allocate host arrays
T* h_reference = new T[num_items];
FlagT* h_flags = new FlagT[num_items];
// Use counting iterator as the input
CountingInputIterator<T, int> h_in(0);
// Select a comparison value that is select_ratio through the space of [0,127]
T compare;
if (select_ratio <= 0.0)
InitValue(INTEGER_SEED, compare, 0); // select none
else if (select_ratio >= 1.0)
InitValue(INTEGER_SEED, compare, 127); // select all
else
InitValue(INTEGER_SEED, compare, int(double(double(127) * select_ratio)));
LessThan<T> select_op(compare);
int num_selected = Solve(h_in, select_op, h_reference, h_flags, num_items);
if (g_verbose) std::cout << "\nComparison item: " << compare << "\n";
printf("\nIterator %s cub::%s::%s %d items, %d selected (select ratio %.3f), %s %d-byte elements\n",
(IS_PARTITION) ? "DevicePartition" : "DeviceSelect",
(IS_FLAGGED) ? "Flagged" : "If",
(BACKEND == CDP) ? "CDP CUB" : (BACKEND == THRUST) ? "Thrust" : "CUB",
num_items, num_selected, float(num_selected) / num_items, typeid(T).name(), (int) sizeof(T));
fflush(stdout);
// Run Test
Test<BACKEND, IS_FLAGGED, IS_PARTITION>(h_in, h_flags, select_op, h_reference, num_selected, num_items);
// Cleanup
if (h_reference) delete[] h_reference;
if (h_flags) delete[] h_flags;
}
/**
* Test different selection ratios
*/
template <
Backend BACKEND,
bool IS_FLAGGED,
bool IS_PARTITION,
typename T>
void Test(
int num_items)
{
for (float select_ratio = 0.0f; select_ratio <= 1.0f; select_ratio += 0.2f)
{
TestPointer<BACKEND, IS_FLAGGED, IS_PARTITION, T>(num_items, select_ratio);
}
}
/**
* Test (select vs. partition) and (flagged vs. functor)
*/
template <
Backend BACKEND,
typename T>
void TestMethod(
int num_items)
{
// Functor
Test<BACKEND, false, false, T>(num_items);
Test<BACKEND, false, true, T>(num_items);
// Flagged
Test<BACKEND, true, false, T>(num_items);
Test<BACKEND, true, true, T>(num_items);
}
/**
* Test different dispatch
*/
template <
typename T>
void TestOp(
int num_items)
{
TestMethod<CUB, T>(num_items);
#ifdef CUB_CDP
TestMethod<CDP, T>(num_items);
#endif
}
/**
* Test different input sizes
*/
template <typename T>
void Test(
int num_items)
{
if (num_items < 0)
{
TestOp<T>(0);
TestOp<T>(1);
TestOp<T>(100);
TestOp<T>(10000);
TestOp<T>(1000000);
}
else
{
TestOp<T>(num_items);
}
}
/**
* Test select/partition on pointer types
*/
template <typename T>
void ComparePointer(
int num_items,
float select_ratio)
{
printf("-- Select-if ----------------------------\n");
TestPointer<CUB, false, false, T>(num_items, select_ratio);
TestPointer<THRUST, false, false, T>(num_items, select_ratio);
printf("-- Partition-if ----------------------------\n");
TestPointer<CUB, false, true, T>(num_items, select_ratio);
TestPointer<THRUST, false, true, T>(num_items, select_ratio);
printf("-- Select-flagged ----------------------------\n");
TestPointer<CUB, true, false, T>(num_items, select_ratio);
TestPointer<THRUST, true, false, T>(num_items, select_ratio);
printf("-- Partition-flagged ----------------------------\n");
TestPointer<CUB, true, true, T>(num_items, select_ratio);
TestPointer<THRUST, true, true, T>(num_items, select_ratio);
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Main
*/
int main(int argc, char** argv)
{
int num_items = -1;
float select_ratio = 0.5;
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("n", num_items);
args.GetCmdLineArgument("i", g_timing_iterations);
args.GetCmdLineArgument("repeat", g_repeat);
args.GetCmdLineArgument("ratio", select_ratio);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--n=<input items> "
"[--i=<timing iterations> "
"[--device=<device-id>] "
"[--ratio=<selection ratio, default 0.5>] "
"[--repeat=<repetitions of entire test suite>] "
"[--v] "
"[--cdp] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
g_device_giga_bandwidth = args.device_giga_bandwidth;
printf("\n");
#ifdef QUICKER_TEST
// Compile/run basic CUB test
if (num_items < 0) num_items = 32000000;
printf("-- Select-if ----------------------------\n");
TestPointer<CUB, false, false, int>(num_items, select_ratio);
printf("-- Partition-if ----------------------------\n");
TestPointer<CUB, false, true, int>(num_items, select_ratio);
printf("-- Select-flagged ----------------------------\n");
TestPointer<CUB, true, false, int>(num_items, select_ratio);
printf("-- Partition-flagged ----------------------------\n");
TestPointer<CUB, true, true, int>(num_items, select_ratio);
#elif defined(QUICK_TEST)
// Get device ordinal
int device_ordinal;
CubDebugExit(cudaGetDevice(&device_ordinal));
// Get device SM version
int sm_version;
CubDebugExit(SmVersion(sm_version, device_ordinal));
// Compile/run quick tests
if (num_items < 0) num_items = 32000000;
printf("-- Iterator ----------------------------\n");
TestIterator<CUB, false, false, int>(num_items, select_ratio);
ComparePointer<char>( num_items * ((sm_version <= 130) ? 1 : 4), select_ratio);
ComparePointer<short>( num_items * ((sm_version <= 130) ? 1 : 2), select_ratio);
ComparePointer<int>( num_items, select_ratio);
ComparePointer<long long>( num_items / 2, select_ratio);
ComparePointer<TestFoo>( num_items / 4, select_ratio);
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// Test different input types
Test<unsigned char>(num_items);
Test<unsigned short>(num_items);
Test<unsigned int>(num_items);
Test<unsigned long long>(num_items);
Test<uchar2>(num_items);
Test<ushort2>(num_items);
Test<uint2>(num_items);
Test<ulonglong2>(num_items);
Test<uchar4>(num_items);
Test<ushort4>(num_items);
Test<uint4>(num_items);
Test<ulonglong4>(num_items);
Test<TestFoo>(num_items);
Test<TestBar>(num_items);
}
#endif
return 0;
}
|
9fb091d390d2dc2866f44d6d8e86907bc109ba8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstring>
#include <fstream>
#include <iostream>
#ifndef SHA256_H
#define SHA256_H
#include <string>
class SHA256
{
protected:
const static unsigned int sha256_k[];
static const unsigned int SHA224_256_BLOCK_SIZE = (512/8);
public:
void init();
void update(const unsigned char *message, unsigned int len);
void final(unsigned char *digest);
static const unsigned int DIGEST_SIZE = ( 256 / 8);
protected:
void transform(const unsigned char *message, unsigned int block_nb, unsigned int number_of_elements);
unsigned int m_tot_len;
unsigned int m_len;
unsigned char m_block[2*SHA224_256_BLOCK_SIZE];
unsigned int m_h[8];
};
std::string sha256(std::string input);
#define SHA2_SHFR(x, n) (x >> n)
#define SHA2_ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n)))
#define SHA2_ROTL(x, n) ((x << n) | (x >> ((sizeof(x) << 3) - n)))
#define SHA2_CH(x, y, z) ((x & y) ^ (~x & z))
#define SHA2_MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z))
#define SHA256_F1(x) (SHA2_ROTR(x, 2) ^ SHA2_ROTR(x, 13) ^ SHA2_ROTR(x, 22))
#define SHA256_F2(x) (SHA2_ROTR(x, 6) ^ SHA2_ROTR(x, 11) ^ SHA2_ROTR(x, 25))
#define SHA256_F3(x) (SHA2_ROTR(x, 7) ^ SHA2_ROTR(x, 18) ^ SHA2_SHFR(x, 3))
#define SHA256_F4(x) (SHA2_ROTR(x, 17) ^ SHA2_ROTR(x, 19) ^ SHA2_SHFR(x, 10))
#define SHA2_UNPACK32(x, str) \
{ \
*((str) + 3) = (unsigned char) ((x) ); \
*((str) + 2) = (unsigned char) ((x) >> 8); \
*((str) + 1) = (unsigned char) ((x) >> 16); \
*((str) + 0) = (unsigned char) ((x) >> 24); \
}
#define SHA2_PACK32(str, x) \
{ \
*(x) = ((unsigned int) *((str) + 3) ) \
| ((unsigned int) *((str) + 2) << 8) \
| ((unsigned int) *((str) + 1) << 16) \
| ((unsigned int) *((str) + 0) << 24); \
}
#endif
using std::string;
using std::cout;
using std::endl;
const unsigned int SHA256::sha256_k[64] = //UL = uint32
{0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};
__device__ unsigned int t11;
__device__ unsigned int t12;
__global__ void kernel1(unsigned char* sub_block_d, unsigned int *w_d, unsigned int *wv, unsigned int *m_h) {
int j = threadIdx.x;
wv[j] = m_h[j];
if ( j >= 8 ) {
SHA2_PACK32(&sub_block_d[j << 2], &w_d[j]);
// printf("%d %d\n", sub_block_d[j << 2], w_d[j]);
}
}
__global__ void kernel2(unsigned int *w_d, unsigned int *wv, unsigned int *sha256_k) {
int j = threadIdx.x;
if (j >= 16){
w_d[j] = SHA256_F4(w_d[j - 2]) + w_d[j - 7] + SHA256_F3(w_d[j - 15]) + w_d[j - 16];
}
t11 = wv[7] + SHA256_F2(wv[4]) + SHA2_CH(wv[4], wv[5], wv[6])
+ sha256_k[j] + w_d[j];
t12 = SHA256_F1(wv[0]) + SHA2_MAJ(wv[0], wv[1], wv[2]);
wv[7] = wv[6];
wv[6] = wv[5];
wv[5] = wv[4];
wv[4] = wv[3] + t11;
wv[3] = wv[2];
wv[2] = wv[1];
wv[1] = wv[0];
wv[0] = t11 + t12;
}
__global__ void kernel3(unsigned int *wv, unsigned int *m_h) {
int j = threadIdx.x;
m_h[j] += wv[j];
}
void SHA256::transform(const unsigned char *message, unsigned int block_nb, unsigned int number_of_elements)
{
unsigned int w[64];
unsigned int wv[8];
unsigned int *wv1;
unsigned int *w_d;
unsigned int *sha256_k1;
unsigned int *wv_d;
unsigned int *m_h_d;
const unsigned char *sub_block;
unsigned char *sub_block_d;
int i;
for (i = 0; i < (int) block_nb; i++) {
sub_block = message + (i << 6);
///////////////////////// K 1 ////////////////////////////////
hipMalloc((void **) &sub_block_d, sizeof(unsigned char) * number_of_elements);
hipMemcpy(sub_block_d, sub_block, sizeof(unsigned char) * number_of_elements, hipMemcpyHostToDevice);
hipMalloc((void **) &w_d, sizeof(unsigned int) * 64);
hipMemcpy(w_d, w, sizeof(unsigned int) * 64, hipMemcpyHostToDevice);
hipMalloc((void **) &wv_d, sizeof(unsigned int) * 8);
hipMemcpy(wv_d, wv, sizeof(unsigned int) * 8, hipMemcpyHostToDevice);
hipMalloc((void **) &m_h_d, sizeof(unsigned int) * 8);
hipMemcpy(m_h_d, m_h, sizeof(unsigned int) * 8, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel1), dim3(1), dim3(16), 0, 0, sub_block_d, w_d, wv_d, m_h_d);
hipDeviceSynchronize();
hipMemcpy(w, w_d, sizeof(unsigned int) * 64, hipMemcpyDeviceToHost);
hipMemcpy(wv, wv_d, sizeof(unsigned int) * 8, hipMemcpyDeviceToHost);
hipMemcpy(m_h, m_h_d, sizeof(unsigned int) * 8, hipMemcpyDeviceToHost);
// for(int k = 0; k < 64; k++){
// printf("%d ", w[k]);
// }
//////////////////////// K 2 ////////////////////////////////
hipMemcpy(w_d, w, sizeof(unsigned int) * 64, hipMemcpyHostToDevice);
hipMalloc((void **) &wv1, sizeof(unsigned int) * 8);
hipMemcpy(wv1, wv, sizeof(unsigned int) * 8, hipMemcpyHostToDevice);
hipMalloc((void **) &sha256_k1, sizeof(unsigned int) * 64);
hipMemcpy(sha256_k1, sha256_k, sizeof(unsigned int) * 64, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel2), dim3(1), dim3(64), 0, 0, w_d, wv1, sha256_k1);
hipDeviceSynchronize();
hipMemcpy(wv, wv1, sizeof(unsigned int) * 8, hipMemcpyDeviceToHost);
hipMemcpy(w, w_d, sizeof(unsigned int) * 64, hipMemcpyDeviceToHost);
//////////////////////////K 3///////////////////////////////
hipMemcpy(wv1, wv, sizeof(unsigned int) * 8, hipMemcpyHostToDevice);
hipMemcpy(m_h_d, m_h, sizeof(unsigned int) * 8, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel3), dim3(1), dim3(8), 0, 0, wv1, m_h_d);
hipDeviceSynchronize();
hipMemcpy(wv, wv1, sizeof(unsigned int) * 8, hipMemcpyDeviceToHost);
hipMemcpy(m_h, m_h_d, sizeof(unsigned int) * 8, hipMemcpyDeviceToHost);
}
}
void SHA256::init()
{
m_h[0] = 0x6a09e667;
m_h[1] = 0xbb67ae85;
m_h[2] = 0x3c6ef372;
m_h[3] = 0xa54ff53a;
m_h[4] = 0x510e527f;
m_h[5] = 0x9b05688c;
m_h[6] = 0x1f83d9ab;
m_h[7] = 0x5be0cd19;
m_len = 0;
m_tot_len = 0;
}
void SHA256::update(const unsigned char *message, unsigned int len)
{
unsigned int block_nb;
unsigned int new_len, rem_len, tmp_len;
const unsigned char *shifted_message;
tmp_len = SHA224_256_BLOCK_SIZE - m_len;
rem_len = len < tmp_len ? len : tmp_len;
memcpy(&m_block[m_len], message, rem_len);
if (m_len + len < SHA224_256_BLOCK_SIZE) {
m_len += len;
return;
}
new_len = len - rem_len;
block_nb = new_len / SHA224_256_BLOCK_SIZE;
shifted_message = message + rem_len;
block_nb = new_len / SHA224_256_BLOCK_SIZE;
transform(m_block, 1, SHA224_256_BLOCK_SIZE);
transform(shifted_message, block_nb, SHA224_256_BLOCK_SIZE);
rem_len = new_len % SHA224_256_BLOCK_SIZE;
memcpy(m_block, &shifted_message[block_nb << 6], rem_len);
m_len = rem_len;
m_tot_len += (block_nb + 1) << 6;
}
void SHA256::final(unsigned char *digest)
{
unsigned int block_nb;
unsigned int pm_len;
unsigned int len_b;
int i;
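    // Use a second block when fewer than 9 bytes remain for the 0x80 pad byte and the 64-bit message length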
block_nb = (1 + ((SHA224_256_BLOCK_SIZE - 9)
< (m_len % SHA224_256_BLOCK_SIZE)));
len_b = (m_tot_len + m_len) << 3;
pm_len = block_nb << 6;
memset(m_block + m_len, 0, pm_len - m_len);
m_block[m_len] = 0x80;
SHA2_UNPACK32(len_b, m_block + pm_len - 4);
transform(m_block, block_nb, block_nb * len_b);
for (i = 0 ; i < 8; i++) {
SHA2_UNPACK32(m_h[i], &digest[i << 2]);
}
}
std::string sha256(std::string input)
{
unsigned char digest[SHA256::DIGEST_SIZE];
memset(digest,0,SHA256::DIGEST_SIZE);
SHA256 ctx = SHA256();
ctx.init();
ctx.update( (unsigned char*)input.c_str(), input.length());
ctx.final(digest);
char buf[2*SHA256::DIGEST_SIZE+1];
buf[2*SHA256::DIGEST_SIZE] = 0;
for (int i = 0; i < SHA256::DIGEST_SIZE; i++)
sprintf(buf+i*2, "%02x", digest[i]);
return std::string(buf);
}
int main(int argc, char *argv[])
{
string input = "apple";
string output1 = sha256(input);
cout << "sha256('"<< input << "'):" << output1 << endl;
return 0;
} | 9fb091d390d2dc2866f44d6d8e86907bc109ba8d.cu | #include <cstring>
#include <fstream>
#include <iostream>
#ifndef SHA256_H
#define SHA256_H
#include <string>
class SHA256
{
protected:
const static unsigned int sha256_k[];
static const unsigned int SHA224_256_BLOCK_SIZE = (512/8);
public:
void init();
void update(const unsigned char *message, unsigned int len);
void final(unsigned char *digest);
static const unsigned int DIGEST_SIZE = ( 256 / 8);
protected:
void transform(const unsigned char *message, unsigned int block_nb, unsigned int number_of_elements);
unsigned int m_tot_len;
unsigned int m_len;
unsigned char m_block[2*SHA224_256_BLOCK_SIZE];
unsigned int m_h[8];
};
std::string sha256(std::string input);
#define SHA2_SHFR(x, n) (x >> n)
#define SHA2_ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n)))
#define SHA2_ROTL(x, n) ((x << n) | (x >> ((sizeof(x) << 3) - n)))
#define SHA2_CH(x, y, z) ((x & y) ^ (~x & z))
#define SHA2_MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z))
#define SHA256_F1(x) (SHA2_ROTR(x, 2) ^ SHA2_ROTR(x, 13) ^ SHA2_ROTR(x, 22))
#define SHA256_F2(x) (SHA2_ROTR(x, 6) ^ SHA2_ROTR(x, 11) ^ SHA2_ROTR(x, 25))
#define SHA256_F3(x) (SHA2_ROTR(x, 7) ^ SHA2_ROTR(x, 18) ^ SHA2_SHFR(x, 3))
#define SHA256_F4(x) (SHA2_ROTR(x, 17) ^ SHA2_ROTR(x, 19) ^ SHA2_SHFR(x, 10))
#define SHA2_UNPACK32(x, str) \
{ \
*((str) + 3) = (unsigned char) ((x) ); \
*((str) + 2) = (unsigned char) ((x) >> 8); \
*((str) + 1) = (unsigned char) ((x) >> 16); \
*((str) + 0) = (unsigned char) ((x) >> 24); \
}
#define SHA2_PACK32(str, x) \
{ \
*(x) = ((unsigned int) *((str) + 3) ) \
| ((unsigned int) *((str) + 2) << 8) \
| ((unsigned int) *((str) + 1) << 16) \
| ((unsigned int) *((str) + 0) << 24); \
}
#endif
using std::string;
using std::cout;
using std::endl;
const unsigned int SHA256::sha256_k[64] = //UL = uint32
{0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2};
__device__ unsigned int t11;
__device__ unsigned int t12;
__global__ void kernel1(unsigned char* sub_block_d, unsigned int *w_d, unsigned int *wv, unsigned int *m_h) {
int j = threadIdx.x;
wv[j] = m_h[j];
if ( j >= 8 ) {
SHA2_PACK32(&sub_block_d[j << 2], &w_d[j]);
// printf("%d %d\n", sub_block_d[j << 2], w_d[j]);
}
}
__global__ void kernel2(unsigned int *w_d, unsigned int *wv, unsigned int *sha256_k) {
int j = threadIdx.x;
if (j >= 16){
w_d[j] = SHA256_F4(w_d[j - 2]) + w_d[j - 7] + SHA256_F3(w_d[j - 15]) + w_d[j - 16];
}
t11 = wv[7] + SHA256_F2(wv[4]) + SHA2_CH(wv[4], wv[5], wv[6])
+ sha256_k[j] + w_d[j];
t12 = SHA256_F1(wv[0]) + SHA2_MAJ(wv[0], wv[1], wv[2]);
wv[7] = wv[6];
wv[6] = wv[5];
wv[5] = wv[4];
wv[4] = wv[3] + t11;
wv[3] = wv[2];
wv[2] = wv[1];
wv[1] = wv[0];
wv[0] = t11 + t12;
}
__global__ void kernel3(unsigned int *wv, unsigned int *m_h) {
int j = threadIdx.x;
m_h[j] += wv[j];
}
void SHA256::transform(const unsigned char *message, unsigned int block_nb, unsigned int number_of_elements)
{
unsigned int w[64];
unsigned int wv[8];
unsigned int *wv1;
unsigned int *w_d;
unsigned int *sha256_k1;
unsigned int *wv_d;
unsigned int *m_h_d;
const unsigned char *sub_block;
unsigned char *sub_block_d;
int i;
for (i = 0; i < (int) block_nb; i++) {
sub_block = message + (i << 6);
///////////////////////// K 1 ////////////////////////////////
cudaMalloc((void **) &sub_block_d, sizeof(unsigned char) * number_of_elements);
cudaMemcpy(sub_block_d, sub_block, sizeof(unsigned char) * number_of_elements, cudaMemcpyHostToDevice);
cudaMalloc((void **) &w_d, sizeof(unsigned int) * 64);
cudaMemcpy(w_d, w, sizeof(unsigned int) * 64, cudaMemcpyHostToDevice);
cudaMalloc((void **) &wv_d, sizeof(unsigned int) * 8);
cudaMemcpy(wv_d, wv, sizeof(unsigned int) * 8, cudaMemcpyHostToDevice);
cudaMalloc((void **) &m_h_d, sizeof(unsigned int) * 8);
cudaMemcpy(m_h_d, m_h, sizeof(unsigned int) * 8, cudaMemcpyHostToDevice);
kernel1<<<1, 16>>>(sub_block_d, w_d, wv_d, m_h_d);
cudaDeviceSynchronize();
cudaMemcpy(w, w_d, sizeof(unsigned int) * 64, cudaMemcpyDeviceToHost);
cudaMemcpy(wv, wv_d, sizeof(unsigned int) * 8, cudaMemcpyDeviceToHost);
cudaMemcpy(m_h, m_h_d, sizeof(unsigned int) * 8, cudaMemcpyDeviceToHost);
// for(int k = 0; k < 64; k++){
// printf("%d ", w[k]);
// }
//////////////////////// K 2 ////////////////////////////////
cudaMemcpy(w_d, w, sizeof(unsigned int) * 64, cudaMemcpyHostToDevice);
cudaMalloc((void **) &wv1, sizeof(unsigned int) * 8);
cudaMemcpy(wv1, wv, sizeof(unsigned int) * 8, cudaMemcpyHostToDevice);
cudaMalloc((void **) &sha256_k1, sizeof(unsigned int) * 64);
cudaMemcpy(sha256_k1, sha256_k, sizeof(unsigned int) * 64, cudaMemcpyHostToDevice);
kernel2<<<1, 64>>>(w_d, wv1, sha256_k1);
cudaDeviceSynchronize();
cudaMemcpy(wv, wv1, sizeof(unsigned int) * 8, cudaMemcpyDeviceToHost);
cudaMemcpy(w, w_d, sizeof(unsigned int) * 64, cudaMemcpyDeviceToHost);
//////////////////////////K 3///////////////////////////////
cudaMemcpy(wv1, wv, sizeof(unsigned int) * 8, cudaMemcpyHostToDevice);
cudaMemcpy(m_h_d, m_h, sizeof(unsigned int) * 8, cudaMemcpyHostToDevice);
kernel3<<<1, 8>>>(wv1, m_h_d);
cudaDeviceSynchronize();
cudaMemcpy(wv, wv1, sizeof(unsigned int) * 8, cudaMemcpyDeviceToHost);
cudaMemcpy(m_h, m_h_d, sizeof(unsigned int) * 8, cudaMemcpyDeviceToHost);
}
}
void SHA256::init()
{
m_h[0] = 0x6a09e667;
m_h[1] = 0xbb67ae85;
m_h[2] = 0x3c6ef372;
m_h[3] = 0xa54ff53a;
m_h[4] = 0x510e527f;
m_h[5] = 0x9b05688c;
m_h[6] = 0x1f83d9ab;
m_h[7] = 0x5be0cd19;
m_len = 0;
m_tot_len = 0;
}
void SHA256::update(const unsigned char *message, unsigned int len)
{
unsigned int block_nb;
unsigned int new_len, rem_len, tmp_len;
const unsigned char *shifted_message;
tmp_len = SHA224_256_BLOCK_SIZE - m_len;
rem_len = len < tmp_len ? len : tmp_len;
memcpy(&m_block[m_len], message, rem_len);
if (m_len + len < SHA224_256_BLOCK_SIZE) {
m_len += len;
return;
}
new_len = len - rem_len;
block_nb = new_len / SHA224_256_BLOCK_SIZE;
shifted_message = message + rem_len;
block_nb = new_len / SHA224_256_BLOCK_SIZE;
transform(m_block, 1, SHA224_256_BLOCK_SIZE);
transform(shifted_message, block_nb, SHA224_256_BLOCK_SIZE);
rem_len = new_len % SHA224_256_BLOCK_SIZE;
memcpy(m_block, &shifted_message[block_nb << 6], rem_len);
m_len = rem_len;
m_tot_len += (block_nb + 1) << 6;
}
void SHA256::final(unsigned char *digest)
{
unsigned int block_nb;
unsigned int pm_len;
unsigned int len_b;
int i;
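    // Use a second block when fewer than 9 bytes remain for the 0x80 pad byte and the 64-bit message length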
block_nb = (1 + ((SHA224_256_BLOCK_SIZE - 9)
< (m_len % SHA224_256_BLOCK_SIZE)));
len_b = (m_tot_len + m_len) << 3;
pm_len = block_nb << 6;
memset(m_block + m_len, 0, pm_len - m_len);
m_block[m_len] = 0x80;
SHA2_UNPACK32(len_b, m_block + pm_len - 4);
transform(m_block, block_nb, block_nb * len_b);
for (i = 0 ; i < 8; i++) {
SHA2_UNPACK32(m_h[i], &digest[i << 2]);
}
}
std::string sha256(std::string input)
{
unsigned char digest[SHA256::DIGEST_SIZE];
memset(digest,0,SHA256::DIGEST_SIZE);
SHA256 ctx = SHA256();
ctx.init();
ctx.update( (unsigned char*)input.c_str(), input.length());
ctx.final(digest);
char buf[2*SHA256::DIGEST_SIZE+1];
buf[2*SHA256::DIGEST_SIZE] = 0;
for (int i = 0; i < SHA256::DIGEST_SIZE; i++)
sprintf(buf+i*2, "%02x", digest[i]);
return std::string(buf);
}
int main(int argc, char *argv[])
{
string input = "apple";
string output1 = sha256(input);
cout << "sha256('"<< input << "'):" << output1 << endl;
return 0;
} |
04bf5a899ca29fb9530e95ced9c3064c76fbb22b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <vector>
#include <iostream>
#include "caffe/layers/nlu_layer.hpp"
#include "caffe/util/math_functions.hpp"
using namespace std;
namespace caffe {
template <typename Dtype>
__device__ Dtype Abs(const Dtype x) {
return ((x > Dtype(0)) ? x : (-x));
}
template <typename Dtype>
__device__ Dtype Sign(const Dtype x) {
return ((x > Dtype(0)) ? 1 : (-1));
}
template <typename Dtype>
__device__ Dtype Max(const Dtype x, const Dtype y) {
return ((x > y) ? x : y);
}
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
return ((x > y) ? y : x);
}
template <typename Dtype>
__global__ void NLUForward(const int n, const Dtype* in, Dtype ratio, Dtype* out) {
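  // Hard-sigmoid style response: scale the input by ratio, shift by 0.5, and clamp to [0, 1]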
CUDA_KERNEL_LOOP(index, n) {
out[index] = Min( Max(in[index] * ratio + Dtype(0.5), Dtype(0)), Dtype(1));
}
}
template <typename Dtype>
void NLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
hipLaunchKernelGGL(( NLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, Dtype(ratio_), top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void NLUBackward(const int n, const float alpha, const Dtype* in_diff,
const Dtype* bottom_data, const Dtype* noise, Dtype ratio, Dtype thre, Dtype* out_diff) {
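  // Pass the upstream gradient scaled by ratio inside the unsaturated region (|x| <= thre); outside it,
  // replace it with a pull-back term proportional to the overshoot, then add the injected noise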
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ( (Abs(bottom_data[index]) > thre) ? Dtype(0) : ratio)
+ ( (Abs(bottom_data[index]) > thre) ? ( Sign(bottom_data[index])*alpha*(Abs(bottom_data[index]) - thre) ) : Dtype(0))
+ noise[index];
}
}
template <typename Dtype>
void NLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype* noise_gaussian =
static_cast<Dtype*>(rand_vec_.mutable_gpu_data());
Dtype fSTD = sqrt(eta_/pow(1+t_,gamma_));
caffe_gpu_rng_gaussian(count, Dtype(max_mean_), fSTD, noise_gaussian);
Dtype dThre = 1.0 / (2 * ratio_);
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( NLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, this->alpha_, top_diff, bottom_data, noise_gaussian, ratio_, dThre, bottom_diff);
CUDA_POST_KERNEL_CHECK;
t_++;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(NLULayer);
} // namespace caffe
| 04bf5a899ca29fb9530e95ced9c3064c76fbb22b.cu | #include <cmath>
#include <vector>
#include <iostream>
#include "caffe/layers/nlu_layer.hpp"
#include "caffe/util/math_functions.hpp"
using namespace std;
namespace caffe {
template <typename Dtype>
__device__ Dtype Abs(const Dtype x) {
return ((x > Dtype(0)) ? x : (-x));
}
template <typename Dtype>
__device__ Dtype Sign(const Dtype x) {
return ((x > Dtype(0)) ? 1 : (-1));
}
template <typename Dtype>
__device__ Dtype Max(const Dtype x, const Dtype y) {
return ((x > y) ? x : y);
}
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
return ((x > y) ? y : x);
}
template <typename Dtype>
__global__ void NLUForward(const int n, const Dtype* in, Dtype ratio, Dtype* out) {
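  // Hard-sigmoid style response: scale the input by ratio, shift by 0.5, and clamp to [0, 1]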
CUDA_KERNEL_LOOP(index, n) {
out[index] = Min( Max(in[index] * ratio + Dtype(0.5), Dtype(0)), Dtype(1));
}
}
template <typename Dtype>
void NLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int count = bottom[0]->count();
NLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, Dtype(ratio_), top_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void NLUBackward(const int n, const float alpha, const Dtype* in_diff,
const Dtype* bottom_data, const Dtype* noise, Dtype ratio, Dtype thre, Dtype* out_diff) {
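  // Pass the upstream gradient scaled by ratio inside the unsaturated region (|x| <= thre); outside it,
  // replace it with a pull-back term proportional to the overshoot, then add the injected noise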
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * ( (Abs(bottom_data[index]) > thre) ? Dtype(0) : ratio)
+ ( (Abs(bottom_data[index]) > thre) ? ( Sign(bottom_data[index])*alpha*(Abs(bottom_data[index]) - thre) ) : Dtype(0))
+ noise[index];
}
}
template <typename Dtype>
void NLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[0]) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
Dtype* noise_gaussian =
static_cast<Dtype*>(rand_vec_.mutable_gpu_data());
Dtype fSTD = sqrt(eta_/pow(1+t_,gamma_));
caffe_gpu_rng_gaussian(count, Dtype(max_mean_), fSTD, noise_gaussian);
Dtype dThre = 1.0 / (2 * ratio_);
// NOLINT_NEXT_LINE(whitespace/operators)
NLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, this->alpha_, top_diff, bottom_data, noise_gaussian, ratio_, dThre, bottom_diff);
CUDA_POST_KERNEL_CHECK;
t_++;
}
}
INSTANTIATE_LAYER_GPU_FUNCS(NLULayer);
} // namespace caffe
|
3c1f0bcbd0ff852484792f5895528374ebe94ca2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cassert>
#include <stdio.h>
#include <cudaTools/diagnostic.h>
#include <cudaTools/performance.h>
// Fixed number of threads per block.
static const int THREADS_PER_BLOCK = 32;
// Simple kernel which adds 1 to every array entry.
__global__ void AddOne( int i_numElements, float* o_array )
{
int globalIndex = threadIdx.x + ( blockIdx.x * blockDim.x );
    if ( globalIndex >= i_numElements )
{
return;
}
o_array[ globalIndex ] += 1.0f;
}
// Simple kernel which multiplies each array entry by 5.
__global__ void MultiplyFive( int i_numElements, float* o_array )
{
int globalIndex = threadIdx.x + ( blockIdx.x * blockDim.x );
    if ( globalIndex >= i_numElements )
{
return;
}
o_array[ globalIndex ] *= 5.0f;
}
int main( int i_argc, char** i_argv )
{
// Allocate cuda stream.
hipStream_t stream;
CUDA_CHECK( hipStreamCreate( &stream ) );
// Allocate host array and fill with "1"s.
int numElements = 128;
float* hostArray = ( float* ) malloc( sizeof( float ) * numElements );
for ( size_t i = 0; i < numElements; ++i )
{
hostArray[ i ] = 1.0f;
}
// Allocate device array.
int numBlocks = ( numElements + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;
float* deviceArray = nullptr;
CUDA_CHECK( hipMalloc( &deviceArray, sizeof( float ) * numElements ) );
{
CudaTimer timer;
// Upload asynchronously with stream.
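        // Note: hostArray comes from plain malloc (pageable memory), so these "async" copies cannot fully
        // overlap with other work; pinned (page-locked) host memory would be required for that.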
CUDA_CHECK(
hipMemcpyAsync( deviceArray, hostArray, sizeof( float ) * numElements, hipMemcpyHostToDevice, stream ) );
// Execute kernels with stream & synchronize stream.
hipLaunchKernelGGL(( AddOne), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, stream , numElements, deviceArray );
hipLaunchKernelGGL(( MultiplyFive), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, stream , numElements, deviceArray );
// Download device -> host.
CUDA_CHECK(
hipMemcpyAsync( hostArray, deviceArray, sizeof( float ) * numElements, hipMemcpyDeviceToHost, stream ) );
hipStreamSynchronize( stream );
printf( "Took: %f ms\n", timer.Stop() );
}
for ( size_t i = 0; i < numElements; ++i )
{
assert( hostArray[ i ] == 10.0f );
}
// Deallocate resources.
free( hostArray );
CUDA_CHECK( hipFree( deviceArray ) );
CUDA_CHECK( hipStreamDestroy( stream ) );
}
| 3c1f0bcbd0ff852484792f5895528374ebe94ca2.cu | #include <cassert>
#include <stdio.h>
#include <cudaTools/diagnostic.h>
#include <cudaTools/performance.h>
// Fixed number of threads per block.
static const int THREADS_PER_BLOCK = 32;
// Simple kernel which adds 1 to every array entry.
__global__ void AddOne( int i_numElements, float* o_array )
{
int globalIndex = threadIdx.x + ( blockIdx.x * blockDim.x );
    if ( globalIndex >= i_numElements )
{
return;
}
o_array[ globalIndex ] += 1.0f;
}
// Simple kernel which multiplies each array entry by 5.
__global__ void MultiplyFive( int i_numElements, float* o_array )
{
int globalIndex = threadIdx.x + ( blockIdx.x * blockDim.x );
    if ( globalIndex >= i_numElements )
{
return;
}
o_array[ globalIndex ] *= 5.0f;
}
int main( int i_argc, char** i_argv )
{
// Allocate cuda stream.
cudaStream_t stream;
CUDA_CHECK( cudaStreamCreate( &stream ) );
// Allocate host array and fill with "1"s.
int numElements = 128;
float* hostArray = ( float* ) malloc( sizeof( float ) * numElements );
for ( size_t i = 0; i < numElements; ++i )
{
hostArray[ i ] = 1.0f;
}
// Allocate device array.
int numBlocks = ( numElements + THREADS_PER_BLOCK - 1 ) / THREADS_PER_BLOCK;
float* deviceArray = nullptr;
CUDA_CHECK( cudaMalloc( &deviceArray, sizeof( float ) * numElements ) );
{
CudaTimer timer;
// Upload asynchronously with stream.
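        // Note: hostArray comes from plain malloc (pageable memory), so these "async" copies cannot fully
        // overlap with other work; pinned (page-locked) host memory would be required for that.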
CUDA_CHECK(
cudaMemcpyAsync( deviceArray, hostArray, sizeof( float ) * numElements, cudaMemcpyHostToDevice, stream ) );
// Execute kernels with stream & synchronize stream.
AddOne<<< numBlocks, THREADS_PER_BLOCK, 0, stream >>>( numElements, deviceArray );
MultiplyFive<<< numBlocks, THREADS_PER_BLOCK, 0, stream >>>( numElements, deviceArray );
// Download device -> host.
CUDA_CHECK(
cudaMemcpyAsync( hostArray, deviceArray, sizeof( float ) * numElements, cudaMemcpyDeviceToHost, stream ) );
cudaStreamSynchronize( stream );
printf( "Took: %f ms\n", timer.Stop() );
}
for ( size_t i = 0; i < numElements; ++i )
{
assert( hostArray[ i ] == 10.0f );
}
// Deallocate resources.
free( hostArray );
CUDA_CHECK( cudaFree( deviceArray ) );
CUDA_CHECK( cudaStreamDestroy( stream ) );
}
|
9dbb773a51f73ff8baaf469b69f7fd3505e91c68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "force.h"
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 8
#endif
__global__ void applyLocalForce(float *initial_field, float *final_field, float *force, float *pos, float r, unsigned int size_x, unsigned int size_y, unsigned int size_z) {
int bx=blockIdx.x, by=blockIdx.y, bz=blockIdx.z;
int tx=threadIdx.x, ty=threadIdx.y, tz=threadIdx.z;
int x=bx*blockDim.x+tx;
int y=by*blockDim.y+ty;
int z=bz*blockDim.z+tz;
if(x<size_x && y<size_y && z<size_z) {
if(sqrt(pow(pos[0] - x*LATTICE_SIZE,2) + pow(pos[1] - y*LATTICE_SIZE,2) + pow(pos[2] - z*LATTICE_SIZE,2)) < r) {
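			// Weight the force by a Gaussian falloff of the squared distance from the application point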
float gaussian = exp(-1*(pow(pos[0]-x*LATTICE_SIZE,2)+pow(pos[1]-y*LATTICE_SIZE,2)+pow(pos[2]-z*LATTICE_SIZE,2))/r);
final_field[((z*size_y+y)*size_x+x)*3+0] = initial_field[((z*size_y+y)*size_x+x)*3+0] + force[0]*DELTA_T*gaussian;
final_field[((z*size_y+y)*size_x+x)*3+1] = initial_field[((z*size_y+y)*size_x+x)*3+1] + force[1]*DELTA_T*gaussian;
final_field[((z*size_y+y)*size_x+x)*3+2] = initial_field[((z*size_y+y)*size_x+x)*3+2] + force[2]*DELTA_T*gaussian;
} else {
final_field[((z*size_y+y)*size_x+x)*3+0] = initial_field[((z*size_y+y)*size_x+x)*3+0];
final_field[((z*size_y+y)*size_x+x)*3+1] = initial_field[((z*size_y+y)*size_x+x)*3+1];
final_field[((z*size_y+y)*size_x+x)*3+2] = initial_field[((z*size_y+y)*size_x+x)*3+2];
}
}
}
__global__ void applyGlobalForce(float *initial_field, float *final_field, float *force, unsigned int n) {
int i = blockDim.x*blockIdx.x+threadIdx.x;
if(i<n) {
final_field[i] = initial_field[i] + force[i%3]*DELTA_T;
}
}
void localForce(float *&vec_field, float *force, float *pos, float r, unsigned int size_x, unsigned int size_y, unsigned int size_z) {
dim3 DimBlock(BLOCK_SIZE,BLOCK_SIZE,BLOCK_SIZE);
dim3 DimGrid((size_x-1)/BLOCK_SIZE+1,(size_y-1)/BLOCK_SIZE+1,(size_z-1)/BLOCK_SIZE+1);
float *v2, *temp;
hipMalloc((void**) &v2, 3*size_x*size_y*size_z*sizeof(float));
hipLaunchKernelGGL(( applyLocalForce), dim3(DimGrid), dim3(DimBlock), 0, 0, vec_field, v2, force, pos, r, size_x, size_y, size_z);
hipLaunchKernelGGL(( applyVelocityBoundary), dim3(DimGrid), dim3(DimBlock), 0, 0, v2, size_x, size_y, size_z);
temp = vec_field;
vec_field = v2;
v2 = temp;
hipFree(v2);
}
void globalForce(float *&vec_field, float *force, unsigned int size_x, unsigned int size_y, unsigned int size_z){
unsigned int n = 3*size_x*size_y*size_z;
dim3 DimBlock(1000,1,1);
dim3 DimGrid((n-1)/1000+1,1,1);
dim3 DimBlock_3D(BLOCK_SIZE,BLOCK_SIZE,BLOCK_SIZE);
dim3 DimGrid_3D((size_x-1)/BLOCK_SIZE+1,(size_y-1)/BLOCK_SIZE+1,(size_z-1)/BLOCK_SIZE+1);
float *v2, *temp;
hipMalloc((void**) &v2,n*sizeof(float));
hipDeviceSynchronize();
hipLaunchKernelGGL(( applyGlobalForce), dim3(DimGrid), dim3(DimBlock), 0, 0, vec_field, v2, force, n);
hipLaunchKernelGGL(( applyVelocityBoundary), dim3(DimGrid_3D), dim3(DimBlock_3D), 0, 0, v2, size_x, size_y, size_z);
temp = vec_field;
vec_field = v2;
v2 = temp;
hipFree(v2);
};
| 9dbb773a51f73ff8baaf469b69f7fd3505e91c68.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "force.h"
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 8
#endif
__global__ void applyLocalForce(float *initial_field, float *final_field, float *force, float *pos, float r, unsigned int size_x, unsigned int size_y, unsigned int size_z) {
int bx=blockIdx.x, by=blockIdx.y, bz=blockIdx.z;
int tx=threadIdx.x, ty=threadIdx.y, tz=threadIdx.z;
int x=bx*blockDim.x+tx;
int y=by*blockDim.y+ty;
int z=bz*blockDim.z+tz;
if(x<size_x && y<size_y && z<size_z) {
if(sqrt(pow(pos[0] - x*LATTICE_SIZE,2) + pow(pos[1] - y*LATTICE_SIZE,2) + pow(pos[2] - z*LATTICE_SIZE,2)) < r) {
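			// Weight the force by a Gaussian falloff of the squared distance from the application point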
float gaussian = exp(-1*(pow(pos[0]-x*LATTICE_SIZE,2)+pow(pos[1]-y*LATTICE_SIZE,2)+pow(pos[2]-z*LATTICE_SIZE,2))/r);
final_field[((z*size_y+y)*size_x+x)*3+0] = initial_field[((z*size_y+y)*size_x+x)*3+0] + force[0]*DELTA_T*gaussian;
final_field[((z*size_y+y)*size_x+x)*3+1] = initial_field[((z*size_y+y)*size_x+x)*3+1] + force[1]*DELTA_T*gaussian;
final_field[((z*size_y+y)*size_x+x)*3+2] = initial_field[((z*size_y+y)*size_x+x)*3+2] + force[2]*DELTA_T*gaussian;
} else {
final_field[((z*size_y+y)*size_x+x)*3+0] = initial_field[((z*size_y+y)*size_x+x)*3+0];
final_field[((z*size_y+y)*size_x+x)*3+1] = initial_field[((z*size_y+y)*size_x+x)*3+1];
final_field[((z*size_y+y)*size_x+x)*3+2] = initial_field[((z*size_y+y)*size_x+x)*3+2];
}
}
}
__global__ void applyGlobalForce(float *initial_field, float *final_field, float *force, unsigned int n) {
int i = blockDim.x*blockIdx.x+threadIdx.x;
if(i<n) {
final_field[i] = initial_field[i] + force[i%3]*DELTA_T;
}
}
void localForce(float *&vec_field, float *force, float *pos, float r, unsigned int size_x, unsigned int size_y, unsigned int size_z) {
dim3 DimBlock(BLOCK_SIZE,BLOCK_SIZE,BLOCK_SIZE);
dim3 DimGrid((size_x-1)/BLOCK_SIZE+1,(size_y-1)/BLOCK_SIZE+1,(size_z-1)/BLOCK_SIZE+1);
float *v2, *temp;
cudaMalloc((void**) &v2, 3*size_x*size_y*size_z*sizeof(float));
applyLocalForce<<<DimGrid, DimBlock>>>(vec_field, v2, force, pos, r, size_x, size_y, size_z);
applyVelocityBoundary<<<DimGrid, DimBlock>>>(v2, size_x, size_y, size_z);
temp = vec_field;
vec_field = v2;
v2 = temp;
cudaFree(v2);
}
void globalForce(float *&vec_field, float *force, unsigned int size_x, unsigned int size_y, unsigned int size_z){
unsigned int n = 3*size_x*size_y*size_z;
dim3 DimBlock(1000,1,1);
dim3 DimGrid((n-1)/1000+1,1,1);
dim3 DimBlock_3D(BLOCK_SIZE,BLOCK_SIZE,BLOCK_SIZE);
dim3 DimGrid_3D((size_x-1)/BLOCK_SIZE+1,(size_y-1)/BLOCK_SIZE+1,(size_z-1)/BLOCK_SIZE+1);
float *v2, *temp;
cudaMalloc((void**) &v2,n*sizeof(float));
cudaDeviceSynchronize();
applyGlobalForce<<<DimGrid, DimBlock>>>(vec_field, v2, force, n);
applyVelocityBoundary<<<DimGrid_3D, DimBlock_3D>>>(v2, size_x, size_y, size_z);
temp = vec_field;
vec_field = v2;
v2 = temp;
cudaFree(v2);
};
|
31b9b0615030fc3acac08a5188ee80860811e64e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "KNN_CUDA.cuh"
__global__ void cu_insertion_sort(float *dist, int *ind, int dataset_size, int query_size, int k) {
// Variables
int l, i, j;
float *p_dist;
int *p_ind;
float curr_dist, max_dist;
int curr_row, max_row;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex < query_size) {
// Pointer shift, initialization, and max value
p_dist = dist + xIndex * dataset_size;
p_ind = ind + xIndex * dataset_size;
max_dist = p_dist[0];
p_ind[0] = 1;
// Part 1 : sort the first k elements
for (l = 1; l < k; l++) {
curr_row = l;
curr_dist = p_dist[curr_row];
if (curr_dist < max_dist) {
i = l - 1;
for (int a = 0; a < l - 1; a++) {
if (p_dist[a] > curr_dist) {
i = a;
break;
}
}
for (j = l; j > i; j--) {
p_dist[j] = p_dist[j - 1];
p_ind[j] = p_ind[j - 1];
}
p_dist[i] = curr_dist;
p_ind[i] = l + 1;
}
else {
p_ind[l] = l + 1;
}
max_dist = p_dist[curr_row];
}
// Part 2 : insert the remaining elements into the first k sorted entries
max_row = k - 1;
for (l = k; l < dataset_size; l++) {
curr_dist = p_dist[l];
if (curr_dist < max_dist) {
i = k - 1;
for (int a = 0; a < k - 1; a++) {
if (p_dist[a] > curr_dist) {
i = a;
break;
}
}
for (j = k - 1; j > i; j--) {
p_dist[j] = p_dist[(j - 1)];
p_ind[j] = p_ind[(j - 1)];
}
p_dist[i] = curr_dist;
p_ind[i] = l;
max_dist = p_dist[max_row];
}
}
}
}
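// Host wrapper: launches a single block of query_size threads with k fixed at 50.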
void insertion_sort_cuda(float* result, int* index, int dataset_size, int query_size) {
dim3 dim_grid_sort(1);
dim3 dim_block_sort(query_size);
hipLaunchKernelGGL(( cu_insertion_sort), dim3(dim_grid_sort), dim3(dim_block_sort), 0, 0, result, index, dataset_size, query_size, 50);
hipDeviceSynchronize();
}
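// Prints the computed neighbour indices alongside the groundtruth, which is
// laid out as 100 neighbours per query.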
void print_result(thrust::host_vector<int> device_result_index, vector<int> groundtruth_dataset, int dataset_size, int query_size, int k, bool p) {
thrust::host_vector<int> host_result_index = device_result_index;
if (p) {
for (int i = 0; i < query_size; i++) {
cout << "###################### QUERY" << i << " ###################### " << endl;
for (int j = 0; j < k; j++) {
cout << " MY INDEX : " << host_result_index[j + i * dataset_size] << endl;
cout << "GROUNDTRUTH: " << groundtruth_dataset[j + i * 100] << endl << endl;
}
cout << endl << endl;
}
}
}
| 31b9b0615030fc3acac08a5188ee80860811e64e.cu | #include "KNN_CUDA.cuh"
__global__ void cu_insertion_sort(float *dist, int *ind, int dataset_size, int query_size, int k) {
// Variables
int l, i, j;
float *p_dist;
int *p_ind;
float curr_dist, max_dist;
int curr_row, max_row;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex < query_size) {
// Pointer shift, initialization, and max value
p_dist = dist + xIndex * dataset_size;
p_ind = ind + xIndex * dataset_size;
max_dist = p_dist[0];
p_ind[0] = 1;
// Part 1 : sort the first k elements
for (l = 1; l < k; l++) {
curr_row = l;
curr_dist = p_dist[curr_row];
if (curr_dist < max_dist) {
i = l - 1;
for (int a = 0; a < l - 1; a++) {
if (p_dist[a] > curr_dist) {
i = a;
break;
}
}
for (j = l; j > i; j--) {
p_dist[j] = p_dist[j - 1];
p_ind[j] = p_ind[j - 1];
}
p_dist[i] = curr_dist;
p_ind[i] = l + 1;
}
else {
p_ind[l] = l + 1;
}
max_dist = p_dist[curr_row];
}
// Part 2 : insert the remaining elements into the first k sorted entries
max_row = k - 1;
for (l = k; l < dataset_size; l++) {
curr_dist = p_dist[l];
if (curr_dist < max_dist) {
i = k - 1;
for (int a = 0; a < k - 1; a++) {
if (p_dist[a] > curr_dist) {
i = a;
break;
}
}
for (j = k - 1; j > i; j--) {
p_dist[j] = p_dist[(j - 1)];
p_ind[j] = p_ind[(j - 1)];
}
p_dist[i] = curr_dist;
p_ind[i] = l;
max_dist = p_dist[max_row];
}
}
}
}
void insertion_sort_cuda(float* result, int* index, int dataset_size, int query_size) {
dim3 dim_grid_sort(1);
dim3 dim_block_sort(query_size);
cu_insertion_sort<<<dim_grid_sort, dim_block_sort>>>(result, index, dataset_size, query_size, 50);
cudaDeviceSynchronize();
}
void print_result(thrust::host_vector<int> device_result_index, vector<int> groundtruth_dataset, int dataset_size, int query_size, int k, bool p) {
thrust::host_vector<int> host_result_index = device_result_index;
if (p) {
for (int i = 0; i < query_size; i++) {
cout << "###################### QUERY" << i << " ###################### " << endl;
for (int j = 0; j < k; j++) {
cout << " MY INDEX : " << host_result_index[j + i * dataset_size] << endl;
cout << "GROUNDTRUTH: " << groundtruth_dataset[j + i * 100] << endl << endl;
}
cout << endl << endl;
}
}
}
|
coalesced_put_message_size.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include <stdio.h>
#include <iostream>
#include "mpi.h"
#include "nvshmem.h"
#include "nvshmemx.h"
#include "time.cuh"
#undef CUDA_CHECK
#define CUDA_CHECK(stmt) \
do { \
hipError_t result = (stmt); \
if (hipSuccess != result) { \
fprintf(stderr, "[%s:%d] cuda failed with %s \n",\
__FILE__, __LINE__, hipGetErrorString(result));\
exit(-1); \
} \
} while (0)
#define MPI_CHECK(stmt) \
do { \
int result = (stmt); \
if (MPI_SUCCESS != result) { \
fprintf(stderr, "[%s:%d] MPI failed with error %d \n",\
__FILE__, __LINE__, result); \
exit(-1); \
} \
} while (0)
#define TID (threadIdx.x+blockIdx.x*blockDim.x)
#define WARPID ((threadIdx.x+blockIdx.x*blockDim.x)>>5)
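// The *_band_{block,warp,thread} kernels push the same set of messages using
// block-, warp-, and single-thread-granularity puts so their bandwidths can be compared.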
__global__ void long_band_block(int num_messages, int message_size, long long *remote_buffer, long long *local_buffer, int remote_pe)
{
for(int i = blockIdx.x; i<num_messages; i+=gridDim.x)
nvshmemx_longlong_put_block(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
__global__ void long_band_warp(int num_messages, int message_size, long long *remote_buffer, long long *local_buffer, int remote_pe)
{
for(int i = (TID>>5); i<num_messages; i+=((blockDim.x*gridDim.x)>>5))
nvshmemx_longlong_put_warp(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
__global__ void long_band_thread(int num_messages, int message_size, long long *remote_buffer, long long *local_buffer, int remote_pe)
{
for(int i = TID; i<num_messages; i+=blockDim.x*gridDim.x)
nvshmem_longlong_put(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
__global__ void char_band_block(int num_messages, int message_size, char *remote_buffer, char *local_buffer, int remote_pe)
{
for(int i = blockIdx.x; i<num_messages; i+=gridDim.x)
nvshmemx_putmem_block(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
__global__ void char_band_warp(int num_messages, int message_size, char *remote_buffer, char *local_buffer, int remote_pe)
{
for(int i = (TID >> 5); i<num_messages; i+=((blockDim.x*gridDim.x)>>5))
nvshmemx_putmem_warp(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
__global__ void char_band_thread(int num_messages, int message_size, char *remote_buffer, char *local_buffer, int remote_pe)
{
for(int i = TID; i<num_messages; i+=blockDim.x*gridDim.x)
nvshmem_putmem((void *)(remote_buffer+message_size*i), (void *)(local_buffer+message_size*i), message_size, remote_pe);
}
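// Benchmark driver: for message sizes from 1 KiB to 1 MiB, each PE sends
// num_messages messages to every other PE and reports average time and bandwidth.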
int main (int c, char *v[])
{
int rank, nranks;
MPI_Comm mpi_comm;
nvshmemx_init_attr_t attr;
int mype, npes;
MPI_CHECK(MPI_Init(&c, &v));
MPI_CHECK(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
MPI_CHECK(MPI_Comm_size(MPI_COMM_WORLD, &nranks));
mpi_comm = MPI_COMM_WORLD;
attr.mpi_comm = &mpi_comm;
nvshmemx_init_attr (NVSHMEMX_INIT_WITH_MPI_COMM, &attr);
mype = nvshmem_my_pe();
npes = nvshmem_n_pes();
//application picks the device each PE will use
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
printf("[%d] has %d GPUs, setDevice on GPU %d\n", mype, deviceCount, mype%deviceCount);
CUDA_CHECK(hipSetDevice(mype%deviceCount));
int bytes = 1<<30;
char * remote_buffer = (char *)nvshmem_malloc(sizeof(char)*bytes);
char * local_buffer;
local_buffer = (char *)nvshmem_malloc(sizeof(char)*bytes);
GpuTimer timer;
float totaltime = 0.0;
int message_bytes = 1024*1024;
int num_messages = bytes/message_bytes;
hipStream_t *streams;
streams = (hipStream_t *)malloc(sizeof(hipStream_t)*(npes-1));
for(int i=0; i<npes-1; i++)
hipStreamCreateWithFlags(streams+i, hipStreamNonBlocking);
int numBlock = 160;
int numThread = 1024;
int num_rounds = 20;
CUDA_CHECK(hipOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)long_band_block));
nvshmem_barrier_all();
for(int size_power = 0; size_power <=10; size_power++)
{
totaltime = 0.0;
message_bytes = (1<<size_power)*1024;
std::cout << mype << " send "<< num_messages << " of messages to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_longlong_put using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
timer.Start();
for(int j=0; j<npes-1; j++)
{
hipLaunchKernelGGL(( long_band_block), dim3(numBlock), dim3(numThread), 0, streams[j], num_messages, message_bytes/sizeof(long long), (long long *)remote_buffer, (long long *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
hipDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(num_messages*message_bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
}
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-----------------------------------------\n";
nvshmem_barrier_all();
for(int size_power = 0; size_power <=10; size_power++)
{
totaltime = 0.0;
message_bytes = (1<<size_power)*1024;
std::cout << mype << " send "<< num_messages << " of messages to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_putmem_thread using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
timer.Start();
for(int j=0; j<npes-1; j++)
{
hipLaunchKernelGGL(( char_band_thread), dim3(numBlock), dim3(numThread), 0, streams[j], num_messages, message_bytes/sizeof(char), (char *)remote_buffer, (char *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
hipDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(num_messages*message_bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
}
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-----------------------------------------\n";
nvshmem_barrier_all();
printf("[%d of %d] run complete \n", mype, npes);
nvshmem_free(remote_buffer);
nvshmem_free(local_buffer);
nvshmem_finalize();
MPI_CHECK(MPI_Finalize());
return 0;
}
| coalesced_put_message_size.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include <stdio.h>
#include <iostream>
#include "mpi.h"
#include "nvshmem.h"
#include "nvshmemx.h"
#include "time.cuh"
#undef CUDA_CHECK
#define CUDA_CHECK(stmt) \
do { \
cudaError_t result = (stmt); \
if (cudaSuccess != result) { \
fprintf(stderr, "[%s:%d] cuda failed with %s \n",\
__FILE__, __LINE__, cudaGetErrorString(result));\
exit(-1); \
} \
} while (0)
#define MPI_CHECK(stmt) \
do { \
int result = (stmt); \
if (MPI_SUCCESS != result) { \
fprintf(stderr, "[%s:%d] MPI failed with error %d \n",\
__FILE__, __LINE__, result); \
exit(-1); \
} \
} while (0)
#define TID (threadIdx.x+blockIdx.x*blockDim.x)
#define WARPID ((threadIdx.x+blockIdx.x*blockDim.x)>>5)
__global__ void long_band_block(int num_messages, int message_size, long long *remote_buffer, long long *local_buffer, int remote_pe)
{
for(int i = blockIdx.x; i<num_messages; i+=gridDim.x)
nvshmemx_longlong_put_block(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
__global__ void long_band_warp(int num_messages, int message_size, long long *remote_buffer, long long *local_buffer, int remote_pe)
{
for(int i = (TID>>5); i<num_messages; i+=((blockDim.x*gridDim.x)>>5))
nvshmemx_longlong_put_warp(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
__global__ void long_band_thread(int num_messages, int message_size, long long *remote_buffer, long long *local_buffer, int remote_pe)
{
for(int i = TID; i<num_messages; i+=blockDim.x*gridDim.x)
nvshmem_longlong_put(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
__global__ void char_band_block(int num_messages, int message_size, char *remote_buffer, char *local_buffer, int remote_pe)
{
for(int i = blockIdx.x; i<num_messages; i+=gridDim.x)
nvshmemx_putmem_block(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
__global__ void char_band_warp(int num_messages, int message_size, char *remote_buffer, char *local_buffer, int remote_pe)
{
for(int i = (TID >> 5); i<num_messages; i+=((blockDim.x*gridDim.x)>>5))
nvshmemx_putmem_warp(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
__global__ void char_band_thread(int num_messages, int message_size, char *remote_buffer, char *local_buffer, int remote_pe)
{
for(int i = TID; i<num_messages; i+=blockDim.x*gridDim.x)
nvshmem_putmem((void *)(remote_buffer+message_size*i), (void *)(local_buffer+message_size*i), message_size, remote_pe);
}
int main (int c, char *v[])
{
int rank, nranks;
MPI_Comm mpi_comm;
nvshmemx_init_attr_t attr;
int mype, npes;
MPI_CHECK(MPI_Init(&c, &v));
MPI_CHECK(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
MPI_CHECK(MPI_Comm_size(MPI_COMM_WORLD, &nranks));
mpi_comm = MPI_COMM_WORLD;
attr.mpi_comm = &mpi_comm;
nvshmemx_init_attr (NVSHMEMX_INIT_WITH_MPI_COMM, &attr);
mype = nvshmem_my_pe();
npes = nvshmem_n_pes();
//application picks the device each PE will use
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
printf("[%d] has %d GPUs, setDevice on GPU %d\n", mype, deviceCount, mype%deviceCount);
CUDA_CHECK(cudaSetDevice(mype%deviceCount));
int bytes = 1<<30;
char * remote_buffer = (char *)nvshmem_malloc(sizeof(char)*bytes);
char * local_buffer;
local_buffer = (char *)nvshmem_malloc(sizeof(char)*bytes);
GpuTimer timer;
float totaltime = 0.0;
int message_bytes = 1024*1024;
int num_messages = bytes/message_bytes;
cudaStream_t *streams;
streams = (cudaStream_t *)malloc(sizeof(cudaStream_t)*(npes-1));
for(int i=0; i<npes-1; i++)
cudaStreamCreateWithFlags(streams+i, cudaStreamNonBlocking);
int numBlock = 160;
int numThread = 1024;
int num_rounds = 20;
CUDA_CHECK(cudaOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)long_band_block));
nvshmem_barrier_all();
for(int size_power = 0; size_power <=10; size_power++)
{
totaltime = 0.0;
message_bytes = (1<<size_power)*1024;
std::cout << mype << " send "<< num_messages << " of messages to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_longlong_put using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
timer.Start();
for(int j=0; j<npes-1; j++)
{
long_band_block<<<numBlock, numThread, 0, streams[j]>>>(num_messages, message_bytes/sizeof(long long), (long long *)remote_buffer, (long long *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
cudaDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(num_messages*message_bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
}
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-----------------------------------------\n";
nvshmem_barrier_all();
for(int size_power = 0; size_power <=10; size_power++)
{
totaltime = 0.0;
message_bytes = (1<<size_power)*1024;
std::cout << mype << " send "<< num_messages << " of messages to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_putmem_thread using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
timer.Start();
for(int j=0; j<npes-1; j++)
{
char_band_thread<<<numBlock, numThread, 0, streams[j]>>>(num_messages, message_bytes/sizeof(char), (char *)remote_buffer, (char *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
cudaDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(num_messages*message_bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
}
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-----------------------------------------\n";
nvshmem_barrier_all();
printf("[%d of %d] run complete \n", mype, npes);
nvshmem_free(remote_buffer);
nvshmem_free(local_buffer);
nvshmem_finalize();
MPI_CHECK(MPI_Finalize());
return 0;
}
|
0e7e25288dcf0f45382dd799e6ac0a639d094938.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2021, Oak Ridge National Laboratory.
* MGARD-GPU: MultiGrid Adaptive Reduction of Data Accelerated by GPUs
* Author: Jieyang Chen ([email protected])
* Date: September 27, 2021
*/
// #include "compressors.hpp"
#include "cuda/Common.h"
#include "cuda/CommonInternal.h"
#include "cuda/LosslessCompression.h"
#include "cuda/ParallelHuffman/huffman_workflow.cuh"
#include <typeinfo>
#include <zstd.h>
namespace mgard {
void huffman_encoding(long int *quantized_data, const std::size_t n,
unsigned char **out_data_hit, size_t *out_data_hit_size,
unsigned char **out_data_miss, size_t *out_data_miss_size,
unsigned char **out_tree, size_t *out_tree_size);
void huffman_decoding(long int *quantized_data,
const std::size_t quantized_data_size,
unsigned char *out_data_hit, size_t out_data_hit_size,
unsigned char *out_data_miss, size_t out_data_miss_size,
unsigned char *out_tree, size_t out_tree_size);
} // namespace mgard
namespace mgard_cuda {
/*! CHECK
* Check that the condition holds. If it doesn't print a message and die.
*/
#define CHECK(cond, ...) \
do { \
if (!(cond)) { \
fprintf(stderr, "%s:%d CHECK(%s) failed: ", __FILE__, __LINE__, #cond); \
fprintf(stderr, "" __VA_ARGS__); \
fprintf(stderr, "\n"); \
exit(1); \
} \
} while (0)
/*! CHECK_ZSTD
* Check the zstd error code and die if an error occurred after printing a
* message.
*/
/*! CHECK_ZSTD
* Check the zstd error code and die if an error occurred after printing a
* message.
*/
#define CHECK_ZSTD(fn, ...) \
do { \
size_t const err = (fn); \
CHECK(!ZSTD_isError(err), "%s", ZSTD_getErrorName(err)); \
} while (0)
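// CPU path: Huffman-encodes the quantized data, packs the tree, hit and miss
// streams into one payload, zstd-compresses it, and prepends the three stream
// sizes as a header.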
unsigned char *compress_memory_huffman(long int *const src,
const std::size_t srcLen,
std::size_t &outsize) {
unsigned char *out_data_hit = 0;
size_t out_data_hit_size;
unsigned char *out_data_miss = 0;
size_t out_data_miss_size;
unsigned char *out_tree = 0;
size_t out_tree_size;
mgard::huffman_encoding(src, srcLen, &out_data_hit, &out_data_hit_size,
&out_data_miss, &out_data_miss_size, &out_tree,
&out_tree_size);
const size_t total_size =
out_data_hit_size / 8 + 4 + out_data_miss_size + out_tree_size;
unsigned char *payload = (unsigned char *)malloc(total_size);
unsigned char *bufp = payload;
if (out_tree_size) {
std::memcpy(bufp, out_tree, out_tree_size);
bufp += out_tree_size;
}
std::memcpy(bufp, out_data_hit, out_data_hit_size / 8 + 4);
bufp += out_data_hit_size / 8 + 4;
if (out_data_miss_size) {
std::memcpy(bufp, out_data_miss, out_data_miss_size);
bufp += out_data_miss_size;
}
free(out_tree);
free(out_data_hit);
free(out_data_miss);
// const MemoryBuffer<unsigned char> out_data =
// compress_memory_zstd(payload, total_size);
const size_t cBuffSize = ZSTD_compressBound(total_size);
unsigned char *const zstd_buffer = new unsigned char[cBuffSize];
const std::size_t cSize =
ZSTD_compress(zstd_buffer, cBuffSize, payload, total_size, 1);
CHECK_ZSTD(cSize);
// return MemoryBuffer<unsigned char>(buffer, cSize);
free(payload);
payload = 0;
const std::size_t bufferLen = 3 * sizeof(size_t) + cSize;
unsigned char *const buffer = new unsigned char[bufferLen];
outsize = bufferLen;
bufp = buffer;
*(size_t *)bufp = out_tree_size;
bufp += sizeof(size_t);
*(size_t *)bufp = out_data_hit_size;
bufp += sizeof(size_t);
*(size_t *)bufp = out_data_miss_size;
bufp += sizeof(size_t);
{
unsigned char const *const p = zstd_buffer;
std::copy(p, p + cSize, bufp);
}
// return MemoryBuffer<unsigned char>(buffer, bufferLen);
return buffer;
}
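// Reverses compress_memory_huffman: reads the three stream sizes from the
// header, zstd-decompresses the payload, and Huffman-decodes it into dst.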
void decompress_memory_huffman(unsigned char *const src,
const std::size_t srcLen, long int *const dst,
const std::size_t dstLen) {
unsigned char *out_data_hit = 0;
size_t out_data_hit_size;
unsigned char *out_data_miss = 0;
size_t out_data_miss_size;
unsigned char *out_tree = 0;
size_t out_tree_size;
unsigned char *buf = src;
out_tree_size = *(size_t *)buf;
buf += sizeof(size_t);
out_data_hit_size = *(size_t *)buf;
buf += sizeof(size_t);
out_data_miss_size = *(size_t *)buf;
buf += sizeof(size_t);
size_t total_huffman_size =
out_tree_size + out_data_hit_size / 8 + 4 + out_data_miss_size;
unsigned char *huffman_encoding_p =
(unsigned char *)malloc(total_huffman_size);
// decompress_memory_zstd(buf, srcLen - 3 * sizeof(size_t),
// huffman_encoding_p,
// total_huffman_size);
size_t const dSize = ZSTD_decompress(huffman_encoding_p, total_huffman_size,
buf, srcLen - 3 * sizeof(size_t));
CHECK_ZSTD(dSize);
/* When zstd knows the content size, it will error if it doesn't match. */
CHECK(dstLen == dSize, "Impossible because zstd will check this condition!");
out_tree = huffman_encoding_p;
out_data_hit = huffman_encoding_p + out_tree_size;
out_data_miss =
huffman_encoding_p + out_tree_size + out_data_hit_size / 8 + 4;
mgard::huffman_decoding(dst, dstLen, out_data_hit, out_data_hit_size,
out_data_miss, out_data_miss_size, out_tree,
out_tree_size);
free(huffman_encoding_p);
}
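// GPU lossless wrappers built on the nvCOMP manager API: Cascaded
// (RLE / delta / bitpack) and LZ4 compress/decompress.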
template <uint32_t D, typename T, typename C>
void cascaded_compress(Handle<D, T> &handle, C *input_data, size_t input_count,
void *&output_data, size_t &output_size, int n_rle,
int n_de, bool bitpack, int queue_idx) {
// nvcomp::CascadedCompressor compressor(nvcomp::TypeOf<C>(), n_rle, n_de,
// bitpack);
nvcompBatchedCascadedOpts_t options = nvcompBatchedCascadedDefaultOpts;
options.type = nvcomp::TypeOf<C>();
options.num_RLEs = n_rle;
options.num_deltas = n_de;
options.use_bp = bitpack;
nvcomp::CascadedManager nvcomp_manager{
options, *(hipStream_t *)handle.get(queue_idx)};
// size_t *temp_bytes;
// cudaMallocHostHelper((void **)&temp_bytes, sizeof(size_t));
// size_t *output_bytes;
// cudaMallocHostHelper((void **)&output_bytes, sizeof(size_t));
// compressor.configure(input_count * sizeof(C), temp_bytes, output_bytes);
auto comp_config =
nvcomp_manager.configure_compression(input_count * sizeof(C));
// void *temp_space;
// cudaMallocHelper(handle, &temp_space, *temp_bytes);
// cudaMallocHelper(handle, &output_data, *output_bytes);
hipMalloc(&output_data, comp_config.max_compressed_buffer_size);
// compressor.compress_async(input_data, input_count * sizeof(C), temp_space,
// *temp_bytes, output_data, output_bytes,
// *(hipStream_t *)handle.get(queue_idx));
uint8_t *output_data_uint8_t = (uint8_t *)output_data;
nvcomp_manager.compress((uint8_t *)input_data, output_data_uint8_t,
comp_config);
output_size = nvcomp_manager.get_compressed_output_size(output_data_uint8_t);
handle.sync(queue_idx);
// cudaFreeHelper(temp_space);
// cudaFreeHostHelper(temp_bytes);
// cudaFreeHostHelper(output_bytes);
}
template <uint32_t D, typename T, typename C>
void cascaded_decompress(Handle<D, T> &handle, void *input_data,
size_t input_size, C *&output_data, int queue_idx) {
// nvcomp::Decompressor<C> decompressor(input_data, input_size,
// *(hipStream_t
// *)handle.get(queue_idx));
// nvcomp::CascadedDecompressor decompressor;
auto decomp_nvcomp_manager = nvcomp::create_manager(
(uint8_t *)input_data, *(hipStream_t *)handle.get(queue_idx));
// size_t *temp_bytes;
// cudaMallocHostHelper((void **)&temp_bytes, sizeof(size_t));
// size_t *output_bytes;
// cudaMallocHostHelper((void **)&output_bytes, sizeof(size_t));
// decompressor.configure(input_data, input_size, temp_bytes, output_bytes,
// *(hipStream_t *)handle.get(queue_idx));
nvcomp::DecompressionConfig decomp_config =
decomp_nvcomp_manager->configure_decompression((uint8_t *)input_data);
// void *temp_space;
// cudaMallocHelper(handle, (void **)&temp_space, *temp_bytes);
// cudaMallocHelper(handle, (void **)&output_data, *output_bytes);
hipMalloc(&output_data, decomp_config.decomp_data_size);
uint8_t *output_data_uint8_t = (uint8_t *)output_data;
decomp_nvcomp_manager->decompress(output_data_uint8_t, (uint8_t *)input_data,
decomp_config);
// output_size = decomp_config.decomp_data_size;
handle.sync(queue_idx);
// decompressor.decompress_async(input_data, input_size, temp_space,
// *temp_bytes,
// output_data, *output_bytes,
// *(hipStream_t *)handle.get(queue_idx));
// handle.sync(queue_idx);
// cudaFreeHelper(temp_space);
// cudaFreeHostHelper(temp_bytes);
// cudaFreeHostHelper(output_bytes);
}
template <uint32_t D, typename T, typename C>
void lz4_compress(Handle<D, T> &handle, C *input_data, size_t input_count,
void *&output_data, size_t &output_size, size_t chunk_size,
int queue_idx) {
nvcompType_t dtype = NVCOMP_TYPE_UCHAR;
// nvcomp::LZ4Compressor compressor(chunk_size, dtype);
nvcomp::LZ4Manager nvcomp_manager{chunk_size, dtype,
*(hipStream_t *)handle.get(queue_idx)};
// size_t *temp_bytes;
// cudaMallocHostHelper((void **)&temp_bytes, sizeof(size_t));
// size_t *output_bytes;
// cudaMallocHostHelper((void **)&output_bytes, sizeof(size_t));
// compressor.configure(input_count * sizeof(C), temp_bytes, output_bytes);
nvcomp::CompressionConfig comp_config =
nvcomp_manager.configure_compression(input_count * sizeof(C));
// void *temp_space;
// cudaMallocHelper(handle, &temp_space, *temp_bytes);
cudaMallocHelper(handle, &output_data,
comp_config.max_compressed_buffer_size);
// compressor.compress_async(input_data, input_count * sizeof(C), temp_space,
// *temp_bytes, output_data, output_bytes,
// *(hipStream_t *)handle.get(queue_idx));
uint8_t *output_data_uint8_t = (uint8_t *)output_data;
nvcomp_manager.compress((uint8_t *)input_data, output_data_uint8_t,
comp_config);
output_size = nvcomp_manager.get_compressed_output_size(output_data_uint8_t);
handle.sync(queue_idx);
// output_size = *output_bytes;
// cudaFreeHelper(temp_space);
// cudaFreeHostHelper(temp_bytes);
// cudaFreeHostHelper(output_bytes);
}
template <uint32_t D, typename T, typename C>
void lz4_decompress(Handle<D, T> &handle, void *input_data, size_t input_size,
C *&output_data, size_t &output_size, int queue_idx) {
auto decomp_nvcomp_manager = nvcomp::create_manager(
(uint8_t *)input_data, *(hipStream_t *)handle.get(queue_idx));
// size_t *temp_bytes;
// cudaMallocHostHelper((void **)&temp_bytes, sizeof(size_t));
// size_t *output_bytes;
// cudaMallocHostHelper((void **)&output_bytes, sizeof(size_t));
// decompressor.configure(input_data, input_size, temp_bytes, output_bytes,
// *(hipStream_t *)handle.get(queue_idx));
nvcomp::DecompressionConfig decomp_config =
decomp_nvcomp_manager->configure_decompression((uint8_t *)input_data);
// void *temp_space;
// cudaMallocHelper(handle, (void **)&temp_space, *temp_bytes);
// cudaMallocHelper(handle, (void **)&output_data, *output_bytes);
hipMalloc(&output_data, decomp_config.decomp_data_size);
// decompressor.decompress_async(input_data, input_size, temp_space,
// *temp_bytes,
// output_data, *output_bytes,
// *(hipStream_t *)handle.get(queue_idx));
uint8_t *output_data_uint8_t = (uint8_t *)output_data;
decomp_nvcomp_manager->decompress(output_data_uint8_t, (uint8_t *)input_data,
decomp_config);
output_size = decomp_config.decomp_data_size;
handle.sync(queue_idx);
// cudaFreeHelper(temp_space);
// cudaFreeHostHelper(temp_bytes);
// cudaFreeHostHelper(output_bytes);
}
#define KERNELS(D, T, C) \
template void cascaded_compress<D, T, C>( \
Handle<D, T> & handle, C * input_data, size_t input_count, \
void *&output_data, size_t &output_size, int n_rle, int n_de, \
bool bitpack, int queue_idx); \
template void cascaded_decompress<D, T, C>( \
Handle<D, T> & handle, void *input_data, size_t input_size, \
C *&output_data, int queue_idx); \
template void lz4_compress<D, T, C>(Handle<D, T> & handle, C * input_data, \
size_t input_count, void *&output_data, \
size_t &output_size, size_t chunk_size, \
int queue_idx); \
template void lz4_decompress<D, T, C>( \
Handle<D, T> & handle, void *input_data, size_t input_size, \
C *&output_data, size_t &output_count, int queue_idx);
KERNELS(1, double, uint8_t)
KERNELS(1, float, uint8_t)
KERNELS(2, double, uint8_t)
KERNELS(2, float, uint8_t)
KERNELS(3, double, uint8_t)
KERNELS(3, float, uint8_t)
KERNELS(4, double, uint8_t)
KERNELS(4, float, uint8_t)
KERNELS(5, double, uint8_t)
KERNELS(5, float, uint8_t)
KERNELS(1, double, uint32_t)
KERNELS(1, float, uint32_t)
KERNELS(2, double, uint32_t)
KERNELS(2, float, uint32_t)
KERNELS(3, double, uint32_t)
KERNELS(3, float, uint32_t)
KERNELS(4, double, uint32_t)
KERNELS(4, float, uint32_t)
KERNELS(5, double, uint32_t)
KERNELS(5, float, uint32_t)
KERNELS(1, double, uint64_t)
KERNELS(1, float, uint64_t)
KERNELS(2, double, uint64_t)
KERNELS(2, float, uint64_t)
KERNELS(3, double, uint64_t)
KERNELS(3, float, uint64_t)
KERNELS(4, double, uint64_t)
KERNELS(4, float, uint64_t)
KERNELS(5, double, uint64_t)
KERNELS(5, float, uint64_t)
#undef KERNELS
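// Splits the quantized stream dqv into outlier values (at positions listed in
// outlier_idx) and the remaining primary values, copying each run with a
// separate device-to-device memcpy; CombineOutlierAndPrimary reverses the split.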
template <uint32_t D, typename T, typename S, typename Q>
void SeparateOutlierAndPrimary(Handle<D, T> &handle, S *dqv, size_t n,
size_t *outlier_idx, size_t outlier_count,
size_t primary_count, S *doutlier, Q *dprimary,
int queue_idx) {
// printf("compress outlier_idx: "); for(int i = 0; i < outlier_count; i++)
// {printf("%llu ", outlier_idx[i]);} printf("\n");
printf("compress outlier_count: %llu\n", outlier_count);
printf("compress primary_count: %llu\n", primary_count);
printf("start separating primary and outlier\n");
size_t p = 0;
size_t pp = 0;
size_t op = 0;
size_t size = outlier_idx[0] - 0;
// printf("copy primary\n");
if (size > 0) {
mgard_cuda::cudaMemcpyAsyncHelper(handle, dprimary + pp, dqv + p,
size * sizeof(Q), mgard_cuda::D2D,
queue_idx);
}
pp += size;
p += size;
for (int i = 0; i < outlier_count - 1; i++) {
size = 1;
// printf("copy outlier\n");
mgard_cuda::cudaMemcpyAsyncHelper(handle, doutlier + op, dqv + p,
size * sizeof(S), mgard_cuda::D2D,
queue_idx);
op += size;
p += size;
size = outlier_idx[i + 1] - outlier_idx[i] - 1;
// printf("copy primary %d %d %d\n", p, size, outlier_idx[outlier_idx.size()
// - 1]);
if (size > 0) {
mgard_cuda::cudaMemcpyAsyncHelper(handle, dprimary + pp, dqv + p,
size * sizeof(Q), mgard_cuda::D2D,
queue_idx);
}
pp += size;
p += size;
}
size = 1;
// printf("copy outlier\n");
mgard_cuda::cudaMemcpyAsyncHelper(handle, doutlier + op, dqv + p,
size * sizeof(S), mgard_cuda::D2D,
queue_idx);
op += size;
p += size;
size = n - outlier_idx[outlier_count - 1] - 1;
// printf("copy primary %d %d %d\n", p, size, outlier_idx[outlier_idx.size() -
// 1]);
if (size > 0) {
mgard_cuda::cudaMemcpyAsyncHelper(handle, dprimary + pp, dqv + p,
size * sizeof(Q), mgard_cuda::D2D,
queue_idx);
}
// printf("done copy primary\n");
pp += size;
p += size;
if (pp != primary_count || op != outlier_count) {
printf("Primary or outlier size mismatch!\n");
}
printf("done separating primary and outlier\n");
}
template <uint32_t D, typename T, typename S, typename Q>
void CombineOutlierAndPrimary(Handle<D, T> &handle, S *dqv, size_t n,
size_t *outlier_idx, size_t outlier_count,
size_t primary_count, S *doutlier, Q *dprimary,
int queue_idx) {
size_t p = 0;
size_t pp = 0;
size_t op = 0;
size_t size = outlier_idx[0] - 0;
// printf("copy primary\n");
if (size > 0) {
mgard_cuda::cudaMemcpyAsyncHelper(handle, dqv + p, dprimary + pp,
size * sizeof(Q), mgard_cuda::D2D,
queue_idx);
}
pp += size;
p += size;
for (int i = 0; i < outlier_count - 1; i++) {
size = 1;
// printf("copy outlier\n");
mgard_cuda::cudaMemcpyAsyncHelper(handle, dqv + p, doutlier + op,
size * sizeof(S), mgard_cuda::D2D,
queue_idx);
op += size;
p += size;
size = outlier_idx[i + 1] - outlier_idx[i] - 1;
// printf("copy primary %d %d %d\n", p, size, outlier_idx[outlier_idx.size()
// - 1]);
if (size > 0) {
mgard_cuda::cudaMemcpyAsyncHelper(handle, dqv + p, dprimary + pp,
size * sizeof(Q), mgard_cuda::D2D,
queue_idx);
}
pp += size;
p += size;
}
size = 1;
// printf("copy outlier\n");
mgard_cuda::cudaMemcpyAsyncHelper(handle, dqv + p, doutlier + op,
size * sizeof(S), mgard_cuda::D2D,
queue_idx);
op += size;
p += size;
size = n - outlier_idx[outlier_count - 1] - 1;
// printf("copy primary %d %d %d\n", p, size, outlier_idx[outlier_idx.size() -
// 1]);
if (size > 0) {
mgard_cuda::cudaMemcpyAsyncHelper(handle, dqv + p, dprimary + pp,
size * sizeof(Q), mgard_cuda::D2D,
queue_idx);
}
// printf("done copy primary\n");
pp += size;
p += size;
}
#define KERNELS(D, T, S, Q) \
template void SeparateOutlierAndPrimary<D, T, S, Q>( \
Handle<D, T> & handle, S * dqv, size_t n, size_t * outlier_idx,\
size_t outlier_count, \
size_t primary_count,\
S * doutlier, \
Q * dprimary, int queue_idx); \
template void CombineOutlierAndPrimary<D, T, S, Q>( \
Handle<D, T> & handle, S * dqv, size_t n, size_t * outlier_idx,\
size_t outlier_count, \
size_t primary_count,\
S * doutlier, \
Q * dprimary, int queue_idx);
KERNELS(1, double, int, uint32_t)
KERNELS(1, float, int, uint32_t)
KERNELS(2, double, int, uint32_t)
KERNELS(2, float, int, uint32_t)
KERNELS(3, double, int, uint32_t)
KERNELS(3, float, int, uint32_t)
KERNELS(4, double, int, uint32_t)
KERNELS(4, float, int, uint32_t)
KERNELS(5, double, int, uint32_t)
KERNELS(5, float, int, uint32_t)
#undef KERNELS
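// Thin wrappers around the GPU Huffman codec (HuffmanEncode / HuffmanDecode)
// pulled in from ParallelHuffman/huffman_workflow.cuh.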
template <uint32_t D, typename T, typename S, typename Q, typename H>
void huffman_compress(Handle<D, T> &handle, S *input_data, size_t input_count,
std::vector<size_t> &outlier_idx, H *&out_meta,
size_t &out_meta_size, H *&out_data,
size_t &out_data_size, int chunk_size, int dict_size,
int queue_idx) {
HuffmanEncode<D, T, S, Q, H>(handle, input_data, input_count, outlier_idx,
out_meta, out_meta_size, out_data, out_data_size,
chunk_size, dict_size);
}
template <uint32_t D, typename T, typename S, typename Q, typename H>
void huffman_decompress(Handle<D, T> &handle, H *in_meta, size_t in_meta_size,
H *in_data, size_t in_data_size, S *&output_data,
size_t &output_count, int queue_idx) {
HuffmanDecode<D, T, S, Q, H>(handle, output_data, output_count, in_meta,
in_meta_size, in_data, in_data_size);
}
#define KERNELS(D, T, S, Q, H) \
template void huffman_compress<D, T, S, Q, H>( \
Handle<D, T> & handle, S * input_data, size_t input_count, \
std::vector<size_t> & outlier_idx, H * &out_meta, \
size_t & out_meta_size, H * &out_data, size_t & out_data_size, \
int chunk_size, int dict_size, int queue_idx); \
template void huffman_decompress<D, T, S, Q, H>( \
Handle<D, T> & handle, H * in_meta, size_t in_meta_size, H * in_data, \
size_t in_data_size, S * &output_data, size_t & output_count, \
int queue_idx);
KERNELS(1, double, int, uint32_t, uint32_t)
KERNELS(1, float, int, uint32_t, uint32_t)
KERNELS(2, double, int, uint32_t, uint32_t)
KERNELS(2, float, int, uint32_t, uint32_t)
KERNELS(3, double, int, uint32_t, uint32_t)
KERNELS(3, float, int, uint32_t, uint32_t)
KERNELS(4, double, int, uint32_t, uint32_t)
KERNELS(4, float, int, uint32_t, uint32_t)
KERNELS(5, double, int, uint32_t, uint32_t)
KERNELS(5, float, int, uint32_t, uint32_t)
KERNELS(1, double, int, uint32_t, uint64_t)
KERNELS(1, float, int, uint32_t, uint64_t)
KERNELS(2, double, int, uint32_t, uint64_t)
KERNELS(2, float, int, uint32_t, uint64_t)
KERNELS(3, double, int, uint32_t, uint64_t)
KERNELS(3, float, int, uint32_t, uint64_t)
KERNELS(4, double, int, uint32_t, uint64_t)
KERNELS(4, float, int, uint32_t, uint64_t)
KERNELS(5, double, int, uint32_t, uint64_t)
KERNELS(5, float, int, uint32_t, uint64_t)
#undef KERNELS
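// CPU lossless path: widens the quantized int values to long int, runs the
// host-side Huffman + zstd routines above, and copies the result back to the device.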
template <uint32_t D, typename T, typename S, typename H>
void cpu_lossless_compression(Handle<D, T> &handle, S *input_data,
size_t input_count, H *&out_data,
size_t &out_data_size) {
int *int_vector = new int[input_count];
cudaMemcpyAsyncHelper(handle, int_vector, input_data, input_count * sizeof(S),
AUTO, 0);
handle.sync(0);
std::vector<long int> input_vector(input_count);
for (int i = 0; i < input_count; i++)
input_vector[i] = int_vector[i];
// printf("%u %u\n", sizeof(long int), sizeof(int));
// printf("dqv\n");
// print_matrix_cuda(1, input_count, input_data, input_count);
// printf("input_vector: ");
// for (int i = 0; i < input_vector.size(); i++) printf("%d ", input_vector[i]);
// printf("\n");
// Compress an array of data using `zstd`.
std::size_t zstd_outsize;
unsigned char *buffer = compress_memory_huffman(
input_vector.data(), input_vector.size() * sizeof(long int),
zstd_outsize);
out_data_size = zstd_outsize;
cudaMallocHelper(handle, (void **)&out_data, out_data_size);
cudaMemcpyAsyncHelper(handle, out_data, buffer, out_data_size, AUTO, 0);
handle.sync(0);
delete[] int_vector;
}
template <uint32_t D, typename T, typename S, typename H>
void cpu_lossless_decompression(Handle<D, T> &handle, H *input_data,
size_t input_count, S *&out_data,
size_t output_count) {
// printf("cpu decompression: %llu\n", input_count);
std::vector<unsigned char> input_vector(input_count);
cudaMemcpyAsyncHelper(handle, input_vector.data(), input_data, input_count,
AUTO, 0);
handle.sync(0);
// printf("copy done\n");
long int *output_vector = new long int[output_count];
int *int_vector = new int[output_count];
decompress_memory_huffman(
reinterpret_cast<unsigned char *>(input_vector.data()),
input_vector.size(), output_vector,
output_count * sizeof(*output_vector));
for (int i = 0; i < output_count; i++)
int_vector[i] = output_vector[i];
cudaMallocHelper(handle, (void **)&out_data, output_count * sizeof(S));
cudaMemcpyAsyncHelper(handle, out_data, int_vector, output_count * sizeof(S),
AUTO, 0);
handle.sync(0);
delete[] output_vector;
delete[] int_vector;
// printf("dqv\n");
// print_matrix_cuda(1, output_count, out_data, output_count);
}
#define KERNELS(D, T, S, H) \
template void cpu_lossless_compression<D, T, S, H>( \
Handle<D, T> & handle, S * input_data, size_t input_count, \
H * &out_data, size_t & out_data_size); \
template void cpu_lossless_decompression<D, T, S, H>( \
Handle<D, T> & handle, H * input_data, size_t input_count, \
S * &out_data, size_t output_count);
KERNELS(1, double, int, unsigned char)
KERNELS(1, float, int, unsigned char)
KERNELS(2, double, int, unsigned char)
KERNELS(2, float, int, unsigned char)
KERNELS(3, double, int, unsigned char)
KERNELS(3, float, int, unsigned char)
KERNELS(4, double, int, unsigned char)
KERNELS(4, float, int, unsigned char)
KERNELS(5, double, int, unsigned char)
KERNELS(5, float, int, unsigned char)
} // namespace mgard_cuda | 0e7e25288dcf0f45382dd799e6ac0a639d094938.cu | /*
* Copyright 2021, Oak Ridge National Laboratory.
* MGARD-GPU: MultiGrid Adaptive Reduction of Data Accelerated by GPUs
* Author: Jieyang Chen ([email protected])
* Date: September 27, 2021
*/
// #include "compressors.hpp"
#include "cuda/Common.h"
#include "cuda/CommonInternal.h"
#include "cuda/LosslessCompression.h"
#include "cuda/ParallelHuffman/huffman_workflow.cuh"
#include <typeinfo>
#include <zstd.h>
namespace mgard {
void huffman_encoding(long int *quantized_data, const std::size_t n,
unsigned char **out_data_hit, size_t *out_data_hit_size,
unsigned char **out_data_miss, size_t *out_data_miss_size,
unsigned char **out_tree, size_t *out_tree_size);
void huffman_decoding(long int *quantized_data,
const std::size_t quantized_data_size,
unsigned char *out_data_hit, size_t out_data_hit_size,
unsigned char *out_data_miss, size_t out_data_miss_size,
unsigned char *out_tree, size_t out_tree_size);
} // namespace mgard
namespace mgard_cuda {
/*! CHECK
* Check that the condition holds. If it doesn't print a message and die.
*/
#define CHECK(cond, ...) \
do { \
if (!(cond)) { \
fprintf(stderr, "%s:%d CHECK(%s) failed: ", __FILE__, __LINE__, #cond); \
fprintf(stderr, "" __VA_ARGS__); \
fprintf(stderr, "\n"); \
exit(1); \
} \
} while (0)
/*! CHECK_ZSTD
* Check the zstd error code and die if an error occurred after printing a
* message.
*/
/*! CHECK_ZSTD
* Check the zstd error code and die if an error occurred after printing a
* message.
*/
#define CHECK_ZSTD(fn, ...) \
do { \
size_t const err = (fn); \
CHECK(!ZSTD_isError(err), "%s", ZSTD_getErrorName(err)); \
} while (0)
unsigned char *compress_memory_huffman(long int *const src,
const std::size_t srcLen,
std::size_t &outsize) {
unsigned char *out_data_hit = 0;
size_t out_data_hit_size;
unsigned char *out_data_miss = 0;
size_t out_data_miss_size;
unsigned char *out_tree = 0;
size_t out_tree_size;
mgard::huffman_encoding(src, srcLen, &out_data_hit, &out_data_hit_size,
&out_data_miss, &out_data_miss_size, &out_tree,
&out_tree_size);
const size_t total_size =
out_data_hit_size / 8 + 4 + out_data_miss_size + out_tree_size;
unsigned char *payload = (unsigned char *)malloc(total_size);
unsigned char *bufp = payload;
if (out_tree_size) {
std::memcpy(bufp, out_tree, out_tree_size);
bufp += out_tree_size;
}
std::memcpy(bufp, out_data_hit, out_data_hit_size / 8 + 4);
bufp += out_data_hit_size / 8 + 4;
if (out_data_miss_size) {
std::memcpy(bufp, out_data_miss, out_data_miss_size);
bufp += out_data_miss_size;
}
free(out_tree);
free(out_data_hit);
free(out_data_miss);
// const MemoryBuffer<unsigned char> out_data =
// compress_memory_zstd(payload, total_size);
const size_t cBuffSize = ZSTD_compressBound(total_size);
unsigned char *const zstd_buffer = new unsigned char[cBuffSize];
const std::size_t cSize =
ZSTD_compress(zstd_buffer, cBuffSize, payload, total_size, 1);
CHECK_ZSTD(cSize);
// return MemoryBuffer<unsigned char>(buffer, cSize);
free(payload);
payload = 0;
const std::size_t bufferLen = 3 * sizeof(size_t) + cSize;
unsigned char *const buffer = new unsigned char[bufferLen];
outsize = bufferLen;
bufp = buffer;
*(size_t *)bufp = out_tree_size;
bufp += sizeof(size_t);
*(size_t *)bufp = out_data_hit_size;
bufp += sizeof(size_t);
*(size_t *)bufp = out_data_miss_size;
bufp += sizeof(size_t);
{
unsigned char const *const p = zstd_buffer;
std::copy(p, p + cSize, bufp);
}
// return MemoryBuffer<unsigned char>(buffer, bufferLen);
return buffer;
}
void decompress_memory_huffman(unsigned char *const src,
const std::size_t srcLen, long int *const dst,
const std::size_t dstLen) {
unsigned char *out_data_hit = 0;
size_t out_data_hit_size;
unsigned char *out_data_miss = 0;
size_t out_data_miss_size;
unsigned char *out_tree = 0;
size_t out_tree_size;
unsigned char *buf = src;
out_tree_size = *(size_t *)buf;
buf += sizeof(size_t);
out_data_hit_size = *(size_t *)buf;
buf += sizeof(size_t);
out_data_miss_size = *(size_t *)buf;
buf += sizeof(size_t);
size_t total_huffman_size =
out_tree_size + out_data_hit_size / 8 + 4 + out_data_miss_size;
unsigned char *huffman_encoding_p =
(unsigned char *)malloc(total_huffman_size);
// decompress_memory_zstd(buf, srcLen - 3 * sizeof(size_t),
// huffman_encoding_p,
// total_huffman_size);
size_t const dSize = ZSTD_decompress(huffman_encoding_p, total_huffman_size,
buf, srcLen - 3 * sizeof(size_t));
CHECK_ZSTD(dSize);
/* When zstd knows the content size, it will error if it doesn't match. */
CHECK(dstLen == dSize, "Impossible because zstd will check this condition!");
out_tree = huffman_encoding_p;
out_data_hit = huffman_encoding_p + out_tree_size;
out_data_miss =
huffman_encoding_p + out_tree_size + out_data_hit_size / 8 + 4;
mgard::huffman_decoding(dst, dstLen, out_data_hit, out_data_hit_size,
out_data_miss, out_data_miss_size, out_tree,
out_tree_size);
free(huffman_encoding_p);
}
template <uint32_t D, typename T, typename C>
void cascaded_compress(Handle<D, T> &handle, C *input_data, size_t input_count,
void *&output_data, size_t &output_size, int n_rle,
int n_de, bool bitpack, int queue_idx) {
// nvcomp::CascadedCompressor compressor(nvcomp::TypeOf<C>(), n_rle, n_de,
// bitpack);
nvcompBatchedCascadedOpts_t options = nvcompBatchedCascadedDefaultOpts;
options.type = nvcomp::TypeOf<C>();
options.num_RLEs = n_rle;
options.num_deltas = n_de;
options.use_bp = bitpack;
nvcomp::CascadedManager nvcomp_manager{
options, *(cudaStream_t *)handle.get(queue_idx)};
// size_t *temp_bytes;
// cudaMallocHostHelper((void **)&temp_bytes, sizeof(size_t));
// size_t *output_bytes;
// cudaMallocHostHelper((void **)&output_bytes, sizeof(size_t));
// compressor.configure(input_count * sizeof(C), temp_bytes, output_bytes);
auto comp_config =
nvcomp_manager.configure_compression(input_count * sizeof(C));
// void *temp_space;
// cudaMallocHelper(handle, &temp_space, *temp_bytes);
// cudaMallocHelper(handle, &output_data, *output_bytes);
cudaMalloc(&output_data, comp_config.max_compressed_buffer_size);
// compressor.compress_async(input_data, input_count * sizeof(C), temp_space,
// *temp_bytes, output_data, output_bytes,
// *(cudaStream_t *)handle.get(queue_idx));
uint8_t *output_data_uint8_t = (uint8_t *)output_data;
nvcomp_manager.compress((uint8_t *)input_data, output_data_uint8_t,
comp_config);
output_size = nvcomp_manager.get_compressed_output_size(output_data_uint8_t);
handle.sync(queue_idx);
// cudaFreeHelper(temp_space);
// cudaFreeHostHelper(temp_bytes);
// cudaFreeHostHelper(output_bytes);
}
template <uint32_t D, typename T, typename C>
void cascaded_decompress(Handle<D, T> &handle, void *input_data,
size_t input_size, C *&output_data, int queue_idx) {
// nvcomp::Decompressor<C> decompressor(input_data, input_size,
// *(cudaStream_t
// *)handle.get(queue_idx));
// nvcomp::CascadedDecompressor decompressor;
auto decomp_nvcomp_manager = nvcomp::create_manager(
(uint8_t *)input_data, *(cudaStream_t *)handle.get(queue_idx));
// size_t *temp_bytes;
// cudaMallocHostHelper((void **)&temp_bytes, sizeof(size_t));
// size_t *output_bytes;
// cudaMallocHostHelper((void **)&output_bytes, sizeof(size_t));
// decompressor.configure(input_data, input_size, temp_bytes, output_bytes,
// *(cudaStream_t *)handle.get(queue_idx));
nvcomp::DecompressionConfig decomp_config =
decomp_nvcomp_manager->configure_decompression((uint8_t *)input_data);
// void *temp_space;
// cudaMallocHelper(handle, (void **)&temp_space, *temp_bytes);
// cudaMallocHelper(handle, (void **)&output_data, *output_bytes);
cudaMalloc(&output_data, decomp_config.decomp_data_size);
uint8_t *output_data_uint8_t = (uint8_t *)output_data;
decomp_nvcomp_manager->decompress(output_data_uint8_t, (uint8_t *)input_data,
decomp_config);
// output_size = decomp_config.decomp_data_size;
handle.sync(queue_idx);
// decompressor.decompress_async(input_data, input_size, temp_space,
// *temp_bytes,
// output_data, *output_bytes,
// *(cudaStream_t *)handle.get(queue_idx));
// handle.sync(queue_idx);
// cudaFreeHelper(temp_space);
// cudaFreeHostHelper(temp_bytes);
// cudaFreeHostHelper(output_bytes);
}
template <uint32_t D, typename T, typename C>
void lz4_compress(Handle<D, T> &handle, C *input_data, size_t input_count,
void *&output_data, size_t &output_size, size_t chunk_size,
int queue_idx) {
nvcompType_t dtype = NVCOMP_TYPE_UCHAR;
// nvcomp::LZ4Compressor compressor(chunk_size, dtype);
nvcomp::LZ4Manager nvcomp_manager{chunk_size, dtype,
*(cudaStream_t *)handle.get(queue_idx)};
// size_t *temp_bytes;
// cudaMallocHostHelper((void **)&temp_bytes, sizeof(size_t));
// size_t *output_bytes;
// cudaMallocHostHelper((void **)&output_bytes, sizeof(size_t));
// compressor.configure(input_count * sizeof(C), temp_bytes, output_bytes);
nvcomp::CompressionConfig comp_config =
nvcomp_manager.configure_compression(input_count * sizeof(C));
// void *temp_space;
// cudaMallocHelper(handle, &temp_space, *temp_bytes);
cudaMallocHelper(handle, &output_data,
comp_config.max_compressed_buffer_size);
// compressor.compress_async(input_data, input_count * sizeof(C), temp_space,
// *temp_bytes, output_data, output_bytes,
// *(cudaStream_t *)handle.get(queue_idx));
uint8_t *output_data_uint8_t = (uint8_t *)output_data;
nvcomp_manager.compress((uint8_t *)input_data, output_data_uint8_t,
comp_config);
output_size = nvcomp_manager.get_compressed_output_size(output_data_uint8_t);
handle.sync(queue_idx);
// output_size = *output_bytes;
// cudaFreeHelper(temp_space);
// cudaFreeHostHelper(temp_bytes);
// cudaFreeHostHelper(output_bytes);
}
template <uint32_t D, typename T, typename C>
void lz4_decompress(Handle<D, T> &handle, void *input_data, size_t input_size,
C *&output_data, size_t &output_size, int queue_idx) {
auto decomp_nvcomp_manager = nvcomp::create_manager(
(uint8_t *)input_data, *(cudaStream_t *)handle.get(queue_idx));
// size_t *temp_bytes;
// cudaMallocHostHelper((void **)&temp_bytes, sizeof(size_t));
// size_t *output_bytes;
// cudaMallocHostHelper((void **)&output_bytes, sizeof(size_t));
// decompressor.configure(input_data, input_size, temp_bytes, output_bytes,
// *(cudaStream_t *)handle.get(queue_idx));
nvcomp::DecompressionConfig decomp_config =
decomp_nvcomp_manager->configure_decompression((uint8_t *)input_data);
// void *temp_space;
// cudaMallocHelper(handle, (void **)&temp_space, *temp_bytes);
// cudaMallocHelper(handle, (void **)&output_data, *output_bytes);
cudaMalloc(&output_data, decomp_config.decomp_data_size);
// decompressor.decompress_async(input_data, input_size, temp_space,
// *temp_bytes,
// output_data, *output_bytes,
// *(cudaStream_t *)handle.get(queue_idx));
uint8_t *output_data_uint8_t = (uint8_t *)output_data;
decomp_nvcomp_manager->decompress(output_data_uint8_t, (uint8_t *)input_data,
decomp_config);
output_size = decomp_config.decomp_data_size;
handle.sync(queue_idx);
// cudaFreeHelper(temp_space);
// cudaFreeHostHelper(temp_bytes);
// cudaFreeHostHelper(output_bytes);
}
#define KERNELS(D, T, C) \
template void cascaded_compress<D, T, C>( \
Handle<D, T> & handle, C * input_data, size_t input_count, \
void *&output_data, size_t &output_size, int n_rle, int n_de, \
bool bitpack, int queue_idx); \
template void cascaded_decompress<D, T, C>( \
Handle<D, T> & handle, void *input_data, size_t input_size, \
C *&output_data, int queue_idx); \
template void lz4_compress<D, T, C>(Handle<D, T> & handle, C * input_data, \
size_t input_count, void *&output_data, \
size_t &output_size, size_t chunk_size, \
int queue_idx); \
template void lz4_decompress<D, T, C>( \
Handle<D, T> & handle, void *input_data, size_t input_size, \
C *&output_data, size_t &output_count, int queue_idx);
KERNELS(1, double, uint8_t)
KERNELS(1, float, uint8_t)
KERNELS(2, double, uint8_t)
KERNELS(2, float, uint8_t)
KERNELS(3, double, uint8_t)
KERNELS(3, float, uint8_t)
KERNELS(4, double, uint8_t)
KERNELS(4, float, uint8_t)
KERNELS(5, double, uint8_t)
KERNELS(5, float, uint8_t)
KERNELS(1, double, uint32_t)
KERNELS(1, float, uint32_t)
KERNELS(2, double, uint32_t)
KERNELS(2, float, uint32_t)
KERNELS(3, double, uint32_t)
KERNELS(3, float, uint32_t)
KERNELS(4, double, uint32_t)
KERNELS(4, float, uint32_t)
KERNELS(5, double, uint32_t)
KERNELS(5, float, uint32_t)
KERNELS(1, double, uint64_t)
KERNELS(1, float, uint64_t)
KERNELS(2, double, uint64_t)
KERNELS(2, float, uint64_t)
KERNELS(3, double, uint64_t)
KERNELS(3, float, uint64_t)
KERNELS(4, double, uint64_t)
KERNELS(4, float, uint64_t)
KERNELS(5, double, uint64_t)
KERNELS(5, float, uint64_t)
#undef KERNELS
template <uint32_t D, typename T, typename S, typename Q>
void SeparateOutlierAndPrimary(Handle<D, T> &handle, S *dqv, size_t n,
size_t *outlier_idx, size_t outlier_count,
size_t primary_count, S *doutlier, Q *dprimary,
int queue_idx) {
// printf("compress outlier_idx: "); for(int i = 0; i < outlier_count; i++)
// {printf("%llu ", outlier_idx[i]);} printf("\n");
printf("compress outlier_count: %llu\n", outlier_count);
printf("compress primary_count: %llu\n", primary_count);
printf("start separating primary and outlier\n");
size_t p = 0;
size_t pp = 0;
size_t op = 0;
size_t size = outlier_idx[0] - 0;
// printf("copy primary\n");
if (size > 0) {
mgard_cuda::cudaMemcpyAsyncHelper(handle, dprimary + pp, dqv + p,
size * sizeof(Q), mgard_cuda::D2D,
queue_idx);
}
pp += size;
p += size;
for (int i = 0; i < outlier_count - 1; i++) {
size = 1;
// printf("copy outlier\n");
mgard_cuda::cudaMemcpyAsyncHelper(handle, doutlier + op, dqv + p,
size * sizeof(S), mgard_cuda::D2D,
queue_idx);
op += size;
p += size;
size = outlier_idx[i + 1] - outlier_idx[i] - 1;
// printf("copy primary %d %d %d\n", p, size, outlier_idx[outlier_idx.size()
// - 1]);
if (size > 0) {
mgard_cuda::cudaMemcpyAsyncHelper(handle, dprimary + pp, dqv + p,
size * sizeof(Q), mgard_cuda::D2D,
queue_idx);
}
pp += size;
p += size;
}
size = 1;
// printf("copy outlier\n");
mgard_cuda::cudaMemcpyAsyncHelper(handle, doutlier + op, dqv + p,
size * sizeof(S), mgard_cuda::D2D,
queue_idx);
op += size;
p += size;
size = n - outlier_idx[outlier_count - 1] - 1;
// printf("copy primary %d %d %d\n", p, size, outlier_idx[outlier_idx.size() -
// 1]);
if (size > 0) {
mgard_cuda::cudaMemcpyAsyncHelper(handle, dprimary + pp, dqv + p,
size * sizeof(Q), mgard_cuda::D2D,
queue_idx);
}
// printf("done copy primary\n");
pp += size;
p += size;
if (pp != primary_count || op != outlier_count) {
printf("Primary or outlier size mismatch!\n");
}
printf("done separating primary and outlier\n");
}
template <uint32_t D, typename T, typename S, typename Q>
void CombineOutlierAndPrimary(Handle<D, T> &handle, S *dqv, size_t n,
size_t *outlier_idx, size_t outlier_count,
size_t primary_count, S *doutlier, Q *dprimary,
int queue_idx) {
size_t p = 0;
size_t pp = 0;
size_t op = 0;
size_t size = outlier_idx[0] - 0;
// printf("copy primary\n");
if (size > 0) {
mgard_cuda::cudaMemcpyAsyncHelper(handle, dqv + p, dprimary + pp,
size * sizeof(Q), mgard_cuda::D2D,
queue_idx);
}
pp += size;
p += size;
for (int i = 0; i < outlier_count - 1; i++) {
size = 1;
// printf("copy outlier\n");
mgard_cuda::cudaMemcpyAsyncHelper(handle, dqv + p, doutlier + op,
size * sizeof(S), mgard_cuda::D2D,
queue_idx);
op += size;
p += size;
size = outlier_idx[i + 1] - outlier_idx[i] - 1;
// printf("copy primary %d %d %d\n", p, size, outlier_idx[outlier_idx.size()
// - 1]);
if (size > 0) {
mgard_cuda::cudaMemcpyAsyncHelper(handle, dqv + p, dprimary + pp,
size * sizeof(Q), mgard_cuda::D2D,
queue_idx);
}
pp += size;
p += size;
}
size = 1;
// printf("copy outlier\n");
mgard_cuda::cudaMemcpyAsyncHelper(handle, dqv + p, doutlier + op,
size * sizeof(S), mgard_cuda::D2D,
queue_idx);
op += size;
p += size;
size = n - outlier_idx[outlier_count - 1] - 1;
// printf("copy primary %d %d %d\n", p, size, outlier_idx[outlier_idx.size() -
// 1]);
if (size > 0) {
mgard_cuda::cudaMemcpyAsyncHelper(handle, dqv + p, dprimary + pp,
size * sizeof(Q), mgard_cuda::D2D,
queue_idx);
}
// printf("done copy primary\n");
pp += size;
p += size;
}
#define KERNELS(D, T, S, Q) \
template void SeparateOutlierAndPrimary<D, T, S, Q>( \
Handle<D, T> & handle, S * dqv, size_t n, size_t * outlier_idx,\
size_t outlier_count, \
size_t primary_count,\
S * doutlier, \
Q * dprimary, int queue_idx); \
template void CombineOutlierAndPrimary<D, T, S, Q>( \
Handle<D, T> & handle, S * dqv, size_t n, size_t * outlier_idx,\
size_t outlier_count, \
size_t primary_count,\
S * doutlier, \
Q * dprimary, int queue_idx);
KERNELS(1, double, int, uint32_t)
KERNELS(1, float, int, uint32_t)
KERNELS(2, double, int, uint32_t)
KERNELS(2, float, int, uint32_t)
KERNELS(3, double, int, uint32_t)
KERNELS(3, float, int, uint32_t)
KERNELS(4, double, int, uint32_t)
KERNELS(4, float, int, uint32_t)
KERNELS(5, double, int, uint32_t)
KERNELS(5, float, int, uint32_t)
#undef KERNELS
template <uint32_t D, typename T, typename S, typename Q, typename H>
void huffman_compress(Handle<D, T> &handle, S *input_data, size_t input_count,
std::vector<size_t> &outlier_idx, H *&out_meta,
size_t &out_meta_size, H *&out_data,
size_t &out_data_size, int chunk_size, int dict_size,
int queue_idx) {
HuffmanEncode<D, T, S, Q, H>(handle, input_data, input_count, outlier_idx,
out_meta, out_meta_size, out_data, out_data_size,
chunk_size, dict_size);
}
template <uint32_t D, typename T, typename S, typename Q, typename H>
void huffman_decompress(Handle<D, T> &handle, H *in_meta, size_t in_meta_size,
H *in_data, size_t in_data_size, S *&output_data,
size_t &output_count, int queue_idx) {
HuffmanDecode<D, T, S, Q, H>(handle, output_data, output_count, in_meta,
in_meta_size, in_data, in_data_size);
}
#define KERNELS(D, T, S, Q, H) \
template void huffman_compress<D, T, S, Q, H>( \
Handle<D, T> & handle, S * input_data, size_t input_count, \
std::vector<size_t> & outlier_idx, H * &out_meta, \
size_t & out_meta_size, H * &out_data, size_t & out_data_size, \
int chunk_size, int dict_size, int queue_idx); \
template void huffman_decompress<D, T, S, Q, H>( \
Handle<D, T> & handle, H * in_meta, size_t in_meta_size, H * in_data, \
size_t in_data_size, S * &output_data, size_t & output_count, \
int queue_idx);
KERNELS(1, double, int, uint32_t, uint32_t)
KERNELS(1, float, int, uint32_t, uint32_t)
KERNELS(2, double, int, uint32_t, uint32_t)
KERNELS(2, float, int, uint32_t, uint32_t)
KERNELS(3, double, int, uint32_t, uint32_t)
KERNELS(3, float, int, uint32_t, uint32_t)
KERNELS(4, double, int, uint32_t, uint32_t)
KERNELS(4, float, int, uint32_t, uint32_t)
KERNELS(5, double, int, uint32_t, uint32_t)
KERNELS(5, float, int, uint32_t, uint32_t)
KERNELS(1, double, int, uint32_t, uint64_t)
KERNELS(1, float, int, uint32_t, uint64_t)
KERNELS(2, double, int, uint32_t, uint64_t)
KERNELS(2, float, int, uint32_t, uint64_t)
KERNELS(3, double, int, uint32_t, uint64_t)
KERNELS(3, float, int, uint32_t, uint64_t)
KERNELS(4, double, int, uint32_t, uint64_t)
KERNELS(4, float, int, uint32_t, uint64_t)
KERNELS(5, double, int, uint32_t, uint64_t)
KERNELS(5, float, int, uint32_t, uint64_t)
template <uint32_t D, typename T, typename S, typename H>
void cpu_lossless_compression(Handle<D, T> &handle, S *input_data,
size_t input_count, H *&out_data,
size_t &out_data_size) {
int *int_vector = new int[input_count];
cudaMemcpyAsyncHelper(handle, int_vector, input_data, input_count * sizeof(S),
AUTO, 0);
handle.sync(0);
std::vector<long int> input_vector(input_count);
for (int i = 0; i < input_count; i++)
input_vector[i] = int_vector[i];
// printf("%u %u\n", sizeof(long int), sizeof(int));
// printf("dqv\n");
// print_matrix_cuda(1, input_count, input_data, input_count);
// printf("input_vector: ");
// for (int i = 0; i < input_vector.size(); i++) printf("%d ",
  // input_vector[i]); printf("\n");
  // Compress an array of data using `zstd`.
std::size_t zstd_outsize;
unsigned char *buffer = compress_memory_huffman(
input_vector.data(), input_vector.size() * sizeof(long int),
zstd_outsize);
out_data_size = zstd_outsize;
cudaMallocHelper(handle, (void **)&out_data, out_data_size);
cudaMemcpyAsyncHelper(handle, out_data, buffer, out_data_size, AUTO, 0);
handle.sync(0);
delete[] int_vector;
}
template <uint32_t D, typename T, typename S, typename H>
void cpu_lossless_decompression(Handle<D, T> &handle, H *input_data,
size_t input_count, S *&out_data,
size_t output_count) {
// printf("cpu decompression: %llu\n", input_count);
std::vector<unsigned char> input_vector(input_count);
cudaMemcpyAsyncHelper(handle, input_vector.data(), input_data, input_count,
AUTO, 0);
handle.sync(0);
// printf("copy done\n");
long int *output_vector = new long int[output_count];
int *int_vector = new int[output_count];
decompress_memory_huffman(
reinterpret_cast<unsigned char *>(input_vector.data()),
input_vector.size(), output_vector,
output_count * sizeof(*output_vector));
for (int i = 0; i < output_count; i++)
int_vector[i] = output_vector[i];
cudaMallocHelper(handle, (void **)&out_data, output_count * sizeof(S));
cudaMemcpyAsyncHelper(handle, out_data, int_vector, output_count * sizeof(S),
AUTO, 0);
handle.sync(0);
delete[] output_vector;
delete[] int_vector;
// printf("dqv\n");
// print_matrix_cuda(1, output_count, out_data, output_count);
}
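// Editorial note, not part of the original source: the two CPU lossless
// helpers above form a round trip. The int quantizer output is widened to
// long int before compress_memory_huffman() and narrowed back after
// decompress_memory_huffman(); only byte sizes travel with the compressed
// stream, so the caller must pass the element count to the decompressor.
// Hypothetical usage sketch (every name except the two helpers is made up):
//
//   unsigned char *packed = nullptr;
//   size_t packed_size = 0;
//   cpu_lossless_compression(handle, dqv, qv_count, packed, packed_size);
//   int *dqv_restored = nullptr;
//   cpu_lossless_decompression(handle, packed, packed_size,
//                              dqv_restored, qv_count);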
#define KERNELS(D, T, S, H) \
template void cpu_lossless_compression<D, T, S, H>( \
Handle<D, T> & handle, S * input_data, size_t input_count, \
H * &out_data, size_t & out_data_size); \
template void cpu_lossless_decompression<D, T, S, H>( \
Handle<D, T> & handle, H * input_data, size_t input_count, \
S * &out_data, size_t output_count);
KERNELS(1, double, int, unsigned char)
KERNELS(1, float, int, unsigned char)
KERNELS(2, double, int, unsigned char)
KERNELS(2, float, int, unsigned char)
KERNELS(3, double, int, unsigned char)
KERNELS(3, float, int, unsigned char)
KERNELS(4, double, int, unsigned char)
KERNELS(4, float, int, unsigned char)
KERNELS(5, double, int, unsigned char)
KERNELS(5, float, int, unsigned char)
} // namespace mgard_cuda |
8e7785b47d2ce289238b633d181586e553060143.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* POTEL Martin
ACKVA Adrian */
#include "utils.h"
#include <stdlib.h>
#include "life_kernel.cu"
void init_data(int * domain, int domain_x, int domain_y)
{
for(int i = 0; i != domain_y; ++i) {
for(int j = 0; j != domain_x; ++j) {
domain[i * domain_x + j] = rand() % 3;
}
}
}
// Color display code contributed by Louis Beziaud, Simon Bihel and Rémi Hutin, PPAR 2016/2017
void print_domain(int* domain, int domain_x, int domain_y, int* red, int* blue) {
if (red != NULL) *red = 0;
if (blue != NULL) *blue = 0;
for(int y = 0; y < domain_y; y++) {
for(int x = 0; x < domain_x; x++) {
int cell = domain[y * domain_x + x];
switch(cell) {
case 0:
printf("\033[40m \033[0m");
break;
case 1:
printf("\033[41m \033[0m");
break;
case 2:
printf("\033[44m \033[0m");
break;
default:
break;
}
if(red != NULL && cell == 1) {
(*red)++;
} else if(blue != NULL && cell == 2) {
(*blue)++;
}
}
printf("\n");
}
}
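// Editorial note, not part of the original source: with the default
// parameters chosen in main() below (domain_x = 128, threads_per_block = 128,
// cells_per_word = 1) the launch geometry works out to
//   blocks_x = 128 / (128 * 1) = 1,   blocks_y = domain_y = 128,
// i.e. one 128-thread block per row, so each thread owns exactly one cell and
// the total thread count equals domain_x * domain_y = 16384.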
int main(int argc, char ** argv)
{
// Definition of parameters
int domain_x = 128; // Multiple of threads_per_block * cells_per_word
int domain_y = 128;
int cells_per_word = 1;
int steps = 2;
int threads_per_block = 128;
int blocks_x = domain_x / (threads_per_block * cells_per_word);
int blocks_y = domain_y;
dim3 grid(blocks_x, blocks_y); // CUDA grid dimensions
dim3 threads(threads_per_block); // CUDA block dimensions
// Allocation of arrays
int * domain_gpu[2] = {NULL, NULL};
// Arrays of dimensions domain.x * domain.y
size_t domain_size = domain_x * domain_y / cells_per_word * sizeof(int);
CUDA_SAFE_CALL(hipMalloc((void**)&domain_gpu[0], domain_size));
CUDA_SAFE_CALL(hipMalloc((void**)&domain_gpu[1], domain_size));
int * domain_cpu = (int*)malloc(domain_size);
// Arrays of dimensions pitch * domain.y
init_data(domain_cpu, domain_x, domain_y);
CUDA_SAFE_CALL(hipMemcpy(domain_gpu[0], domain_cpu, domain_size, hipMemcpyHostToDevice));
// Timer initialization
hipEvent_t start, stop;
CUDA_SAFE_CALL(hipEventCreate(&start));
CUDA_SAFE_CALL(hipEventCreate(&stop));
// Start timer
CUDA_SAFE_CALL(hipEventRecord(start, 0));
// Kernel execution
int shared_mem_size = 0;
for(int i = 0; i < steps; i++) {
hipLaunchKernelGGL(( life_kernel), dim3(grid), dim3(threads), shared_mem_size , 0, domain_gpu[i%2],
domain_gpu[(i+1)%2], domain_x, domain_y);
}
// Stop timer
CUDA_SAFE_CALL(hipEventRecord(stop, 0));
CUDA_SAFE_CALL(hipEventSynchronize(stop));
float elapsedTime;
CUDA_SAFE_CALL(hipEventElapsedTime(&elapsedTime, start, stop)); // In ms
printf("GPU time: %f ms\n", elapsedTime);
CUDA_SAFE_CALL(hipEventDestroy(start));
CUDA_SAFE_CALL(hipEventDestroy(stop));
// Get results back
CUDA_SAFE_CALL(hipMemcpy(domain_cpu, domain_gpu[steps%2], domain_size, hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipFree(domain_gpu[0]));
CUDA_SAFE_CALL(hipFree(domain_gpu[1]));
// Count colors
int red = 0;
int blue = 0;
print_domain(domain_cpu, domain_x, domain_y, &red, &blue);
printf("Red/Blue cells: %d/%d\n", red, blue);
free(domain_cpu);
return 0;
}
| 8e7785b47d2ce289238b633d181586e553060143.cu | /* POTEL Martin
ACKVA Adrian */
#include "utils.h"
#include <stdlib.h>
#include "life_kernel.cu"
void init_data(int * domain, int domain_x, int domain_y)
{
for(int i = 0; i != domain_y; ++i) {
for(int j = 0; j != domain_x; ++j) {
domain[i * domain_x + j] = rand() % 3;
}
}
}
// Color display code contributed by Louis Beziaud, Simon Bihel and Rémi Hutin, PPAR 2016/2017
void print_domain(int* domain, int domain_x, int domain_y, int* red, int* blue) {
if (red != NULL) *red = 0;
if (blue != NULL) *blue = 0;
for(int y = 0; y < domain_y; y++) {
for(int x = 0; x < domain_x; x++) {
int cell = domain[y * domain_x + x];
switch(cell) {
case 0:
printf("\033[40m \033[0m");
break;
case 1:
printf("\033[41m \033[0m");
break;
case 2:
printf("\033[44m \033[0m");
break;
default:
break;
}
if(red != NULL && cell == 1) {
(*red)++;
} else if(blue != NULL && cell == 2) {
(*blue)++;
}
}
printf("\n");
}
}
int main(int argc, char ** argv)
{
// Definition of parameters
int domain_x = 128; // Multiple of threads_per_block * cells_per_word
int domain_y = 128;
int cells_per_word = 1;
int steps = 2;
int threads_per_block = 128;
int blocks_x = domain_x / (threads_per_block * cells_per_word);
int blocks_y = domain_y;
dim3 grid(blocks_x, blocks_y); // CUDA grid dimensions
dim3 threads(threads_per_block); // CUDA block dimensions
// Allocation of arrays
int * domain_gpu[2] = {NULL, NULL};
// Arrays of dimensions domain.x * domain.y
size_t domain_size = domain_x * domain_y / cells_per_word * sizeof(int);
CUDA_SAFE_CALL(cudaMalloc((void**)&domain_gpu[0], domain_size));
CUDA_SAFE_CALL(cudaMalloc((void**)&domain_gpu[1], domain_size));
int * domain_cpu = (int*)malloc(domain_size);
// Arrays of dimensions pitch * domain.y
init_data(domain_cpu, domain_x, domain_y);
CUDA_SAFE_CALL(cudaMemcpy(domain_gpu[0], domain_cpu, domain_size, cudaMemcpyHostToDevice));
// Timer initialization
cudaEvent_t start, stop;
CUDA_SAFE_CALL(cudaEventCreate(&start));
CUDA_SAFE_CALL(cudaEventCreate(&stop));
// Start timer
CUDA_SAFE_CALL(cudaEventRecord(start, 0));
// Kernel execution
int shared_mem_size = 0;
for(int i = 0; i < steps; i++) {
life_kernel<<< grid, threads, shared_mem_size >>>(domain_gpu[i%2],
domain_gpu[(i+1)%2], domain_x, domain_y);
}
// Stop timer
CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
CUDA_SAFE_CALL(cudaEventSynchronize(stop));
float elapsedTime;
CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsedTime, start, stop)); // In ms
printf("GPU time: %f ms\n", elapsedTime);
CUDA_SAFE_CALL(cudaEventDestroy(start));
CUDA_SAFE_CALL(cudaEventDestroy(stop));
// Get results back
CUDA_SAFE_CALL(cudaMemcpy(domain_cpu, domain_gpu[steps%2], domain_size, cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaFree(domain_gpu[0]));
CUDA_SAFE_CALL(cudaFree(domain_gpu[1]));
// Count colors
int red = 0;
int blue = 0;
print_domain(domain_cpu, domain_x, domain_y, &red, &blue);
printf("Red/Blue cells: %d/%d\n", red, blue);
free(domain_cpu);
return 0;
}
|
ee7df4552a32558e50bd89e8317604c0d1ee9c20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2013 Yangqing Jia
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int ksize,
const int stride, const int height_col, const int width_col, Dtype* data_col) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
int w_out = index % width_col;
index /= width_col;
int h_out = index % height_col;
int channel_in = index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride;
int w_in = w_out * stride;
data_col += (channel_out * height_col + h_out) * width_col + w_out;
data_im += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
*data_col = data_im[i * width + j];
data_col += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int ksize, const int stride,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height - ksize) / stride + 1;
int width_col = (width - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im, height, width, ksize, stride, height_col, width_col,
data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int ksize, const int stride,
float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int ksize, const int stride,
double* data_col);
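// Editorial note, not part of the original source: a quick shape check for
// the valid-convolution layout used above. With height = width = 32,
// ksize = 3 and stride = 1,
//   height_col = (32 - 3) / 1 + 1 = 30,   width_col = 30,
// so im2col_gpu launches channels * 30 * 30 threads, and data_col must hold
// channels * ksize * ksize rows of height_col * width_col elements, i.e.
// channels * 9 * 900 values.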
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels, const int ksize,
const int stride, const int height_col, const int width_col, Dtype* data_im) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
Dtype val = 0;
int w = index % width;
int h = (index / width) % height;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1;
int w_col_end = min(w / stride + 1, width_col);
int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1;
int h_col_end = min(h / stride + 1, height_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c * ksize * ksize + (h - h_col * stride) * ksize + (w - w_col * stride);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int ksize, const int stride,
Dtype* data_im) {
//CUDA_CHECK(hipMemset(data_im, 0, sizeof(Dtype) * height * width * channels));
int height_col = (height - ksize) / stride + 1;
int width_col = (width - ksize) / stride + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, ksize, stride,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int psize, const int stride,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int psize, const int stride,
double* data_im);
} // namespace caffe
| ee7df4552a32558e50bd89e8317604c0d1ee9c20.cu | // Copyright 2013 Yangqing Jia
#include <cmath>
#include <cstdlib>
#include <cstring>
#include "caffe/common.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype>
__global__ void im2col_gpu_kernel(const int n, const Dtype* data_im,
const int height, const int width, const int ksize,
const int stride, const int height_col, const int width_col, Dtype* data_col) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
int w_out = index % width_col;
index /= width_col;
int h_out = index % height_col;
int channel_in = index / height_col;
int channel_out = channel_in * ksize * ksize;
int h_in = h_out * stride;
int w_in = w_out * stride;
data_col += (channel_out * height_col + h_out) * width_col + w_out;
data_im += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
*data_col = data_im[i * width + j];
data_col += height_col * width_col;
}
}
}
}
template <typename Dtype>
void im2col_gpu(const Dtype* data_im, const int channels,
const int height, const int width, const int ksize, const int stride,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int height_col = (height - ksize) / stride + 1;
int width_col = (width - ksize) / stride + 1;
int num_kernels = channels * height_col * width_col;
im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im, height, width, ksize, stride, height_col, width_col,
data_col);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void im2col_gpu<float>(const float* data_im, const int channels,
const int height, const int width, const int ksize, const int stride,
float* data_col);
template void im2col_gpu<double>(const double* data_im, const int channels,
const int height, const int width, const int ksize, const int stride,
double* data_col);
template <typename Dtype>
__global__ void col2im_gpu_kernel(const int n, const Dtype* data_col,
const int height, const int width, const int channels, const int ksize,
const int stride, const int height_col, const int width_col, Dtype* data_im) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) {
Dtype val = 0;
int w = index % width;
int h = (index / width) % height;
int c = index / (width * height);
// compute the start and end of the output
int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1;
int w_col_end = min(w / stride + 1, width_col);
int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1;
int h_col_end = min(h / stride + 1, height_col);
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
// the col location: [c * width * height + h_out, w_out]
int c_col = c * ksize * ksize + (h - h_col * stride) * ksize + (w - w_col * stride);
val += data_col[(c_col * height_col + h_col) * width_col + w_col];
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void col2im_gpu(const Dtype* data_col, const int channels,
const int height, const int width, const int ksize, const int stride,
Dtype* data_im) {
//CUDA_CHECK(cudaMemset(data_im, 0, sizeof(Dtype) * height * width * channels));
int height_col = (height - ksize) / stride + 1;
int width_col = (width - ksize) / stride + 1;
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, ksize, stride,
height_col, width_col, data_im);
CUDA_POST_KERNEL_CHECK;
}
// Explicit instantiation
template void col2im_gpu<float>(const float* data_col, const int channels,
const int height, const int width, const int psize, const int stride,
float* data_im);
template void col2im_gpu<double>(const double* data_col, const int channels,
const int height, const int width, const int psize, const int stride,
double* data_im);
} // namespace caffe
|
9dbb010dc76723d051fd075e5eca4967ed0e69c0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2016 Frank Ye
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "FPRadixTree.h"
namespace cuda_fp_growth {
/// Get the sign of a given value (-1 if the value is negative, +1 if positive and 0 if it is zero)
__device__
inline int sgn( int x ) { return ( 0 < x ) - ( x < 0 ); }
/// Counts the length of the shared prefix between two transactions
__device__
int pfx( int i, int j, const BitBlock* __restrict__ trans_map, size_type n_trans, size_type blocks_per_trans )
{
assert( std::numeric_limits<int>::digits == 31 );
if ( i >= 0 && i < n_trans && j >= 0 && j < n_trans ) {
int length = 0;
for ( int k = 0; k < blocks_per_trans; ++k ) {
BitBlock value_i = *( trans_map + i * blocks_per_trans + k );
BitBlock value_j = *( trans_map + j * blocks_per_trans + k );
BitBlock xor_value = value_i ^ value_j;
if ( xor_value != 0 ) {
length += ( __clz( xor_value ) - 24 );
break;
}
else length += 8;
}
return length;
}
else return -1;
}
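// Editorial note, not part of the original source: pfx() compares
// transactions one 8-bit BitBlock at a time. The block is promoted to a
// 32-bit int before __clz(), which therefore reports 24 extra leading zeros
// from the promotion, hence the "- 24". Worked example for a single block:
//   value_i = 0b10110100, value_j = 0b10100100
//   xor_value = 0b00010000  ->  __clz(xor_value) = 27  ->  27 - 24 = 3
// so the two transactions share a 3-bit prefix within that block, i.e. the
// first differing item is the fourth bit of the byte.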
/// Counts total number of transactions covered by a transaction bitmap range
__device__
size_type count_transactions( index_type i, index_type j, const size_type* __restrict__ trans_counts )
{
size_type n = 0;
for ( index_type pos = i; pos <= j; ++pos ) n += *( trans_counts + pos );
return n;
}
// Each inner node has 9x cuda_uint elements and each leaf node has 2x
#define InnerNodesSize( n_trans ) ( sizeof( cuda_uint ) * 9 * ( n_trans - 1 ) )
#define LeafNodesSize( n_trans ) ( sizeof( cuda_uint ) * 2 * n_trans )
#define RadixTreeBufferSize( n_trans ) ( sizeof( cuda_uint ) * ( 9 * ( n_trans - 1 ) + 2 * n_trans ) )
__global__
void construct_radix_tree( const BitBlock* __restrict__ trans_map, const size_type* __restrict__ trans_counts,
size_type n_trans, size_type blocks_per_trans, InnerNode* inner_nodes, LeafNode* leaf_nodes )
{
extern __shared__ cuda_uint buffer[];
if ( threadIdx.x == 0 ) memset( buffer, 0, RadixTreeBufferSize( n_trans ) );
__syncthreads();
InnerNode* _inner_nodes = reinterpret_cast<InnerNode*>( buffer );
LeafNode* _leaf_nodes = reinterpret_cast<LeafNode*>( (char*)buffer + InnerNodesSize( n_trans ) );
index_type i = threadIdx.x;
// determine direction (+1 or -1)
int d = sgn( pfx( i, i+1, trans_map, n_trans, blocks_per_trans ) - pfx( i, i-1, trans_map, n_trans, blocks_per_trans ) );
// find upper-bound
int min_pfx = pfx( i, i-d, trans_map, n_trans, blocks_per_trans );
int l_max = 2;
while ( pfx( i, i + l_max * d, trans_map, n_trans, blocks_per_trans ) > min_pfx ) l_max *= 2;
// find the other end
int l = 0;
for ( int t = l_max / 2; t >= 1; t /= 2 ) {
if ( pfx( i, i + ( l + t ) * d, trans_map, n_trans, blocks_per_trans ) > min_pfx ) l += t;
}
index_type j = i + l * d;
// find split position
int node_pfx = pfx( i, j, trans_map, n_trans, blocks_per_trans );
int s = 0;
for ( int t = l / 2; t >= 1; t /= 2 ) {
if ( pfx( i, i + ( s + t ) * d, trans_map, n_trans, blocks_per_trans ) > node_pfx ) s += t;
}
if ( pfx( i, i + ( s + 1 ) * d, trans_map, n_trans, blocks_per_trans ) > node_pfx ) s += 1;
index_type split = i + s * d + min( d, 0 );
InnerNode* node = _inner_nodes + i;
node->range_start = min( i, j );
node->range_end = max( i, j );
node->prefix_length = node_pfx;
node->trans_count = count_transactions( node->range_start, node->range_end, trans_counts );
// link left child
node->left_idx = split;
if ( min( i, j ) == split ) {
node->left_is_leaf = true;
( _leaf_nodes + split )->parent_idx = i;
( _leaf_nodes + split )->trans_count = *( trans_counts + split );
}
else {
node->left_is_leaf = false;
( _inner_nodes + split )->parent_idx = i;
}
// link right child
node->right_idx = split + 1;
if ( max( i, j ) == split + 1 ) {
node->right_is_leaf = true;
( _leaf_nodes + split + 1 )->parent_idx = i;
( _leaf_nodes + split + 1 )->trans_count = *( trans_counts + split + 1 );
}
else {
node->right_is_leaf = false;
( _inner_nodes + split + 1 )->parent_idx = i;
}
// copy results to output
__syncthreads();
if ( threadIdx.x == 0 ) {
memcpy( inner_nodes, _inner_nodes, InnerNodesSize( n_trans ) );
memcpy( leaf_nodes, _leaf_nodes, LeafNodesSize( n_trans ) );
}
}
FPRadixTree::FPRadixTree( const FPTransMap& trans_map )
: _inner_nodes( DInnerNodes( trans_map.size() - 1 ) ), _leaf_nodes( DLeafNodes( trans_map.size() ) )
{
size_type n_trans = trans_map.size(), blocks_per_trans = trans_map.blocks_per_transaction();
hipLaunchKernelGGL(( construct_radix_tree) , dim3(1), dim3(n_trans - 1), RadixTreeBufferSize( n_trans ) , 0,
trans_map.bitmap().data().get(), trans_map.transaction_counts().data().get(), n_trans, blocks_per_trans,
_inner_nodes.data().get(), _leaf_nodes.data().get()
);
}
}
| 9dbb010dc76723d051fd075e5eca4967ed0e69c0.cu | /*
Copyright 2016 Frank Ye
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "FPRadixTree.h"
namespace cuda_fp_growth {
/// Get the sign of a given value (-1 if the value is negative, +1 if positive and 0 if it is zero)
__device__
inline int sgn( int x ) { return ( 0 < x ) - ( x < 0 ); }
/// Counts the length of the shared prefix between two transactions
__device__
int pfx( int i, int j, const BitBlock* __restrict__ trans_map, size_type n_trans, size_type blocks_per_trans )
{
assert( std::numeric_limits<int>::digits == 31 );
if ( i >= 0 && i < n_trans && j >= 0 && j < n_trans ) {
int length = 0;
for ( int k = 0; k < blocks_per_trans; ++k ) {
BitBlock value_i = *( trans_map + i * blocks_per_trans + k );
BitBlock value_j = *( trans_map + j * blocks_per_trans + k );
BitBlock xor_value = value_i ^ value_j;
if ( xor_value != 0 ) {
length += ( __clz( xor_value ) - 24 );
break;
}
else length += 8;
}
return length;
}
else return -1;
}
/// Counts total number of transactions covered by a transaction bitmap range
__device__
size_type count_transactions( index_type i, index_type j, const size_type* __restrict__ trans_counts )
{
size_type n = 0;
for ( index_type pos = i; pos <= j; ++pos ) n += *( trans_counts + pos );
return n;
}
// Each inner node has 9x cuda_uint elements and each leaf node has 2x
#define InnerNodesSize( n_trans ) ( sizeof( cuda_uint ) * 9 * ( n_trans - 1 ) )
#define LeafNodesSize( n_trans ) ( sizeof( cuda_uint ) * 2 * n_trans )
#define RadixTreeBufferSize( n_trans ) ( sizeof( cuda_uint ) * ( 9 * ( n_trans - 1 ) + 2 * n_trans ) )
__global__
void construct_radix_tree( const BitBlock* __restrict__ trans_map, const size_type* __restrict__ trans_counts,
size_type n_trans, size_type blocks_per_trans, InnerNode* inner_nodes, LeafNode* leaf_nodes )
{
extern __shared__ cuda_uint buffer[];
if ( threadIdx.x == 0 ) memset( buffer, 0, RadixTreeBufferSize( n_trans ) );
__syncthreads();
InnerNode* _inner_nodes = reinterpret_cast<InnerNode*>( buffer );
LeafNode* _leaf_nodes = reinterpret_cast<LeafNode*>( (char*)buffer + InnerNodesSize( n_trans ) );
index_type i = threadIdx.x;
// determine direction (+1 or -1)
int d = sgn( pfx( i, i+1, trans_map, n_trans, blocks_per_trans ) - pfx( i, i-1, trans_map, n_trans, blocks_per_trans ) );
// find upper-bound
int min_pfx = pfx( i, i-d, trans_map, n_trans, blocks_per_trans );
int l_max = 2;
while ( pfx( i, i + l_max * d, trans_map, n_trans, blocks_per_trans ) > min_pfx ) l_max *= 2;
// find the other end
int l = 0;
for ( int t = l_max / 2; t >= 1; t /= 2 ) {
if ( pfx( i, i + ( l + t ) * d, trans_map, n_trans, blocks_per_trans ) > min_pfx ) l += t;
}
index_type j = i + l * d;
// find split position
int node_pfx = pfx( i, j, trans_map, n_trans, blocks_per_trans );
int s = 0;
for ( int t = l / 2; t >= 1; t /= 2 ) {
if ( pfx( i, i + ( s + t ) * d, trans_map, n_trans, blocks_per_trans ) > node_pfx ) s += t;
}
if ( pfx( i, i + ( s + 1 ) * d, trans_map, n_trans, blocks_per_trans ) > node_pfx ) s += 1;
index_type split = i + s * d + min( d, 0 );
InnerNode* node = _inner_nodes + i;
node->range_start = min( i, j );
node->range_end = max( i, j );
node->prefix_length = node_pfx;
node->trans_count = count_transactions( node->range_start, node->range_end, trans_counts );
// link left child
node->left_idx = split;
if ( min( i, j ) == split ) {
node->left_is_leaf = true;
( _leaf_nodes + split )->parent_idx = i;
( _leaf_nodes + split )->trans_count = *( trans_counts + split );
}
else {
node->left_is_leaf = false;
( _inner_nodes + split )->parent_idx = i;
}
// link right child
node->right_idx = split + 1;
if ( max( i, j ) == split + 1 ) {
node->right_is_leaf = true;
( _leaf_nodes + split + 1 )->parent_idx = i;
( _leaf_nodes + split + 1 )->trans_count = *( trans_counts + split + 1 );
}
else {
node->right_is_leaf = false;
( _inner_nodes + split + 1 )->parent_idx = i;
}
// copy results to output
__syncthreads();
if ( threadIdx.x == 0 ) {
memcpy( inner_nodes, _inner_nodes, InnerNodesSize( n_trans ) );
memcpy( leaf_nodes, _leaf_nodes, LeafNodesSize( n_trans ) );
}
}
FPRadixTree::FPRadixTree( const FPTransMap& trans_map )
: _inner_nodes( DInnerNodes( trans_map.size() - 1 ) ), _leaf_nodes( DLeafNodes( trans_map.size() ) )
{
size_type n_trans = trans_map.size(), blocks_per_trans = trans_map.blocks_per_transaction();
construct_radix_tree <<< 1, n_trans - 1, RadixTreeBufferSize( n_trans ) >>>(
trans_map.bitmap().data().get(), trans_map.transaction_counts().data().get(), n_trans, blocks_per_trans,
_inner_nodes.data().get(), _leaf_nodes.data().get()
);
}
}
|
4c773c3e190f182a72944f47f46c5df78208ec03.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/extrema.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <locale.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "custring_view.cuh"
#include "custring.cuh"
#include "NVText.h"
static void printCudaError( hipError_t err, const char* prefix="\t" )
{
if( err != hipSuccess )
fprintf(stderr,"%s: %s(%d):%s\n",prefix,hipGetErrorName(err),(int)err,hipGetErrorString(err));
}
// return unique set of tokens within all the strings using the specified delimiter
NVStrings* NVText::unique_tokens(NVStrings& strs, const char* delimiter )
{
int bytes = (int)strlen(delimiter);
char* d_delimiter = 0;
auto execpol = rmm::exec_policy(0);
RMM_ALLOC(&d_delimiter,bytes,0);
hipMemcpy(d_delimiter,delimiter,bytes,hipMemcpyHostToDevice);
// need to count how many output strings per string
unsigned int count = strs.size();
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
rmm::device_vector<int> counts(count,0);
int* d_counts = counts.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_delimiter, bytes, d_counts] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_counts[idx] = dstr->split_size(d_delimiter,bytes,0,-1);
});
int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() );
// build an index for each column and then sort/unique it
rmm::device_vector< thrust::pair<const char*,size_t> > vocab;
for( int col=0; col < columnsCount; ++col )
{
// first, build a vector of pair<char*,int>'s' for each column
// each pair points to a string for this column for each row
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, col, d_delimiter, bytes, d_counts, d_indexes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
d_indexes[idx].first = 0; // initialize to
d_indexes[idx].second = 0; // null string
if( !dstr )
return;
// dcount already accounts for the maxsplit value
int dcount = d_counts[idx];
if( col >= dcount )
return; // passed the end for this string
// skip delimiters until we reach this column
int spos = 0, nchars = dstr->chars_count();
int epos = nchars;
for( int c=0; c < (dcount-1); ++c )
{
epos = dstr->find(d_delimiter,bytes,spos);
if( epos < 0 )
{
epos = nchars;
break;
}
if( c==col ) // found our column
break;
spos = epos + bytes;
epos = nchars;
}
// this will be the string for this column
if( spos < epos )
{
spos = dstr->byte_offset_for(spos); // convert char pos
epos = dstr->byte_offset_for(epos); // to byte offset
d_indexes[idx].first = dstr->data() + spos;
d_indexes[idx].second = (epos-spos);
}
});
hipError_t err = hipDeviceSynchronize();
if( err != hipSuccess )
{
fprintf(stderr,"unique_tokens:col=%d\n",col);
printCudaError(err);
}
// add column values to vocab list
vocab.insert(vocab.end(),indexes.begin(),indexes.end());
//printf("vocab size = %lu\n",vocab.size());
thrust::pair<const char*,size_t>* d_vocab = vocab.data().get();
// sort the list
thrust::sort(execpol->on(0), d_vocab, d_vocab + vocab.size(),
[] __device__( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs ) {
if( lhs.first==0 || rhs.first==0 )
return lhs.first==0; // non-null > null
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second) < 0;
});
// unique the list
thrust::pair<const char*,size_t>* newend = thrust::unique(execpol->on(0), d_vocab, d_vocab + vocab.size(),
[] __device__ ( thrust::pair<const char*,size_t> lhs, thrust::pair<const char*,size_t> rhs ) {
if( lhs.first==rhs.first )
return true;
if( lhs.second != rhs.second )
return false;
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second)==0;
});
// truncate list to the unique set
// the above unique() call does an implicit dev-sync
vocab.resize((size_t)(newend - d_vocab));
}
// remove the inevitable 'null' token
thrust::pair<const char*,size_t>* d_vocab = vocab.data().get();
auto end = thrust::remove_if(execpol->on(0), d_vocab, d_vocab + vocab.size(), [] __device__ ( thrust::pair<const char*,size_t> w ) { return w.first==0; } );
unsigned int vsize = (unsigned int)(end - d_vocab); // may need new size
// done
RMM_FREE(d_delimiter,0);
// build strings object from vocab elements
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_vocab,vsize);
}
// return a count of the number of tokens for each string when applying the specified delimiter
unsigned int NVText::token_count( NVStrings& strs, const char* delimiter, unsigned int* results, bool bdevmem )
{
int bytes = (int)strlen(delimiter);
char* d_delimiter = 0;
auto execpol = rmm::exec_policy(0);
RMM_ALLOC(&d_delimiter,bytes,0);
hipMemcpy(d_delimiter,delimiter,bytes,hipMemcpyHostToDevice);
unsigned int count = strs.size();
unsigned int* d_counts = results;
if( !bdevmem )
RMM_ALLOC(&d_counts,count*sizeof(unsigned int),0);
// count how many strings per string
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_delimiter, bytes, d_counts] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
int tc = 0;
if( dstr )
tc = dstr->split_size(d_delimiter,bytes,0,-1);
d_counts[idx] = tc;
});
//
hipError_t err = hipDeviceSynchronize();
if( err != hipSuccess )
printCudaError(err,"token_count:");
//
RMM_FREE(d_delimiter,0);
if( !bdevmem )
{
hipMemcpy(results,d_counts,count*sizeof(unsigned int),hipMemcpyDeviceToHost);
RMM_FREE(d_counts,0);
}
return 0;
}
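// Editorial note, not part of the original source: hypothetical host-side use
// of token_count() above, with the result buffer in host memory
// (bdevmem = false); "strs" stands for an existing NVStrings instance.
//
//   std::vector<unsigned int> counts(strs.size());
//   NVText::token_count(strs, " ", counts.data(), false);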
// return boolean value for each token if found in the provided strings
unsigned int NVText::contains_strings( NVStrings& strs, NVStrings& tkns, bool* results, bool todevice )
{
unsigned int count = strs.size();
unsigned int tcount = tkns.size();
if( results==0 || count==0 || tcount==0 )
return 0;
//
auto execpol = rmm::exec_policy(0);
bool* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,tcount*count*sizeof(bool),0);
//
rmm::device_vector<custring_view*> strings(count,nullptr);
rmm::device_vector<custring_view*> tokens(tcount,nullptr);
custring_view** d_strings = strings.data().get();
custring_view** d_tokens = tokens.data().get();
strs.create_custring_index(d_strings);
tkns.create_custring_index(d_tokens);
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_tokens, tcount, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
for( int jdx=0; jdx < tcount; ++jdx )
{
custring_view* dtgt = d_tokens[jdx];
d_rtn[(idx*tcount)+jdx] = ((dstr && dtgt) ? dstr->find(*dtgt) : -2) >=0 ;
}
});
//
hipError_t err = hipDeviceSynchronize();
if( err != hipSuccess )
{
fprintf(stderr,"contains-strings(%u,%p,%d)\n",tcount,results,(int)todevice);
printCudaError(err);
}
//
if( !todevice )
{ // copy result back to host
hipMemcpy(results,d_rtn,sizeof(bool)*count*tcount,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return 0;
}
// return the number of occurrences of each string within a set of strings
// this will fill in the provided memory as a matrix:
// 'aa' 'bbb' 'c' ...
// "aaaabc" 2 0 1
// "aabbcc" 1 0 2
// "abbbbc" 0 1 1
// ...
unsigned int NVText::strings_counts( NVStrings& strs, NVStrings& tkns, unsigned int* results, bool todevice )
{
unsigned int count = strs.size();
unsigned int tcount = tkns.size();
if( results==0 || count==0 || tcount==0 )
return 0;
//
auto execpol = rmm::exec_policy(0);
unsigned int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,tcount*count*sizeof(unsigned int),0);
//
rmm::device_vector<custring_view*> strings(count,nullptr);
rmm::device_vector<custring_view*> tokens(tcount,nullptr);
custring_view** d_strings = strings.data().get();
custring_view** d_tokens = tokens.data().get();
strs.create_custring_index(d_strings);
tkns.create_custring_index(d_tokens);
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_tokens, tcount, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
for( int jdx=0; jdx < tcount; ++jdx )
{
custring_view* dtgt = d_tokens[jdx];
int fnd = 0;
if( dstr && dtgt )
{
int pos = dstr->find(*dtgt);
while( pos >= 0 )
{
pos = dstr->find(*dtgt,pos+dtgt->chars_count());
++fnd;
}
}
d_rtn[(idx*tcount)+jdx] = fnd;
}
});
//
hipError_t err = hipDeviceSynchronize();
if( err != hipSuccess )
{
fprintf(stderr,"strings-count(%u,%p,%d)\n",tcount,results,(int)todevice);
printCudaError(err);
}
//
if( !todevice )
{ // copy result back to host
hipMemcpy(results,d_rtn,sizeof(unsigned int)*count*tcount,hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return 0;
}
// Documentation here: https://www.cuelogic.com/blog/the-levenshtein-algorithm
// And here: https://en.wikipedia.org/wiki/Levenshtein_distances
struct editdistance_levenshtein_algorithm
{
custring_view** d_strings; // trying match
custring_view* d_tgt; // match with this
custring_view** d_tgts; // or these
short* d_buffer; // compute buffer
size_t* d_offsets; // locate sub-buffer
unsigned int* d_results; // edit-distances
// single string
editdistance_levenshtein_algorithm( custring_view** strings, custring_view* tgt, short* buffer, size_t* offsets, unsigned int* results )
: d_strings(strings), d_tgt(tgt), d_tgts(0), d_buffer(buffer), d_offsets(offsets), d_results(results) {}
// multiple strings
editdistance_levenshtein_algorithm( custring_view** strings, custring_view** tgts, short* buffer, size_t* offsets, unsigned int* results )
: d_strings(strings), d_tgt(0), d_tgts(tgts), d_buffer(buffer), d_offsets(offsets), d_results(results) {}
__device__ void operator() (unsigned int idx)
{
custring_view* dstr = d_strings[idx];
short* buf = (short*)d_buffer + d_offsets[idx];
custring_view* dtgt = d_tgt;
if( !d_tgt )
dtgt = d_tgts[idx];
d_results[idx] = compute_distance(dstr,dtgt,buf);
}
__device__ unsigned int compute_distance( custring_view* dstr, custring_view* dtgt, short* buf )
{
if( !dstr || dstr->empty() )
return dtgt ? dtgt->chars_count() : 0;
if( !dtgt || dtgt->empty() )
return dstr->chars_count();
//
custring_view* strA = dstr;
custring_view* strB = dtgt;
int lenA = (int)dstr->chars_count();
int lenB = (int)dtgt->chars_count();
if( lenA > lenB )
{
lenB = lenA;
lenA = dtgt->chars_count();
strA = dtgt;
strB = dstr;
}
//
short* line2 = buf;
short* line1 = line2 + lenA;
short* line0 = line1 + lenA;
int range = lenA + lenB - 1;
for (int i = 0; i < range; i++)
{
short* tmp = line2;
line2 = line1;
line1 = line0;
line0 = tmp;
for(int x = (i < lenB ? 0 : i - lenB + 1); (x < lenA) && (x < i+1); x++)
{
int y = i - x;
short u = y > 0 ? line1[x] : x + 1;
short v = x > 0 ? line1[x - 1] : y + 1;
short w;
if((x > 0) && (y > 0))
w = line2[x - 1];
else if(x > y)
w = x;
else
w = y;
u++; v++;
Char c1 = strA->at(x);
Char c2 = strB->at(y);
if(c1 != c2)
w++;
short value = u;
if(v < value)
value = v;
if(w < value)
value = w;
line0[x] = value;
}
}
return (unsigned int)line0[lenA-1];
}
};
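// Editorial note, not part of the original source: compute_distance() above
// walks the anti-diagonals of the classic Levenshtein DP matrix and keeps
// only three of them (line2/line1/line0), which is why the callers allocate
// 3 shorts per character of the shorter string as scratch space. As a sanity
// check on the recurrence: for "kitten" vs "sitting" it settles on the
// well-known distance of 3 (substitute k->s, substitute e->i, insert g).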
unsigned int NVText::edit_distance( distance_type algo, NVStrings& strs, const char* str, unsigned int* results, bool bdevmem )
{
if( algo != levenshtein || str==0 || results==0 )
throw std::invalid_argument("invalid algorithm");
unsigned int count = strs.size();
if( count==0 )
return 0; // nothing to do
auto execpol = rmm::exec_policy(0);
unsigned int len = strlen(str);
unsigned int alcsz = custring_view::alloc_size(str,len);
custring_view* d_tgt = 0;
RMM_ALLOC(&d_tgt,alcsz,0);
custring_view::create_from_host(d_tgt,str,len);
// setup results vector
unsigned int* d_rtn = results;
if( !bdevmem )
RMM_ALLOC(&d_rtn,count*sizeof(unsigned int),0);
// get the string pointers
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
    // calculate the size of the compute-buffer: 3 shorts (6 bytes) per character of the shorter string
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_tgt, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int len = dstr->chars_count();
if( d_tgt->chars_count() < len )
len = d_tgt->chars_count();
d_sizes[idx] = len * 3;
});
//
size_t bufsize = thrust::reduce(execpol->on(0), d_sizes, d_sizes+count );
rmm::device_vector<short> buffer(bufsize,0);
short* d_buffer = buffer.data().get();
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0), sizes.begin(), sizes.end(), offsets.begin() );
// compute edit distance
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
editdistance_levenshtein_algorithm(d_strings, d_tgt, d_buffer, d_offsets, d_rtn));
//
hipError_t err = hipDeviceSynchronize();
if( err != hipSuccess )
printCudaError(err,"edit-distance1");
//
RMM_FREE(d_tgt,0);
if( !bdevmem )
{
hipMemcpy(results,d_rtn,count*sizeof(unsigned int),hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return 0;
}
unsigned int NVText::edit_distance( distance_type algo, NVStrings& strs1, NVStrings& strs2, unsigned int* results, bool bdevmem )
{
if( algo != levenshtein )
throw std::invalid_argument("invalid algorithm");
unsigned int count = strs1.size();
if( count != strs2.size() )
throw std::invalid_argument("sizes must match");
if( count==0 )
return 0; // nothing to do
// setup results vector
auto execpol = rmm::exec_policy(0);
unsigned int* d_rtn = results;
if( !bdevmem )
RMM_ALLOC(&d_rtn,count*sizeof(unsigned int),0);
// get the string pointers
rmm::device_vector<custring_view*> strings1(count,nullptr);
custring_view** d_strings1 = strings1.data().get();
strs1.create_custring_index(d_strings1);
rmm::device_vector<custring_view*> strings2(count,nullptr);
custring_view** d_strings2 = strings2.data().get();
strs2.create_custring_index(d_strings2);
    // calculate the size of the compute-buffer: 3 shorts (6 bytes) per character of the shorter string
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings1, d_strings2, d_sizes] __device__(unsigned int idx){
custring_view* dstr1 = d_strings1[idx];
custring_view* dstr2 = d_strings2[idx];
if( !dstr1 || !dstr2 )
return;
int len1 = dstr1->chars_count();
int len = dstr2->chars_count();
if( len1 < len )
len = len1;
d_sizes[idx] = len * 3;
});
//
size_t bufsize = thrust::reduce(execpol->on(0), d_sizes, d_sizes+count );
rmm::device_vector<short> buffer(bufsize,0);
short* d_buffer = buffer.data().get();
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0), sizes.begin(), sizes.end(), offsets.begin() );
// compute edit distance
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
editdistance_levenshtein_algorithm(d_strings1, d_strings2, d_buffer, d_offsets, d_rtn));
//
hipError_t err = hipDeviceSynchronize();
if( err != hipSuccess )
printCudaError(err,"edit-distance2");
//
if( !bdevmem )
{
hipMemcpy(results,d_rtn,count*sizeof(unsigned int),hipMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return 0;
}
| 4c773c3e190f182a72944f47f46c5df78208ec03.cu | /*
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/extrema.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <locale.h>
#include <rmm/rmm.h>
#include <rmm/thrust_rmm_allocator.h>
#include "NVStrings.h"
#include "custring_view.cuh"
#include "custring.cuh"
#include "NVText.h"
static void printCudaError( cudaError_t err, const char* prefix="\t" )
{
if( err != cudaSuccess )
fprintf(stderr,"%s: %s(%d):%s\n",prefix,cudaGetErrorName(err),(int)err,cudaGetErrorString(err));
}
// return unique set of tokens within all the strings using the specified delimiter
NVStrings* NVText::unique_tokens(NVStrings& strs, const char* delimiter )
{
int bytes = (int)strlen(delimiter);
char* d_delimiter = 0;
auto execpol = rmm::exec_policy(0);
RMM_ALLOC(&d_delimiter,bytes,0);
cudaMemcpy(d_delimiter,delimiter,bytes,cudaMemcpyHostToDevice);
// need to count how many output strings per string
unsigned int count = strs.size();
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
rmm::device_vector<int> counts(count,0);
int* d_counts = counts.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_delimiter, bytes, d_counts] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( dstr )
d_counts[idx] = dstr->split_size(d_delimiter,bytes,0,-1);
});
int columnsCount = *thrust::max_element(execpol->on(0), counts.begin(), counts.end() );
// build an index for each column and then sort/unique it
rmm::device_vector< thrust::pair<const char*,size_t> > vocab;
for( int col=0; col < columnsCount; ++col )
{
// first, build a vector of pair<char*,int>'s' for each column
// each pair points to a string for this column for each row
rmm::device_vector< thrust::pair<const char*,size_t> > indexes(count);
thrust::pair<const char*,size_t>* d_indexes = indexes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, col, d_delimiter, bytes, d_counts, d_indexes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
d_indexes[idx].first = 0; // initialize to
d_indexes[idx].second = 0; // null string
if( !dstr )
return;
// dcount already accounts for the maxsplit value
int dcount = d_counts[idx];
if( col >= dcount )
return; // passed the end for this string
// skip delimiters until we reach this column
int spos = 0, nchars = dstr->chars_count();
int epos = nchars;
for( int c=0; c < (dcount-1); ++c )
{
epos = dstr->find(d_delimiter,bytes,spos);
if( epos < 0 )
{
epos = nchars;
break;
}
if( c==col ) // found our column
break;
spos = epos + bytes;
epos = nchars;
}
// this will be the string for this column
if( spos < epos )
{
spos = dstr->byte_offset_for(spos); // convert char pos
epos = dstr->byte_offset_for(epos); // to byte offset
d_indexes[idx].first = dstr->data() + spos;
d_indexes[idx].second = (epos-spos);
}
});
cudaError_t err = cudaDeviceSynchronize();
if( err != cudaSuccess )
{
fprintf(stderr,"unique_tokens:col=%d\n",col);
printCudaError(err);
}
// add column values to vocab list
vocab.insert(vocab.end(),indexes.begin(),indexes.end());
//printf("vocab size = %lu\n",vocab.size());
thrust::pair<const char*,size_t>* d_vocab = vocab.data().get();
// sort the list
thrust::sort(execpol->on(0), d_vocab, d_vocab + vocab.size(),
[] __device__( thrust::pair<const char*,size_t>& lhs, thrust::pair<const char*,size_t>& rhs ) {
if( lhs.first==0 || rhs.first==0 )
return lhs.first==0; // non-null > null
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second) < 0;
});
// unique the list
thrust::pair<const char*,size_t>* newend = thrust::unique(execpol->on(0), d_vocab, d_vocab + vocab.size(),
[] __device__ ( thrust::pair<const char*,size_t> lhs, thrust::pair<const char*,size_t> rhs ) {
if( lhs.first==rhs.first )
return true;
if( lhs.second != rhs.second )
return false;
return custr::compare(lhs.first,(unsigned int)lhs.second,rhs.first,(unsigned int)rhs.second)==0;
});
// truncate list to the unique set
// the above unique() call does an implicit dev-sync
vocab.resize((size_t)(newend - d_vocab));
}
// remove the inevitable 'null' token
thrust::pair<const char*,size_t>* d_vocab = vocab.data().get();
auto end = thrust::remove_if(execpol->on(0), d_vocab, d_vocab + vocab.size(), [] __device__ ( thrust::pair<const char*,size_t> w ) { return w.first==0; } );
unsigned int vsize = (unsigned int)(end - d_vocab); // may need new size
// done
RMM_FREE(d_delimiter,0);
// build strings object from vocab elements
return NVStrings::create_from_index((std::pair<const char*,size_t>*)d_vocab,vsize);
}
// return a count of the number of tokens for each string when applying the specified delimiter
unsigned int NVText::token_count( NVStrings& strs, const char* delimiter, unsigned int* results, bool bdevmem )
{
int bytes = (int)strlen(delimiter);
char* d_delimiter = 0;
auto execpol = rmm::exec_policy(0);
RMM_ALLOC(&d_delimiter,bytes,0);
cudaMemcpy(d_delimiter,delimiter,bytes,cudaMemcpyHostToDevice);
unsigned int count = strs.size();
unsigned int* d_counts = results;
if( !bdevmem )
RMM_ALLOC(&d_counts,count*sizeof(unsigned int),0);
// count how many strings per string
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_delimiter, bytes, d_counts] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
int tc = 0;
if( dstr )
tc = dstr->split_size(d_delimiter,bytes,0,-1);
d_counts[idx] = tc;
});
//
cudaError_t err = cudaDeviceSynchronize();
if( err != cudaSuccess )
printCudaError(err,"token_count:");
//
RMM_FREE(d_delimiter,0);
if( !bdevmem )
{
cudaMemcpy(results,d_counts,count*sizeof(unsigned int),cudaMemcpyDeviceToHost);
RMM_FREE(d_counts,0);
}
return 0;
}
// return boolean value for each token if found in the provided strings
unsigned int NVText::contains_strings( NVStrings& strs, NVStrings& tkns, bool* results, bool todevice )
{
unsigned int count = strs.size();
unsigned int tcount = tkns.size();
if( results==0 || count==0 || tcount==0 )
return 0;
//
auto execpol = rmm::exec_policy(0);
bool* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,tcount*count*sizeof(bool),0);
//
rmm::device_vector<custring_view*> strings(count,nullptr);
rmm::device_vector<custring_view*> tokens(tcount,nullptr);
custring_view** d_strings = strings.data().get();
custring_view** d_tokens = tokens.data().get();
strs.create_custring_index(d_strings);
tkns.create_custring_index(d_tokens);
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_tokens, tcount, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
for( int jdx=0; jdx < tcount; ++jdx )
{
custring_view* dtgt = d_tokens[jdx];
d_rtn[(idx*tcount)+jdx] = ((dstr && dtgt) ? dstr->find(*dtgt) : -2) >=0 ;
}
});
//
cudaError_t err = cudaDeviceSynchronize();
if( err != cudaSuccess )
{
fprintf(stderr,"contains-strings(%u,%p,%d)\n",tcount,results,(int)todevice);
printCudaError(err);
}
//
if( !todevice )
{ // copy result back to host
cudaMemcpy(results,d_rtn,sizeof(bool)*count*tcount,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return 0;
}
// return the number of occurrences of each string within a set of strings
// this will fill in the provided memory as a matrix:
// 'aa' 'bbb' 'c' ...
// "aaaabc" 2 0 1
// "aabbcc" 1 0 2
// "abbbbc" 0 1 1
// ...
unsigned int NVText::strings_counts( NVStrings& strs, NVStrings& tkns, unsigned int* results, bool todevice )
{
unsigned int count = strs.size();
unsigned int tcount = tkns.size();
if( results==0 || count==0 || tcount==0 )
return 0;
//
auto execpol = rmm::exec_policy(0);
unsigned int* d_rtn = results;
if( !todevice )
RMM_ALLOC(&d_rtn,tcount*count*sizeof(unsigned int),0);
//
rmm::device_vector<custring_view*> strings(count,nullptr);
rmm::device_vector<custring_view*> tokens(tcount,nullptr);
custring_view** d_strings = strings.data().get();
custring_view** d_tokens = tokens.data().get();
strs.create_custring_index(d_strings);
tkns.create_custring_index(d_tokens);
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_tokens, tcount, d_rtn] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
for( int jdx=0; jdx < tcount; ++jdx )
{
custring_view* dtgt = d_tokens[jdx];
int fnd = 0;
if( dstr && dtgt )
{
int pos = dstr->find(*dtgt);
while( pos >= 0 )
{
pos = dstr->find(*dtgt,pos+dtgt->chars_count());
++fnd;
}
}
d_rtn[(idx*tcount)+jdx] = fnd;
}
});
//
cudaError_t err = cudaDeviceSynchronize();
if( err != cudaSuccess )
{
fprintf(stderr,"strings-count(%u,%p,%d)\n",tcount,results,(int)todevice);
printCudaError(err);
}
//
if( !todevice )
{ // copy result back to host
cudaMemcpy(results,d_rtn,sizeof(unsigned int)*count*tcount,cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return 0;
}
// Documentation here: https://www.cuelogic.com/blog/the-levenshtein-algorithm
// And here: https://en.wikipedia.org/wiki/Levenshtein_distances
struct editdistance_levenshtein_algorithm
{
custring_view** d_strings; // trying match
custring_view* d_tgt; // match with this
custring_view** d_tgts; // or these
short* d_buffer; // compute buffer
size_t* d_offsets; // locate sub-buffer
unsigned int* d_results; // edit-distances
// single string
editdistance_levenshtein_algorithm( custring_view** strings, custring_view* tgt, short* buffer, size_t* offsets, unsigned int* results )
: d_strings(strings), d_tgt(tgt), d_tgts(0), d_buffer(buffer), d_offsets(offsets), d_results(results) {}
// multiple strings
editdistance_levenshtein_algorithm( custring_view** strings, custring_view** tgts, short* buffer, size_t* offsets, unsigned int* results )
: d_strings(strings), d_tgt(0), d_tgts(tgts), d_buffer(buffer), d_offsets(offsets), d_results(results) {}
__device__ void operator() (unsigned int idx)
{
custring_view* dstr = d_strings[idx];
short* buf = (short*)d_buffer + d_offsets[idx];
custring_view* dtgt = d_tgt;
if( !d_tgt )
dtgt = d_tgts[idx];
d_results[idx] = compute_distance(dstr,dtgt,buf);
}
__device__ unsigned int compute_distance( custring_view* dstr, custring_view* dtgt, short* buf )
{
if( !dstr || dstr->empty() )
return dtgt ? dtgt->chars_count() : 0;
if( !dtgt || dtgt->empty() )
return dstr->chars_count();
//
custring_view* strA = dstr;
custring_view* strB = dtgt;
int lenA = (int)dstr->chars_count();
int lenB = (int)dtgt->chars_count();
if( lenA > lenB )
{
lenB = lenA;
lenA = dtgt->chars_count();
strA = dtgt;
strB = dstr;
}
//
short* line2 = buf;
short* line1 = line2 + lenA;
short* line0 = line1 + lenA;
int range = lenA + lenB - 1;
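        // sweep the anti-diagonals of the DP table; line2/line1 hold the two
        // previously computed diagonals and line0 receives the current one,
        // with the three pointers rotating at each iteration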
for (int i = 0; i < range; i++)
{
short* tmp = line2;
line2 = line1;
line1 = line0;
line0 = tmp;
for(int x = (i < lenB ? 0 : i - lenB + 1); (x < lenA) && (x < i+1); x++)
{
int y = i - x;
short u = y > 0 ? line1[x] : x + 1;
short v = x > 0 ? line1[x - 1] : y + 1;
short w;
if((x > 0) && (y > 0))
w = line2[x - 1];
else if(x > y)
w = x;
else
w = y;
u++; v++;
Char c1 = strA->at(x);
Char c2 = strB->at(y);
if(c1 != c2)
w++;
short value = u;
if(v < value)
value = v;
if(w < value)
value = w;
line0[x] = value;
}
}
return (unsigned int)line0[lenA-1];
}
};
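// compute the Levenshtein distance from every string in strs to the single target str;
// results receives one distance per string (device or host memory according to bdevmem)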
unsigned int NVText::edit_distance( distance_type algo, NVStrings& strs, const char* str, unsigned int* results, bool bdevmem )
{
if( algo != levenshtein || str==0 || results==0 )
throw std::invalid_argument("invalid algorithm");
unsigned int count = strs.size();
if( count==0 )
return 0; // nothing to do
auto execpol = rmm::exec_policy(0);
unsigned int len = strlen(str);
unsigned int alcsz = custring_view::alloc_size(str,len);
custring_view* d_tgt = 0;
RMM_ALLOC(&d_tgt,alcsz,0);
custring_view::create_from_host(d_tgt,str,len);
// setup results vector
unsigned int* d_rtn = results;
if( !bdevmem )
RMM_ALLOC(&d_rtn,count*sizeof(unsigned int),0);
// get the string pointers
rmm::device_vector<custring_view*> strings(count,nullptr);
custring_view** d_strings = strings.data().get();
strs.create_custring_index(d_strings);
    // calculate the size of the compute buffer: 3 shorts (6 bytes) per character of the shorter of each string and the target
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings, d_tgt, d_sizes] __device__(unsigned int idx){
custring_view* dstr = d_strings[idx];
if( !dstr )
return;
int len = dstr->chars_count();
if( d_tgt->chars_count() < len )
len = d_tgt->chars_count();
d_sizes[idx] = len * 3;
});
//
size_t bufsize = thrust::reduce(execpol->on(0), d_sizes, d_sizes+count );
rmm::device_vector<short> buffer(bufsize,0);
short* d_buffer = buffer.data().get();
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0), sizes.begin(), sizes.end(), offsets.begin() );
// compute edit distance
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
editdistance_levenshtein_algorithm(d_strings, d_tgt, d_buffer, d_offsets, d_rtn));
//
cudaError_t err = cudaDeviceSynchronize();
if( err != cudaSuccess )
printCudaError(err,"edit-distance1");
//
RMM_FREE(d_tgt,0);
if( !bdevmem )
{
cudaMemcpy(results,d_rtn,count*sizeof(unsigned int),cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return 0;
}
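// compute the Levenshtein distance between corresponding elements of strs1 and strs2;
// the two instances must contain the same number of strings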
unsigned int NVText::edit_distance( distance_type algo, NVStrings& strs1, NVStrings& strs2, unsigned int* results, bool bdevmem )
{
if( algo != levenshtein )
throw std::invalid_argument("invalid algorithm");
unsigned int count = strs1.size();
if( count != strs2.size() )
throw std::invalid_argument("sizes must match");
if( count==0 )
return 0; // nothing to do
// setup results vector
auto execpol = rmm::exec_policy(0);
unsigned int* d_rtn = results;
if( !bdevmem )
RMM_ALLOC(&d_rtn,count*sizeof(unsigned int),0);
// get the string pointers
rmm::device_vector<custring_view*> strings1(count,nullptr);
custring_view** d_strings1 = strings1.data().get();
strs1.create_custring_index(d_strings1);
rmm::device_vector<custring_view*> strings2(count,nullptr);
custring_view** d_strings2 = strings2.data().get();
strs2.create_custring_index(d_strings2);
    // calculate the size of the compute buffer: 3 shorts (6 bytes) per character of the shorter string in each pair
rmm::device_vector<size_t> sizes(count,0);
size_t* d_sizes = sizes.data().get();
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
[d_strings1, d_strings2, d_sizes] __device__(unsigned int idx){
custring_view* dstr1 = d_strings1[idx];
custring_view* dstr2 = d_strings2[idx];
if( !dstr1 || !dstr2 )
return;
int len1 = dstr1->chars_count();
int len = dstr2->chars_count();
if( len1 < len )
len = len1;
d_sizes[idx] = len * 3;
});
//
size_t bufsize = thrust::reduce(execpol->on(0), d_sizes, d_sizes+count );
rmm::device_vector<short> buffer(bufsize,0);
short* d_buffer = buffer.data().get();
rmm::device_vector<size_t> offsets(count,0);
size_t* d_offsets = offsets.data().get();
thrust::exclusive_scan(execpol->on(0), sizes.begin(), sizes.end(), offsets.begin() );
// compute edit distance
thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count,
editdistance_levenshtein_algorithm(d_strings1, d_strings2, d_buffer, d_offsets, d_rtn));
//
cudaError_t err = cudaDeviceSynchronize();
if( err != cudaSuccess )
printCudaError(err,"edit-distance2");
//
if( !bdevmem )
{
cudaMemcpy(results,d_rtn,count*sizeof(unsigned int),cudaMemcpyDeviceToHost);
RMM_FREE(d_rtn,0);
}
return 0;
}
|
df0afedd3f44ac3c21265d3d8cc11b82413346aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
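// grid-stride loop: adds 'val' to each diagonal element of the width x width matrix
// 'mat' and stores the result in 'tgtMat' (tgtMat may alias mat, since each thread
// reads and writes the same element)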
__global__ void kAddDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] + val;
}
} | df0afedd3f44ac3c21265d3d8cc11b82413346aa.cu | #include "includes.h"
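// grid-stride loop: adds 'val' to each diagonal element of the width x width matrix
// 'mat' and stores the result in 'tgtMat' (tgtMat may alias mat, since each thread
// reads and writes the same element)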
__global__ void kAddDiagonalScalar(float* mat, float val, float* tgtMat, unsigned int width) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width; i += numThreads) {
tgtMat[width*i + i] = mat[width*i + i] + val;
}
} |
ef1e5dbe6935493e6a40ffe05d1b8807004392e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of BlockLoad and BlockStore utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <iterator>
#include <stdio.h>
#include <hipcub/hipcub.hpp>
#include <cub/block/block_store.cuh>
#include <cub/iterator/cache_modified_input_iterator.cuh>
#include <cub/iterator/cache_modified_output_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include <hipcub/hipcub.hpp>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
CachingDeviceAllocator g_allocator(true);
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/**
* Test load/store kernel.
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
typename InputIteratorT,
typename OutputIteratorT>
__launch_bounds__ (BLOCK_THREADS, 1)
__global__ void Kernel(
InputIteratorT d_in,
OutputIteratorT d_out_unguarded,
OutputIteratorT d_out_guarded,
int num_items)
{
enum
{
TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD
};
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
// Threadblock load/store abstraction types
typedef BlockLoad<InputT, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM> BlockLoad;
typedef BlockStore<OutputT, BLOCK_THREADS, ITEMS_PER_THREAD, STORE_ALGORITHM> BlockStore;
// Shared memory type for this thread block
union TempStorage
{
typename BlockLoad::TempStorage load;
typename BlockStore::TempStorage store;
};
// Allocate temp storage in shared memory
__shared__ TempStorage temp_storage;
// Threadblock work bounds
int block_offset = blockIdx.x * TILE_SIZE;
int guarded_elements = num_items - block_offset;
// Tile of items
OutputT data[ITEMS_PER_THREAD];
// Load data
BlockLoad(temp_storage.load).Load(d_in + block_offset, data);
__syncthreads();
// Store data
BlockStore(temp_storage.store).Store(d_out_unguarded + block_offset, data);
__syncthreads();
// reset data
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
data[ITEM] = OutputT();
__syncthreads();
// Load data
BlockLoad(temp_storage.load).Load(d_in + block_offset, data, guarded_elements);
__syncthreads();
// Store data
BlockStore(temp_storage.store).Store(d_out_guarded + block_offset, data, guarded_elements);
}
//---------------------------------------------------------------------
// Host testing subroutines
//---------------------------------------------------------------------
/**
* Test load/store variants
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
typename InputIteratorT,
typename OutputIteratorT>
void TestKernel(
T *h_in,
InputIteratorT d_in,
OutputIteratorT d_out_unguarded_itr,
OutputIteratorT d_out_guarded_itr,
T *d_out_unguarded_ptr,
T *d_out_guarded_ptr,
int grid_size,
int guarded_elements)
{
int compare;
int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD;
// Test with discard output iterator
typedef typename std::iterator_traits<InputIteratorT>::difference_type OffsetT;
DiscardOutputIterator<OffsetT> discard_itr;
hipLaunchKernelGGL(( Kernel<BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>)
, dim3(grid_size), dim3(BLOCK_THREADS), 0, 0,
d_in,
discard_itr,
discard_itr,
guarded_elements);
// Test with regular output iterator
hipLaunchKernelGGL(( Kernel<BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>)
, dim3(grid_size), dim3(BLOCK_THREADS), 0, 0,
d_in,
d_out_unguarded_itr,
d_out_guarded_itr,
guarded_elements);
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Check results
compare = CompareDeviceResults(h_in, d_out_guarded_ptr, guarded_elements, g_verbose, g_verbose);
printf("\tGuarded: %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Check results
compare = CompareDeviceResults(h_in, d_out_unguarded_ptr, unguarded_elements, g_verbose, g_verbose);
printf("\tUnguarded: %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
}
/**
* Test native pointer. Specialized for sufficient resources
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
void TestNative(
int grid_size,
float fraction_valid,
Int2Type<true> /*sufficient_resources*/)
{
int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD;
int guarded_elements = int(fraction_valid * float(unguarded_elements));
// Allocate host arrays
T *h_in = (T*) malloc(unguarded_elements * sizeof(T));
// Allocate device arrays
T *d_in = NULL;
T *d_out_unguarded = NULL;
T *d_out_guarded = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_unguarded, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_guarded, sizeof(T) * guarded_elements));
CubDebugExit(hipMemset(d_out_unguarded, 0, sizeof(T) * unguarded_elements));
CubDebugExit(hipMemset(d_out_guarded, 0, sizeof(T) * guarded_elements));
// Initialize problem on host and device
for (int i = 0; i < unguarded_elements; ++i)
{
InitValue(INTEGER_SEED, h_in[i], i);
}
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * unguarded_elements, hipMemcpyHostToDevice));
printf("TestNative "
"grid_size(%d) "
"guarded_elements(%d) "
"unguarded_elements(%d) "
"BLOCK_THREADS(%d) "
"ITEMS_PER_THREAD(%d) "
"LOAD_ALGORITHM(%d) "
"STORE_ALGORITHM(%d) "
"sizeof(T)(%d)\n",
grid_size, guarded_elements, unguarded_elements, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, (int) sizeof(T));
TestKernel<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>(
h_in,
(T const *) d_in, // Test const
d_out_unguarded,
d_out_guarded,
d_out_unguarded,
d_out_guarded,
grid_size,
guarded_elements);
// Cleanup
if (h_in) free(h_in);
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out_unguarded) CubDebugExit(g_allocator.DeviceFree(d_out_unguarded));
if (d_out_guarded) CubDebugExit(g_allocator.DeviceFree(d_out_guarded));
}
/**
* Test native pointer. Specialized for insufficient resources
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
void TestNative(
int /*grid_size*/,
float /*fraction_valid*/,
Int2Type<false> /*sufficient_resources*/)
{}
/**
* Test iterator. Specialized for sufficient resources.
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
CacheLoadModifier LOAD_MODIFIER,
CacheStoreModifier STORE_MODIFIER>
void TestIterator(
int grid_size,
float fraction_valid,
Int2Type<true> /*sufficient_resources*/)
{
int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD;
int guarded_elements = int(fraction_valid * float(unguarded_elements));
// Allocate host arrays
T *h_in = (T*) malloc(unguarded_elements * sizeof(T));
// Allocate device arrays
T *d_in = NULL;
T *d_out_unguarded = NULL;
T *d_out_guarded = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_unguarded, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_guarded, sizeof(T) * guarded_elements));
CubDebugExit(hipMemset(d_out_unguarded, 0, sizeof(T) * unguarded_elements));
CubDebugExit(hipMemset(d_out_guarded, 0, sizeof(T) * guarded_elements));
// Initialize problem on host and device
for (int i = 0; i < unguarded_elements; ++i)
{
InitValue(INTEGER_SEED, h_in[i], i);
}
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * unguarded_elements, hipMemcpyHostToDevice));
printf("TestIterator "
"grid_size(%d) "
"guarded_elements(%d) "
"unguarded_elements(%d) "
"BLOCK_THREADS(%d) "
"ITEMS_PER_THREAD(%d) "
"LOAD_ALGORITHM(%d) "
"STORE_ALGORITHM(%d) "
"LOAD_MODIFIER(%d) "
"STORE_MODIFIER(%d) "
"sizeof(T)(%d)\n",
grid_size, guarded_elements, unguarded_elements, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, LOAD_MODIFIER, STORE_MODIFIER, (int) sizeof(T));
TestKernel<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>(
h_in,
CacheModifiedInputIterator<LOAD_MODIFIER, T>(d_in),
CacheModifiedOutputIterator<STORE_MODIFIER, T>(d_out_unguarded),
CacheModifiedOutputIterator<STORE_MODIFIER, T>(d_out_guarded),
d_out_unguarded,
d_out_guarded,
grid_size,
guarded_elements);
// Cleanup
if (h_in) free(h_in);
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out_unguarded) CubDebugExit(g_allocator.DeviceFree(d_out_unguarded));
if (d_out_guarded) CubDebugExit(g_allocator.DeviceFree(d_out_guarded));
}
/**
* Test iterator. Specialized for insufficient resources.
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
CacheLoadModifier LOAD_MODIFIER,
CacheStoreModifier STORE_MODIFIER>
void TestIterator(
int /*grid_size*/,
float /*fraction_valid*/,
Int2Type<false> /*sufficient_resources*/)
{}
/**
* Evaluate different pointer access types
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
void TestPointerType(
int grid_size,
float fraction_valid)
{
// Threadblock load/store abstraction types
typedef BlockLoad<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM> BlockLoad;
typedef BlockStore<T, BLOCK_THREADS, ITEMS_PER_THREAD, STORE_ALGORITHM> BlockStore;
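    // resource check: sm_1x devices provide 16KB of shared memory and at most
    // 512 threads per block; later architectures are checked against 48KB and 1024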
#if defined(SM100) || defined(SM110) || defined(SM130)
static const bool sufficient_load_smem = sizeof(typename BlockLoad::TempStorage) <= 1024 * 16;
static const bool sufficient_store_smem = sizeof(typename BlockStore::TempStorage) <= 1024 * 16;
static const bool sufficient_threads = BLOCK_THREADS <= 512;
#else
static const bool sufficient_load_smem = sizeof(typename BlockLoad::TempStorage) <= 1024 * 48;
static const bool sufficient_store_smem = sizeof(typename BlockStore::TempStorage) <= 1024 * 48;
static const bool sufficient_threads = BLOCK_THREADS <= 1024;
#endif
static const bool sufficient_resources = sufficient_load_smem && sufficient_store_smem && sufficient_threads;
TestNative<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>(grid_size, fraction_valid, Int2Type<sufficient_resources>());
TestIterator<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, LOAD_DEFAULT, STORE_DEFAULT>(grid_size, fraction_valid, Int2Type<sufficient_resources>());
}
/**
* Evaluate different time-slicing strategies
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
void TestSlicedStrategy(
int grid_size,
float fraction_valid)
{
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, true>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, false>(grid_size, fraction_valid);
}
/**
* Evaluate different load/store strategies (specialized for block sizes that are not a multiple of 32)
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD>
void TestStrategy(
int grid_size,
float fraction_valid,
Int2Type<false> /*is_warp_multiple*/)
{
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_DIRECT, BLOCK_STORE_DIRECT>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_TRANSPOSE, BLOCK_STORE_TRANSPOSE>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_VECTORIZE, BLOCK_STORE_VECTORIZE>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_STRIPED, BLOCK_STORE_STRIPED>(grid_size, fraction_valid);
}
/**
* Evaluate different load/store strategies (specialized for block sizes that are a multiple of 32)
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD>
void TestStrategy(
int grid_size,
float fraction_valid,
Int2Type<true> /*is_warp_multiple*/)
{
TestStrategy<T, BLOCK_THREADS, ITEMS_PER_THREAD>(grid_size, fraction_valid, Int2Type<false>());
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, BLOCK_STORE_WARP_TRANSPOSE>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED, BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED>(grid_size, fraction_valid);
}
/**
* Evaluate different register blocking
*/
template <
typename T,
int BLOCK_THREADS>
void TestItemsPerThread(
int grid_size,
float fraction_valid)
{
Int2Type<BLOCK_THREADS % 32 == 0> is_warp_multiple;
TestStrategy<T, BLOCK_THREADS, 1>(grid_size, fraction_valid, is_warp_multiple);
TestStrategy<T, BLOCK_THREADS, 3>(grid_size, fraction_valid, is_warp_multiple);
TestStrategy<T, BLOCK_THREADS, 4>(grid_size, fraction_valid, is_warp_multiple);
TestStrategy<T, BLOCK_THREADS, 11>(grid_size, fraction_valid, is_warp_multiple);
}
/**
* Evaluate different thread block sizes
*/
template <typename T>
void TestThreads(
int grid_size,
float fraction_valid)
{
TestItemsPerThread<T, 15>(grid_size, fraction_valid);
TestItemsPerThread<T, 32>(grid_size, fraction_valid);
TestItemsPerThread<T, 72>(grid_size, fraction_valid);
TestItemsPerThread<T, 96>(grid_size, fraction_valid);
TestItemsPerThread<T, 128>(grid_size, fraction_valid);
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Get ptx version
int ptx_version = 0;
CubDebugExit(PtxVersion(ptx_version));
#ifdef CUB_TEST_BENCHMARK
// Compile/run quick tests
TestNative< int, 64, 2, BLOCK_LOAD_WARP_TRANSPOSE, BLOCK_STORE_WARP_TRANSPOSE>(1, 0.8f, Int2Type<true>());
TestIterator< int, 64, 2, BLOCK_LOAD_WARP_TRANSPOSE, BLOCK_STORE_WARP_TRANSPOSE, LOAD_DEFAULT, STORE_DEFAULT>(1, 0.8f, Int2Type<true>());
#else
// Compile/run thorough tests
TestThreads<char>(2, 0.8f);
TestThreads<int>(2, 0.8f);
TestThreads<long>(2, 0.8f);
TestThreads<long2>(2, 0.8f);
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
TestThreads<double2>(2, 0.8f);
TestThreads<TestFoo>(2, 0.8f);
TestThreads<TestBar>(2, 0.8f);
#endif
return 0;
}
| ef1e5dbe6935493e6a40ffe05d1b8807004392e2.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of BlockLoad and BlockStore utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <iterator>
#include <stdio.h>
#include <cub/block/block_load.cuh>
#include <cub/block/block_store.cuh>
#include <cub/iterator/cache_modified_input_iterator.cuh>
#include <cub/iterator/cache_modified_output_iterator.cuh>
#include <cub/iterator/discard_output_iterator.cuh>
#include <cub/util_allocator.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
CachingDeviceAllocator g_allocator(true);
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/**
* Test load/store kernel.
*/
template <
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
typename InputIteratorT,
typename OutputIteratorT>
__launch_bounds__ (BLOCK_THREADS, 1)
__global__ void Kernel(
InputIteratorT d_in,
OutputIteratorT d_out_unguarded,
OutputIteratorT d_out_guarded,
int num_items)
{
enum
{
TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD
};
// The input value type
typedef typename std::iterator_traits<InputIteratorT>::value_type InputT;
// The output value type
typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ?
typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type,
typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type
// Threadblock load/store abstraction types
typedef BlockLoad<InputT, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM> BlockLoad;
typedef BlockStore<OutputT, BLOCK_THREADS, ITEMS_PER_THREAD, STORE_ALGORITHM> BlockStore;
// Shared memory type for this thread block
union TempStorage
{
typename BlockLoad::TempStorage load;
typename BlockStore::TempStorage store;
};
// Allocate temp storage in shared memory
__shared__ TempStorage temp_storage;
// Threadblock work bounds
int block_offset = blockIdx.x * TILE_SIZE;
int guarded_elements = num_items - block_offset;
// Tile of items
OutputT data[ITEMS_PER_THREAD];
// Load data
BlockLoad(temp_storage.load).Load(d_in + block_offset, data);
__syncthreads();
// Store data
BlockStore(temp_storage.store).Store(d_out_unguarded + block_offset, data);
__syncthreads();
// reset data
#pragma unroll
for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ++ITEM)
data[ITEM] = OutputT();
__syncthreads();
// Load data
BlockLoad(temp_storage.load).Load(d_in + block_offset, data, guarded_elements);
__syncthreads();
// Store data
BlockStore(temp_storage.store).Store(d_out_guarded + block_offset, data, guarded_elements);
}
//---------------------------------------------------------------------
// Host testing subroutines
//---------------------------------------------------------------------
/**
* Test load/store variants
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
typename InputIteratorT,
typename OutputIteratorT>
void TestKernel(
T *h_in,
InputIteratorT d_in,
OutputIteratorT d_out_unguarded_itr,
OutputIteratorT d_out_guarded_itr,
T *d_out_unguarded_ptr,
T *d_out_guarded_ptr,
int grid_size,
int guarded_elements)
{
int compare;
int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD;
// Test with discard output iterator
typedef typename std::iterator_traits<InputIteratorT>::difference_type OffsetT;
DiscardOutputIterator<OffsetT> discard_itr;
Kernel<BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>
<<<grid_size, BLOCK_THREADS>>>(
d_in,
discard_itr,
discard_itr,
guarded_elements);
// Test with regular output iterator
Kernel<BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>
<<<grid_size, BLOCK_THREADS>>>(
d_in,
d_out_unguarded_itr,
d_out_guarded_itr,
guarded_elements);
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Check results
compare = CompareDeviceResults(h_in, d_out_guarded_ptr, guarded_elements, g_verbose, g_verbose);
printf("\tGuarded: %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
// Check results
compare = CompareDeviceResults(h_in, d_out_unguarded_ptr, unguarded_elements, g_verbose, g_verbose);
printf("\tUnguarded: %s\n", (compare) ? "FAIL" : "PASS");
AssertEquals(0, compare);
}
/**
* Test native pointer. Specialized for sufficient resources
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
void TestNative(
int grid_size,
float fraction_valid,
Int2Type<true> /*sufficient_resources*/)
{
int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD;
int guarded_elements = int(fraction_valid * float(unguarded_elements));
// Allocate host arrays
T *h_in = (T*) malloc(unguarded_elements * sizeof(T));
// Allocate device arrays
T *d_in = NULL;
T *d_out_unguarded = NULL;
T *d_out_guarded = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_unguarded, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_guarded, sizeof(T) * guarded_elements));
CubDebugExit(cudaMemset(d_out_unguarded, 0, sizeof(T) * unguarded_elements));
CubDebugExit(cudaMemset(d_out_guarded, 0, sizeof(T) * guarded_elements));
// Initialize problem on host and device
for (int i = 0; i < unguarded_elements; ++i)
{
InitValue(INTEGER_SEED, h_in[i], i);
}
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * unguarded_elements, cudaMemcpyHostToDevice));
printf("TestNative "
"grid_size(%d) "
"guarded_elements(%d) "
"unguarded_elements(%d) "
"BLOCK_THREADS(%d) "
"ITEMS_PER_THREAD(%d) "
"LOAD_ALGORITHM(%d) "
"STORE_ALGORITHM(%d) "
"sizeof(T)(%d)\n",
grid_size, guarded_elements, unguarded_elements, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, (int) sizeof(T));
TestKernel<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>(
h_in,
(T const *) d_in, // Test const
d_out_unguarded,
d_out_guarded,
d_out_unguarded,
d_out_guarded,
grid_size,
guarded_elements);
// Cleanup
if (h_in) free(h_in);
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out_unguarded) CubDebugExit(g_allocator.DeviceFree(d_out_unguarded));
if (d_out_guarded) CubDebugExit(g_allocator.DeviceFree(d_out_guarded));
}
/**
* Test native pointer. Specialized for insufficient resources
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
void TestNative(
int /*grid_size*/,
float /*fraction_valid*/,
Int2Type<false> /*sufficient_resources*/)
{}
/**
* Test iterator. Specialized for sufficient resources.
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
CacheLoadModifier LOAD_MODIFIER,
CacheStoreModifier STORE_MODIFIER>
void TestIterator(
int grid_size,
float fraction_valid,
Int2Type<true> /*sufficient_resources*/)
{
int unguarded_elements = grid_size * BLOCK_THREADS * ITEMS_PER_THREAD;
int guarded_elements = int(fraction_valid * float(unguarded_elements));
// Allocate host arrays
T *h_in = (T*) malloc(unguarded_elements * sizeof(T));
// Allocate device arrays
T *d_in = NULL;
T *d_out_unguarded = NULL;
T *d_out_guarded = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_unguarded, sizeof(T) * unguarded_elements));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out_guarded, sizeof(T) * guarded_elements));
CubDebugExit(cudaMemset(d_out_unguarded, 0, sizeof(T) * unguarded_elements));
CubDebugExit(cudaMemset(d_out_guarded, 0, sizeof(T) * guarded_elements));
// Initialize problem on host and device
for (int i = 0; i < unguarded_elements; ++i)
{
InitValue(INTEGER_SEED, h_in[i], i);
}
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * unguarded_elements, cudaMemcpyHostToDevice));
printf("TestIterator "
"grid_size(%d) "
"guarded_elements(%d) "
"unguarded_elements(%d) "
"BLOCK_THREADS(%d) "
"ITEMS_PER_THREAD(%d) "
"LOAD_ALGORITHM(%d) "
"STORE_ALGORITHM(%d) "
"LOAD_MODIFIER(%d) "
"STORE_MODIFIER(%d) "
"sizeof(T)(%d)\n",
grid_size, guarded_elements, unguarded_elements, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, LOAD_MODIFIER, STORE_MODIFIER, (int) sizeof(T));
TestKernel<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>(
h_in,
CacheModifiedInputIterator<LOAD_MODIFIER, T>(d_in),
CacheModifiedOutputIterator<STORE_MODIFIER, T>(d_out_unguarded),
CacheModifiedOutputIterator<STORE_MODIFIER, T>(d_out_guarded),
d_out_unguarded,
d_out_guarded,
grid_size,
guarded_elements);
// Cleanup
if (h_in) free(h_in);
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out_unguarded) CubDebugExit(g_allocator.DeviceFree(d_out_unguarded));
if (d_out_guarded) CubDebugExit(g_allocator.DeviceFree(d_out_guarded));
}
/**
* Test iterator. Specialized for insufficient resources.
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM,
CacheLoadModifier LOAD_MODIFIER,
CacheStoreModifier STORE_MODIFIER>
void TestIterator(
int /*grid_size*/,
float /*fraction_valid*/,
Int2Type<false> /*sufficient_resources*/)
{}
/**
* Evaluate different pointer access types
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
void TestPointerType(
int grid_size,
float fraction_valid)
{
// Threadblock load/store abstraction types
typedef BlockLoad<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM> BlockLoad;
typedef BlockStore<T, BLOCK_THREADS, ITEMS_PER_THREAD, STORE_ALGORITHM> BlockStore;
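    // resource check: sm_1x devices provide 16KB of shared memory and at most
    // 512 threads per block; later architectures are checked against 48KB and 1024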
#if defined(SM100) || defined(SM110) || defined(SM130)
static const bool sufficient_load_smem = sizeof(typename BlockLoad::TempStorage) <= 1024 * 16;
static const bool sufficient_store_smem = sizeof(typename BlockStore::TempStorage) <= 1024 * 16;
static const bool sufficient_threads = BLOCK_THREADS <= 512;
#else
static const bool sufficient_load_smem = sizeof(typename BlockLoad::TempStorage) <= 1024 * 48;
static const bool sufficient_store_smem = sizeof(typename BlockStore::TempStorage) <= 1024 * 48;
static const bool sufficient_threads = BLOCK_THREADS <= 1024;
#endif
static const bool sufficient_resources = sufficient_load_smem && sufficient_store_smem && sufficient_threads;
TestNative<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM>(grid_size, fraction_valid, Int2Type<sufficient_resources>());
TestIterator<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, LOAD_DEFAULT, STORE_DEFAULT>(grid_size, fraction_valid, Int2Type<sufficient_resources>());
}
/**
* Evaluate different time-slicing strategies
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
BlockLoadAlgorithm LOAD_ALGORITHM,
BlockStoreAlgorithm STORE_ALGORITHM>
void TestSlicedStrategy(
int grid_size,
float fraction_valid)
{
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, true>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGORITHM, STORE_ALGORITHM, false>(grid_size, fraction_valid);
}
/**
* Evaluate different load/store strategies (specialized for block sizes that are not a multiple of 32)
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD>
void TestStrategy(
int grid_size,
float fraction_valid,
Int2Type<false> /*is_warp_multiple*/)
{
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_DIRECT, BLOCK_STORE_DIRECT>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_TRANSPOSE, BLOCK_STORE_TRANSPOSE>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_VECTORIZE, BLOCK_STORE_VECTORIZE>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_STRIPED, BLOCK_STORE_STRIPED>(grid_size, fraction_valid);
}
/**
* Evaluate different load/store strategies (specialized for block sizes that are a multiple of 32)
*/
template <
typename T,
int BLOCK_THREADS,
int ITEMS_PER_THREAD>
void TestStrategy(
int grid_size,
float fraction_valid,
Int2Type<true> /*is_warp_multiple*/)
{
TestStrategy<T, BLOCK_THREADS, ITEMS_PER_THREAD>(grid_size, fraction_valid, Int2Type<false>());
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE, BLOCK_STORE_WARP_TRANSPOSE>(grid_size, fraction_valid);
TestPointerType<T, BLOCK_THREADS, ITEMS_PER_THREAD, BLOCK_LOAD_WARP_TRANSPOSE_TIMESLICED, BLOCK_STORE_WARP_TRANSPOSE_TIMESLICED>(grid_size, fraction_valid);
}
/**
* Evaluate different register blocking
*/
template <
typename T,
int BLOCK_THREADS>
void TestItemsPerThread(
int grid_size,
float fraction_valid)
{
Int2Type<BLOCK_THREADS % 32 == 0> is_warp_multiple;
TestStrategy<T, BLOCK_THREADS, 1>(grid_size, fraction_valid, is_warp_multiple);
TestStrategy<T, BLOCK_THREADS, 3>(grid_size, fraction_valid, is_warp_multiple);
TestStrategy<T, BLOCK_THREADS, 4>(grid_size, fraction_valid, is_warp_multiple);
TestStrategy<T, BLOCK_THREADS, 11>(grid_size, fraction_valid, is_warp_multiple);
}
/**
* Evaluate different thread block sizes
*/
template <typename T>
void TestThreads(
int grid_size,
float fraction_valid)
{
TestItemsPerThread<T, 15>(grid_size, fraction_valid);
TestItemsPerThread<T, 32>(grid_size, fraction_valid);
TestItemsPerThread<T, 72>(grid_size, fraction_valid);
TestItemsPerThread<T, 96>(grid_size, fraction_valid);
TestItemsPerThread<T, 128>(grid_size, fraction_valid);
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Get ptx version
int ptx_version = 0;
CubDebugExit(PtxVersion(ptx_version));
#ifdef CUB_TEST_BENCHMARK
// Compile/run quick tests
TestNative< int, 64, 2, BLOCK_LOAD_WARP_TRANSPOSE, BLOCK_STORE_WARP_TRANSPOSE>(1, 0.8f, Int2Type<true>());
TestIterator< int, 64, 2, BLOCK_LOAD_WARP_TRANSPOSE, BLOCK_STORE_WARP_TRANSPOSE, LOAD_DEFAULT, STORE_DEFAULT>(1, 0.8f, Int2Type<true>());
#else
// Compile/run thorough tests
TestThreads<char>(2, 0.8f);
TestThreads<int>(2, 0.8f);
TestThreads<long>(2, 0.8f);
TestThreads<long2>(2, 0.8f);
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
TestThreads<double2>(2, 0.8f);
TestThreads<TestFoo>(2, 0.8f);
TestThreads<TestBar>(2, 0.8f);
#endif
return 0;
}
|
eb112b82018fac42a4ab8d27f941e03c837bc874.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_reset_field_kernel1;
int xdim0_reset_field_kernel1_h = -1;
__constant__ int ydim0_reset_field_kernel1;
int ydim0_reset_field_kernel1_h = -1;
__constant__ int xdim1_reset_field_kernel1;
int xdim1_reset_field_kernel1_h = -1;
__constant__ int ydim1_reset_field_kernel1;
int ydim1_reset_field_kernel1_h = -1;
__constant__ int xdim2_reset_field_kernel1;
int xdim2_reset_field_kernel1_h = -1;
__constant__ int ydim2_reset_field_kernel1;
int ydim2_reset_field_kernel1_h = -1;
__constant__ int xdim3_reset_field_kernel1;
int xdim3_reset_field_kernel1_h = -1;
__constant__ int ydim3_reset_field_kernel1;
int ydim3_reset_field_kernel1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x, y, z) \
(x + xdim0_reset_field_kernel1 * (y) + \
xdim0_reset_field_kernel1 * ydim0_reset_field_kernel1 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_reset_field_kernel1 * (y) + \
xdim1_reset_field_kernel1 * ydim1_reset_field_kernel1 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_reset_field_kernel1 * (y) + \
xdim2_reset_field_kernel1 * ydim2_reset_field_kernel1 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_reset_field_kernel1 * (y) + \
xdim3_reset_field_kernel1 * ydim3_reset_field_kernel1 * (z))
// user function
__device__
void
reset_field_kernel1_gpu(double *density0, const double *density1,
double *energy0, const double *energy1) {
density0[OPS_ACC0(0, 0, 0)] = density1[OPS_ACC1(0, 0, 0)];
energy0[OPS_ACC2(0, 0, 0)] = energy1[OPS_ACC3(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_reset_field_kernel1(double *__restrict arg0,
const double *__restrict arg1,
double *__restrict arg2,
const double *__restrict arg3,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_reset_field_kernel1 +
idx_z * 1 * 1 * xdim0_reset_field_kernel1 * ydim0_reset_field_kernel1;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_reset_field_kernel1 +
idx_z * 1 * 1 * xdim1_reset_field_kernel1 * ydim1_reset_field_kernel1;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_reset_field_kernel1 +
idx_z * 1 * 1 * xdim2_reset_field_kernel1 * ydim2_reset_field_kernel1;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_reset_field_kernel1 +
idx_z * 1 * 1 * xdim3_reset_field_kernel1 * ydim3_reset_field_kernel1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
reset_field_kernel1_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
void ops_par_loop_reset_field_kernel1(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 1))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1, "reset_field_kernel1");
OPS_kernels[1].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != xdim0_reset_field_kernel1_h ||
ydim0 != ydim0_reset_field_kernel1_h ||
xdim1 != xdim1_reset_field_kernel1_h ||
ydim1 != ydim1_reset_field_kernel1_h ||
xdim2 != xdim2_reset_field_kernel1_h ||
ydim2 != ydim2_reset_field_kernel1_h ||
xdim3 != xdim3_reset_field_kernel1_h ||
ydim3 != ydim3_reset_field_kernel1_h) {
hipMemcpyToSymbol(xdim0_reset_field_kernel1, &xdim0, sizeof(int));
xdim0_reset_field_kernel1_h = xdim0;
hipMemcpyToSymbol(ydim0_reset_field_kernel1, &ydim0, sizeof(int));
ydim0_reset_field_kernel1_h = ydim0;
hipMemcpyToSymbol(xdim1_reset_field_kernel1, &xdim1, sizeof(int));
xdim1_reset_field_kernel1_h = xdim1;
hipMemcpyToSymbol(ydim1_reset_field_kernel1, &ydim1, sizeof(int));
ydim1_reset_field_kernel1_h = ydim1;
hipMemcpyToSymbol(xdim2_reset_field_kernel1, &xdim2, sizeof(int));
xdim2_reset_field_kernel1_h = xdim2;
hipMemcpyToSymbol(ydim2_reset_field_kernel1, &ydim2, sizeof(int));
ydim2_reset_field_kernel1_h = ydim2;
hipMemcpyToSymbol(xdim3_reset_field_kernel1, &xdim3, sizeof(int));
xdim3_reset_field_kernel1_h = xdim3;
hipMemcpyToSymbol(ydim3_reset_field_kernel1, &ydim3, sizeof(int));
ydim3_reset_field_kernel1_h = ydim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[1].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_reset_field_kernel1), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[1].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[2], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[1].mpi_time += t2 - t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
| eb112b82018fac42a4ab8d27f941e03c837bc874.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_reset_field_kernel1;
int xdim0_reset_field_kernel1_h = -1;
__constant__ int ydim0_reset_field_kernel1;
int ydim0_reset_field_kernel1_h = -1;
__constant__ int xdim1_reset_field_kernel1;
int xdim1_reset_field_kernel1_h = -1;
__constant__ int ydim1_reset_field_kernel1;
int ydim1_reset_field_kernel1_h = -1;
__constant__ int xdim2_reset_field_kernel1;
int xdim2_reset_field_kernel1_h = -1;
__constant__ int ydim2_reset_field_kernel1;
int ydim2_reset_field_kernel1_h = -1;
__constant__ int xdim3_reset_field_kernel1;
int xdim3_reset_field_kernel1_h = -1;
__constant__ int ydim3_reset_field_kernel1;
int ydim3_reset_field_kernel1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x, y, z) \
(x + xdim0_reset_field_kernel1 * (y) + \
xdim0_reset_field_kernel1 * ydim0_reset_field_kernel1 * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_reset_field_kernel1 * (y) + \
xdim1_reset_field_kernel1 * ydim1_reset_field_kernel1 * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_reset_field_kernel1 * (y) + \
xdim2_reset_field_kernel1 * ydim2_reset_field_kernel1 * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_reset_field_kernel1 * (y) + \
xdim3_reset_field_kernel1 * ydim3_reset_field_kernel1 * (z))
// user function
__device__
void
reset_field_kernel1_gpu(double *density0, const double *density1,
double *energy0, const double *energy1) {
density0[OPS_ACC0(0, 0, 0)] = density1[OPS_ACC1(0, 0, 0)];
energy0[OPS_ACC2(0, 0, 0)] = energy1[OPS_ACC3(0, 0, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_reset_field_kernel1(double *__restrict arg0,
const double *__restrict arg1,
double *__restrict arg2,
const double *__restrict arg3,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_reset_field_kernel1 +
idx_z * 1 * 1 * xdim0_reset_field_kernel1 * ydim0_reset_field_kernel1;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_reset_field_kernel1 +
idx_z * 1 * 1 * xdim1_reset_field_kernel1 * ydim1_reset_field_kernel1;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_reset_field_kernel1 +
idx_z * 1 * 1 * xdim2_reset_field_kernel1 * ydim2_reset_field_kernel1;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_reset_field_kernel1 +
idx_z * 1 * 1 * xdim3_reset_field_kernel1 * ydim3_reset_field_kernel1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
reset_field_kernel1_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
void ops_par_loop_reset_field_kernel1(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 1))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1, "reset_field_kernel1");
OPS_kernels[1].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != xdim0_reset_field_kernel1_h ||
ydim0 != ydim0_reset_field_kernel1_h ||
xdim1 != xdim1_reset_field_kernel1_h ||
ydim1 != ydim1_reset_field_kernel1_h ||
xdim2 != xdim2_reset_field_kernel1_h ||
ydim2 != ydim2_reset_field_kernel1_h ||
xdim3 != xdim3_reset_field_kernel1_h ||
ydim3 != ydim3_reset_field_kernel1_h) {
cudaMemcpyToSymbol(xdim0_reset_field_kernel1, &xdim0, sizeof(int));
xdim0_reset_field_kernel1_h = xdim0;
cudaMemcpyToSymbol(ydim0_reset_field_kernel1, &ydim0, sizeof(int));
ydim0_reset_field_kernel1_h = ydim0;
cudaMemcpyToSymbol(xdim1_reset_field_kernel1, &xdim1, sizeof(int));
xdim1_reset_field_kernel1_h = xdim1;
cudaMemcpyToSymbol(ydim1_reset_field_kernel1, &ydim1, sizeof(int));
ydim1_reset_field_kernel1_h = ydim1;
cudaMemcpyToSymbol(xdim2_reset_field_kernel1, &xdim2, sizeof(int));
xdim2_reset_field_kernel1_h = xdim2;
cudaMemcpyToSymbol(ydim2_reset_field_kernel1, &ydim2, sizeof(int));
ydim2_reset_field_kernel1_h = ydim2;
cudaMemcpyToSymbol(xdim3_reset_field_kernel1, &xdim3, sizeof(int));
xdim3_reset_field_kernel1_h = xdim3;
cudaMemcpyToSymbol(ydim3_reset_field_kernel1, &ydim3, sizeof(int));
ydim3_reset_field_kernel1_h = ydim3;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[3].dat->d_m[d];
#endif
int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] -
args[3].dat->base[0] - d_m[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] -
args[3].dat->base[1] - d_m[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] -
d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[1].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_reset_field_kernel1<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[1].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[2], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[1].mpi_time += t2 - t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
|
8833869de1e72df84949617287b06b719d7a7cee.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "update.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *original = NULL;
hipMalloc(&original, XSIZE*YSIZE);
float *newTE = NULL;
hipMalloc(&newTE, XSIZE*YSIZE);
float *current = NULL;
hipMalloc(&current, XSIZE*YSIZE);
int nhalf = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
update), dim3(gridBlock),dim3(threadBlock), 0, 0, original,newTE,current,nhalf);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
update), dim3(gridBlock),dim3(threadBlock), 0, 0, original,newTE,current,nhalf);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
update), dim3(gridBlock),dim3(threadBlock), 0, 0, original,newTE,current,nhalf);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 8833869de1e72df84949617287b06b719d7a7cee.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "update.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *original = NULL;
cudaMalloc(&original, XSIZE*YSIZE);
float *newTE = NULL;
cudaMalloc(&newTE, XSIZE*YSIZE);
float *current = NULL;
cudaMalloc(&current, XSIZE*YSIZE);
int nhalf = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
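// iXSIZE and iYSIZE are now rounded up to the next multiple of BLOCKX/BLOCKY,
// so the gridBlock/threadBlock launch below spans at least XSIZE x YSIZE
// threads; any surplus threads are left for update() (in update.cu) to handle.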
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
update<<<gridBlock,threadBlock>>>(original,newTE,current,nhalf);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
update<<<gridBlock,threadBlock>>>(original,newTE,current,nhalf);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
update<<<gridBlock,threadBlock>>>(original,newTE,current,nhalf);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c4936729ae1c4a605eb9e4f3524cbb61a865cd53.hip | // !!! This is a file automatically generated by hipify!!!
#include <string>
#include <iomanip>
#include <sstream>
#include <cutf/nvml.hpp>
#include "gpu_monitor.hpp"
#define NVML_ERROR_HANDLE(status) cutf::nvml::error::check(status, __FILE__, __LINE__, __func__)
gpu_monitor::monitor::monitor(unsigned int gpu_id) : max_power(0), max_temperature(0){
NVML_ERROR_HANDLE(nvmlInit());
NVML_ERROR_HANDLE(nvmlDeviceGetHandleByIndex(gpu_id, &device));
	// We would rather not see an exception thrown from the constructor, so this call is left unchecked
nvmlDeviceGetEnforcedPowerLimit(device, &power_max_limit);
}
gpu_monitor::monitor::~monitor(){
NVML_ERROR_HANDLE(nvmlShutdown());
}
// pre print string
// e.g. csv column title
std::string gpu_monitor::monitor::get_gpu_status_pre_string(const gpu_monitor::string_mode_id string_mode){
std::string pre_status_string;
switch (string_mode) {
case gpu_monitor::human:
pre_status_string = "Power limit : " + std::to_string(power_max_limit/1000.0);
break;
case gpu_monitor::csv:
pre_status_string = "temperature,power,power_max_limit,performance,total_used_memory";
break;
default:
break;
}
return pre_status_string;
}
void gpu_monitor::monitor::get_gpu_status(){
// get gpu temperature/power {{{
	// Query the values in an order that does not throw on a GTX650
	NVML_ERROR_HANDLE(nvmlDeviceGetTemperature(device, NVML_TEMPERATURE_GPU, &current_temperature));
max_temperature = ::max(max_temperature, current_temperature);
	NVML_ERROR_HANDLE(nvmlDeviceGetMemoryInfo(device, &current_memory));
	NVML_ERROR_HANDLE(nvmlDeviceGetPerformanceState(device, &current_states));
	NVML_ERROR_HANDLE(nvmlDeviceGetPowerUsage(device, &current_power));
max_power = ::max(max_power, current_power);
// }}}
}
std::string gpu_monitor::monitor::get_gpu_status_string(const gpu_monitor::string_mode_id string_mode){
std::string status_string = "";
if(string_mode == gpu_monitor::human){
	// Use a stringstream (ss) so the columns line up
std::stringstream ss;
ss<<"Temp: "<<std::setw(3)<<current_temperature<<"C, Pow: "<<std::setw(6)<<(current_power/1000.0)<<"W, Perf: P" + std::to_string((int) current_states)<<", TotalUsedMem: "<<current_memory.used/(1<<20)<<"MB";
status_string = ss.str();
}else if(string_mode == gpu_monitor::csv){
status_string = std::to_string(current_temperature) + "," + std::to_string(current_power/1000.0) + "," + std::to_string(power_max_limit/1000.0) + "," + std::to_string((int) current_states) + "," + std::to_string(current_memory.used/(1<<20));
}
return status_string;
}
unsigned int gpu_monitor::monitor::get_max_power() const{
return max_power;
}
unsigned int gpu_monitor::monitor::get_max_temperature() const{
return max_temperature;
}
| c4936729ae1c4a605eb9e4f3524cbb61a865cd53.cu | #include <string>
#include <iomanip>
#include <sstream>
#include <cutf/nvml.hpp>
#include "gpu_monitor.hpp"
#define NVML_ERROR_HANDLE(status) cutf::nvml::error::check(status, __FILE__, __LINE__, __func__)
gpu_monitor::monitor::monitor(unsigned int gpu_id) : max_power(0), max_temperature(0){
NVML_ERROR_HANDLE(nvmlInit());
NVML_ERROR_HANDLE(nvmlDeviceGetHandleByIndex(gpu_id, &device));
	// We would rather not see an exception thrown from the constructor, so this call is left unchecked
nvmlDeviceGetEnforcedPowerLimit(device, &power_max_limit);
}
gpu_monitor::monitor::~monitor(){
NVML_ERROR_HANDLE(nvmlShutdown());
}
// pre print string
// e.g. csv column title
std::string gpu_monitor::monitor::get_gpu_status_pre_string(const gpu_monitor::string_mode_id string_mode){
std::string pre_status_string;
switch (string_mode) {
case gpu_monitor::human:
pre_status_string = "Power limit : " + std::to_string(power_max_limit/1000.0);
break;
case gpu_monitor::csv:
pre_status_string = "temperature,power,power_max_limit,performance,total_used_memory";
break;
default:
break;
}
return pre_status_string;
}
void gpu_monitor::monitor::get_gpu_status(){
// get gpu temperature/power {{{
	// Query the values in an order that does not throw on a GTX650
	NVML_ERROR_HANDLE(nvmlDeviceGetTemperature(device, NVML_TEMPERATURE_GPU, &current_temperature));
max_temperature = std::max(max_temperature, current_temperature);
	NVML_ERROR_HANDLE(nvmlDeviceGetMemoryInfo(device, &current_memory));
	NVML_ERROR_HANDLE(nvmlDeviceGetPerformanceState(device, &current_states));
	NVML_ERROR_HANDLE(nvmlDeviceGetPowerUsage(device, &current_power));
max_power = std::max(max_power, current_power);
// }}}
}
std::string gpu_monitor::monitor::get_gpu_status_string(const gpu_monitor::string_mode_id string_mode){
std::string status_string = "";
if(string_mode == gpu_monitor::human){
	// Use a stringstream (ss) so the columns line up
std::stringstream ss;
ss<<"Temp: "<<std::setw(3)<<current_temperature<<"C, Pow: "<<std::setw(6)<<(current_power/1000.0)<<"W, Perf: P" + std::to_string((int) current_states)<<", TotalUsedMem: "<<current_memory.used/(1<<20)<<"MB";
status_string = ss.str();
}else if(string_mode == gpu_monitor::csv){
status_string = std::to_string(current_temperature) + "," + std::to_string(current_power/1000.0) + "," + std::to_string(power_max_limit/1000.0) + "," + std::to_string((int) current_states) + "," + std::to_string(current_memory.used/(1<<20));
}
return status_string;
}
unsigned int gpu_monitor::monitor::get_max_power() const{
return max_power;
}
unsigned int gpu_monitor::monitor::get_max_temperature() const{
return max_temperature;
}
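// Usage sketch (illustrative only): this assumes the public interface declared
// in gpu_monitor.hpp matches the definitions above. GPU_MONITOR_USAGE_EXAMPLE
// is a hypothetical guard, not part of the original build.
#ifdef GPU_MONITOR_USAGE_EXAMPLE
#include <iostream>
int gpu_monitor_usage_example(){
	gpu_monitor::monitor mon(0);
	std::cout << mon.get_gpu_status_pre_string(gpu_monitor::csv) << std::endl;
	for(int i = 0; i < 10; i++){
		mon.get_gpu_status();
		std::cout << mon.get_gpu_status_string(gpu_monitor::csv) << std::endl;
	}
	std::cout << "max power [W] : " << (mon.get_max_power() / 1000.0) << std::endl;
	return 0;
}
#endif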
|
0f393adba8174b66d0359e1f3978094178af9c60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//// gol.cu
#include <stdlib.h>
#include <iostream>
#include <stdio.h>
#include <assert.h>
// Game of Life rules
// global memory only
typedef bool GolCell;
inline __device__ GolCell GetNeighbourCell (GolCell *input, int mapCellIdx, int mapWidth, int x_off, int y_off) {
return input[mapCellIdx + (mapWidth * y_off) + x_off];
}
inline __device__ void UpdateNeighbourhood(int &neighbourhood, GolCell &neighbourValue) {
neighbourhood += neighbourValue;
}
inline __device__ GolCell GetCell(GolCell *grid, int x, int y, int gridWidth) {
return grid[x + (y * gridWidth)];
}
inline __device__ bool IsAlive(GolCell &cell) {
return (1 == cell);
}
// A cell is alive the next generation if it is currently alive and has
// either 2 or 3 neighbours OR if it is dead and has 3 neighbours.
inline __device__ void UpdateState(GolCell &thisCell, int &neighbourhood) {
if(IsAlive(thisCell)) {
thisCell = (neighbourhood == 2 || neighbourhood == 3);
} else {
thisCell = (neighbourhood == 3);
}
}
__global__
void RunGoL(GolCell *input, GolCell *output, int gridWidth, int gridHeight, int iterations, bool wrapAround) {
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
int x = tid_x + blockIdx.x * blockDim.x;
int y = tid_y + blockIdx.y * blockDim.y;
int gridSizeX = blockDim.x * gridDim.x;
int gridSizeY = blockDim.y * gridDim.y;
for(int iter = 0; iter < iterations; iter = iter + 1) {
for(int glbl_x = x; glbl_x < gridWidth; glbl_x = glbl_x + gridSizeX) {
for(int glbl_y = y; glbl_y < gridHeight; glbl_y = glbl_y + gridSizeY) {
//Assume row-major here
int mapCell = (gridWidth * glbl_y) + glbl_x;
GolCell thisCell = input[mapCell];
// The variable we use to track the status of the cells surrounding this one
// A basic implementation will be one where for each neighbour that is alive
// the neighbourhood value increases by one
int neighbourhood = 0;
// As is right now, this is a lot of overhead, but I wrote it
// like this so we can easily add in optis later. At the end,
// if the CUDA compiler does not do inlining for us, we can manually
// do inlining of these functions.
// Here we assume that (0,0) is the top left of the grid (although there is)
// nothing stopping it from being the bottom left.
// **JUSTIN** - let me know if you have preference -> it doesn't bother me either way
int x_left = (glbl_x == 0) ? gridWidth - 1 : glbl_x - 1;
int x_right = (glbl_x == gridWidth - 1) ? 0 : glbl_x + 1;
int y_above = (glbl_y == 0) ? gridHeight - 1 : glbl_y - 1;
int y_below = (glbl_y == gridHeight - 1) ? 0 : glbl_y + 1;
GolCell neighbourValue;
// TOP LEFT
neighbourValue = GetCell(input, x_left, y_above, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
// TOP
neighbourValue = GetCell(input, glbl_x, y_above, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
// TOP RIGHT
neighbourValue = GetCell(input, x_right, y_above, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
// RIGHT
neighbourValue = GetCell(input, x_right, glbl_y, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
// BOTTOM RIGHT
neighbourValue = GetCell(input, x_right, y_below, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
// BOTTOM
neighbourValue = GetCell(input, glbl_x, y_below, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
// BOTTOM LEFT
neighbourValue = GetCell(input, x_left, y_below, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
// LEFT
neighbourValue = GetCell(input, x_left, glbl_y, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
UpdateState(thisCell, neighbourhood);
output[mapCell] = thisCell;
}
}
GolCell *bufferSwap = input;
input = output;
output = bufferSwap;
__syncthreads();
}
}
void InitializeBoard(GolCell *input, int gridWidth, int gridHeight, char *startingFile, bool bGenGridFromScratch) {
FILE *file = NULL;
if(!bGenGridFromScratch) {
    file = fopen(startingFile, "r");
assert(file);
}
for(int i = 0; i < gridHeight; i = i + 1) {
for(int j = 0; j < gridWidth; j = j + 1) {
char cell = '\n';
if(!bGenGridFromScratch) {
        cell = fgetc(file);
} else {
cell = (rand() % 3 == 0) ? '1' : '0';
}
int index = j + gridWidth * i;
// Sorry about this - I would like a nicer way to deal with newline
// oddities across windows/Linux plats but we can hack it for now
while(cell != '1' && cell != 'X' && cell != '0'&& cell != ' ' && cell != '_' && !bGenGridFromScratch) {
cell = fgetc(file);
}
if((cell == '1' || cell == 'X')) {
input[index] = 1;
} else if((cell == '0' || cell == ' ' || cell == '_')) {
input[index] = 0;
}
}
}
if(!bGenGridFromScratch) {
fclose(file);
}
}
int main (int argc, char *argv[]) {
if(argc != 5 && argc != 4) {
return 0;
}
int gridWidth = atoi(argv[1]);
int gridHeight = atoi(argv[2]);
int iterations = atoi(argv[3]);
int gridSize = gridWidth * gridHeight;
char *startingFile = argv[4];
GolCell *input = (GolCell *)malloc(gridSize * sizeof(GolCell));
GolCell *output = (GolCell *)malloc(gridSize * sizeof(GolCell));
InitializeBoard(input, gridWidth, gridHeight, startingFile, argc == 4);
int THREADS_X = 32;
int THREADS_Y = 32;
int THREADS_Z = 1;
int BLOCKS_MAX = 256;
int BLOCKS_X = min(BLOCKS_MAX, gridWidth / THREADS_X) + 1;
int BLOCKS_Y = min(BLOCKS_MAX, gridHeight / THREADS_Y) + 1;
int BLOCKS_Z = 1;
dim3 threads(THREADS_X, THREADS_Y, THREADS_Z);
dim3 blocks(BLOCKS_X, BLOCKS_Y, BLOCKS_Z);
GolCell *d_input;
GolCell *d_output;
// Cuda Events
hipEvent_t start, end;
hipEventCreate(&start);
hipEventCreate(&end);
hipEventRecord(start, 0);
// std::cout << "threads: {"<<threads.x<<","<<threads.y<<","<<threads.z<<"} blocks:{"<<blocks.x<<","<<blocks.y<<","<<blocks.z<<"}"<<std::endl;
hipMalloc(&d_input, gridSize * sizeof(GolCell));
hipMalloc(&d_output, gridSize * sizeof(GolCell));
hipMemcpy(d_input, input, gridSize * sizeof(GolCell), hipMemcpyHostToDevice);
for(int i = 0; i < iterations; i = i + 1) {
// Make sure this is blocking for now
hipLaunchKernelGGL(( RunGoL), dim3(blocks), dim3(threads), 0, 0, d_input, d_output, gridWidth, gridHeight, 1, true);
GolCell *temp = d_input;
d_input = d_output;
d_output = temp;
}
// I think the number of iterations will determine whether we should copy from d_output or d_input
hipMemcpy(output, (iterations & 0x1) ? d_output : d_input, gridSize * sizeof(GolCell), hipMemcpyDeviceToHost);
hipEventRecord(end, 0);
hipEventSynchronize(end);
float time_ms;
hipEventElapsedTime(&time_ms, start, end);
std::cout <<"time: "<<time_ms<<std::endl;
// for(int j = 0; j < gridHeight; j = j + 1) {
// for(int i = 0; i < gridWidth; i = i + 1) {
// std::cout << (output[j * gridWidth + i] ? '#' : ' ');
// }
// std::cout << std::endl;
// }
hipFree(d_input);
hipFree(d_output);
free(input);
free(output);
} | 0f393adba8174b66d0359e1f3978094178af9c60.cu | //// gol.cu
#include <stdlib.h>
#include <iostream>
#include <stdio.h>
#include <assert.h>
// Game of Life rules
// global memory only
typedef bool GolCell;
inline __device__ GolCell GetNeighbourCell (GolCell *input, int mapCellIdx, int mapWidth, int x_off, int y_off) {
return input[mapCellIdx + (mapWidth * y_off) + x_off];
}
inline __device__ void UpdateNeighbourhood(int &neighbourhood, GolCell &neighbourValue) {
neighbourhood += neighbourValue;
}
inline __device__ GolCell GetCell(GolCell *grid, int x, int y, int gridWidth) {
return grid[x + (y * gridWidth)];
}
inline __device__ bool IsAlive(GolCell &cell) {
return (1 == cell);
}
// A cell is alive the next generation if it is currently alive and has
// either 2 or 3 neighbours OR if it is dead and has 3 neighbours.
inline __device__ void UpdateState(GolCell &thisCell, int &neighbourhood) {
if(IsAlive(thisCell)) {
thisCell = (neighbourhood == 2 || neighbourhood == 3);
} else {
thisCell = (neighbourhood == 3);
}
}
__global__
void RunGoL(GolCell *input, GolCell *output, int gridWidth, int gridHeight, int iterations, bool wrapAround) {
int tid_x = threadIdx.x;
int tid_y = threadIdx.y;
int x = tid_x + blockIdx.x * blockDim.x;
int y = tid_y + blockIdx.y * blockDim.y;
int gridSizeX = blockDim.x * gridDim.x;
int gridSizeY = blockDim.y * gridDim.y;
for(int iter = 0; iter < iterations; iter = iter + 1) {
for(int glbl_x = x; glbl_x < gridWidth; glbl_x = glbl_x + gridSizeX) {
for(int glbl_y = y; glbl_y < gridHeight; glbl_y = glbl_y + gridSizeY) {
//Assume row-major here
int mapCell = (gridWidth * glbl_y) + glbl_x;
GolCell thisCell = input[mapCell];
// The variable we use to track the status of the cells surrounding this one
// A basic implementation will be one where for each neighbour that is alive
// the neighbourhood value increases by one
int neighbourhood = 0;
// As is right now, this is a lot of overhead, but I wrote it
// like this so we can easily add in optis later. At the end,
// if the CUDA compiler does not do inlining for us, we can manually
// do inlining of these functions.
// Here we assume that (0,0) is the top left of the grid (although there is)
// nothing stopping it from being the bottom left.
// **JUSTIN** - let me know if you have preference -> it doesn't bother me either way
int x_left = (glbl_x == 0) ? gridWidth - 1 : glbl_x - 1;
int x_right = (glbl_x == gridWidth - 1) ? 0 : glbl_x + 1;
int y_above = (glbl_y == 0) ? gridHeight - 1 : glbl_y - 1;
int y_below = (glbl_y == gridHeight - 1) ? 0 : glbl_y + 1;
GolCell neighbourValue;
// TOP LEFT
neighbourValue = GetCell(input, x_left, y_above, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
// TOP
neighbourValue = GetCell(input, glbl_x, y_above, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
// TOP RIGHT
neighbourValue = GetCell(input, x_right, y_above, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
// RIGHT
neighbourValue = GetCell(input, x_right, glbl_y, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
// BOTTOM RIGHT
neighbourValue = GetCell(input, x_right, y_below, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
// BOTTOM
neighbourValue = GetCell(input, glbl_x, y_below, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
// BOTTOM LEFT
neighbourValue = GetCell(input, x_left, y_below, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
// LEFT
neighbourValue = GetCell(input, x_left, glbl_y, gridWidth);
UpdateNeighbourhood(neighbourhood, neighbourValue);
UpdateState(thisCell, neighbourhood);
output[mapCell] = thisCell;
}
}
GolCell *bufferSwap = input;
input = output;
output = bufferSwap;
__syncthreads();
}
}
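// Note: __syncthreads() above only synchronizes threads within a single block,
// so the in-kernel buffer swap is not safe across blocks for iterations > 1.
// main() below therefore launches RunGoL with iterations == 1 and swaps the
// input/output buffers on the host between launches.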
void InitializeBoard(GolCell *input, int gridWidth, int gridHeight, char *startingFile, bool bGenGridFromScratch) {
FILE *file = NULL;
if(!bGenGridFromScratch) {
    file = fopen(startingFile, "r");
assert(file);
}
for(int i = 0; i < gridHeight; i = i + 1) {
for(int j = 0; j < gridWidth; j = j + 1) {
char cell = '\n';
if(!bGenGridFromScratch) {
        cell = fgetc(file);
} else {
cell = (rand() % 3 == 0) ? '1' : '0';
}
int index = j + gridWidth * i;
// Sorry about this - I would like a nicer way to deal with newline
// oddities across windows/Linux plats but we can hack it for now
while(cell != '1' && cell != 'X' && cell != '0'&& cell != ' ' && cell != '_' && !bGenGridFromScratch) {
cell = fgetc(file);
}
if((cell == '1' || cell == 'X')) {
input[index] = 1;
} else if((cell == '0' || cell == ' ' || cell == '_')) {
input[index] = 0;
}
}
}
if(!bGenGridFromScratch) {
fclose(file);
}
}
int main (int argc, char *argv[]) {
if(argc != 5 && argc != 4) {
return 0;
}
int gridWidth = atoi(argv[1]);
int gridHeight = atoi(argv[2]);
int iterations = atoi(argv[3]);
int gridSize = gridWidth * gridHeight;
char *startingFile = argv[4];
GolCell *input = (GolCell *)malloc(gridSize * sizeof(GolCell));
GolCell *output = (GolCell *)malloc(gridSize * sizeof(GolCell));
InitializeBoard(input, gridWidth, gridHeight, startingFile, argc == 4);
int THREADS_X = 32;
int THREADS_Y = 32;
int THREADS_Z = 1;
int BLOCKS_MAX = 256;
int BLOCKS_X = min(BLOCKS_MAX, gridWidth / THREADS_X) + 1;
int BLOCKS_Y = min(BLOCKS_MAX, gridHeight / THREADS_Y) + 1;
int BLOCKS_Z = 1;
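  // The +1 rounds the block counts up so boards whose dimensions are not
  // multiples of the block size are still fully covered; the grid-stride loops
  // and bounds checks inside RunGoL absorb any extra threads.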
dim3 threads(THREADS_X, THREADS_Y, THREADS_Z);
dim3 blocks(BLOCKS_X, BLOCKS_Y, BLOCKS_Z);
GolCell *d_input;
GolCell *d_output;
// Cuda Events
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
// std::cout << "threads: {"<<threads.x<<","<<threads.y<<","<<threads.z<<"} blocks:{"<<blocks.x<<","<<blocks.y<<","<<blocks.z<<"}"<<std::endl;
cudaMalloc(&d_input, gridSize * sizeof(GolCell));
cudaMalloc(&d_output, gridSize * sizeof(GolCell));
cudaMemcpy(d_input, input, gridSize * sizeof(GolCell), cudaMemcpyHostToDevice);
for(int i = 0; i < iterations; i = i + 1) {
// Make sure this is blocking for now
RunGoL<<<blocks, threads>>>(d_input, d_output, gridWidth, gridHeight, 1, true);
GolCell *temp = d_input;
d_input = d_output;
d_output = temp;
}
// I think the number of iterations will determine whether we should copy from d_output or d_input
cudaMemcpy(output, (iterations & 0x1) ? d_output : d_input, gridSize * sizeof(GolCell), cudaMemcpyDeviceToHost);
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
float time_ms;
cudaEventElapsedTime(&time_ms, start, end);
std::cout <<"time: "<<time_ms<<std::endl;
// for(int j = 0; j < gridHeight; j = j + 1) {
// for(int i = 0; i < gridWidth; i = i + 1) {
// std::cout << (output[j * gridWidth + i] ? '#' : ' ');
// }
// std::cout << std::endl;
// }
cudaFree(d_input);
cudaFree(d_output);
free(input);
free(output);
} |
d9e5a484c1bab3659413de257d8926dc8630b3ed.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define LEN 1 << 22
// define Array of Struct (AoS)
struct innerStruct{
float x;
float y;
};
// define Struct of Array (SoA)
struct innerArray{
float x[LEN];
float y[LEN];
};
double seconds(){
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6);
}
void initialInnerStruct(innerStruct *ip, int size){
  // initialize an array of structs (AoS)
for (int i=0;i<size;i++){
ip[i].x = (float)(rand() & 0xFF)/100.0f;
ip[i].y = (float)(rand() & 0xFF)/100.0f;
}
return;
}
void checkInnerStruct(innerStruct *hostRef, innerStruct *gpuRef, const int N){
double epsilon = 1.0e-8;
bool match = 1;
for(int i = 0; i < N; i++){
if (abs(hostRef[i].x - gpuRef[i].x)>epsilon){
match = 0;
printf("different on %dth element: host %f gpu %f\n",i,
hostRef[i].x,gpuRef[i].x);
break;
}
if (abs(hostRef[i].y - gpuRef[i].y)>epsilon){
match = 0;
printf("different on %dth element: host %f gpu %f\n",i,
hostRef[i].y,gpuRef[i].y);
break;
}
}
if (!match) printf("Arrays do not match! \n\n");
}
void testInnerStructHost(innerStruct *A,innerStruct *C,const int n){
for (int idx = 0;idx < n; idx++){
C[idx].x = A[idx].x + 10.f;
C[idx].y = A[idx].y + 20.f;
}
return;
}
__global__ void testInnerStruct(innerStruct *data, innerStruct *result, const int n){
// test the array of struct (AoS)
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
innerStruct tmp = data[i];
tmp.x += 10.f;
tmp.y += 20.f;
result[i] = tmp;
}
}
__global__ void warmup(innerStruct *data, innerStruct *result, const int n){
// warmup kernel function
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
innerStruct tmp = data[i];
tmp.x += 10.f;
tmp.y += 20.f;
result[i] = tmp;
}
}
int main(int argc,char **argv){
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp,dev);
printf("%s test struct of array at ",argv[0]);
printf("device %d: %s \n",dev,deviceProp.name);
hipSetDevice(dev);
// allocate host memory
int nElem = LEN;
size_t nBytes = nElem * sizeof(innerStruct);
innerStruct *h_A = (innerStruct *)malloc(nBytes);
innerStruct *hostRef = (innerStruct *)malloc(nBytes);
innerStruct *gpuRef = (innerStruct *)malloc(nBytes);
// initialize host array
initialInnerStruct(h_A,nElem);
testInnerStructHost(h_A,hostRef,nElem);
// allocate device memory
innerStruct *d_A,*d_C;
hipMalloc((innerStruct**)&d_A,nBytes);
hipMalloc((innerStruct**)&d_C,nBytes);
// copy data from host to device
hipMemcpy(d_A,h_A,nBytes,hipMemcpyHostToDevice);
int blocksize = 128;
if (argc > 1) blocksize = atoi(argv[1]);
// execution config
dim3 block(blocksize,1);
dim3 grid((nElem+block.x-1)/block.x,1);
// kernel 1: warmup
double iStart = seconds();
hipLaunchKernelGGL(( warmup), dim3(grid),dim3(block), 0, 0, d_A,d_C,nElem);
hipDeviceSynchronize();
double iElaps = seconds() - iStart;
printf("warmup kernel <<< %3d, %3d >>> elapsed %f sec\n",grid.x,block.x,iElaps);
hipMemcpy(gpuRef,d_C,nBytes,hipMemcpyDeviceToHost);
checkInnerStruct(hostRef,gpuRef,nElem);
hipGetLastError();
// kernel 2: testInnerStruct
iStart = seconds();
hipLaunchKernelGGL(( testInnerStruct), dim3(grid),dim3(block), 0, 0, d_A,d_C,nElem);
hipDeviceSynchronize();
iElaps = seconds() - iStart;
printf("innerstruct <<< %3d, %3d >>> elapsed %f sec\n",grid.x,block.x,iElaps);
hipMemcpy(gpuRef,d_C,nBytes,hipMemcpyDeviceToHost);
checkInnerStruct(hostRef,gpuRef,nElem);
hipGetLastError();
// free memories
hipFree(d_A);
hipFree(d_C);
free(h_A);
free(hostRef);
free(gpuRef);
// reset devices
hipDeviceReset();
return EXIT_SUCCESS;
}
| d9e5a484c1bab3659413de257d8926dc8630b3ed.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define LEN 1 << 22
// define Array of Struct (AoS)
struct innerStruct{
float x;
float y;
};
// define Struct of Array (SoA)
struct innerArray{
float x[LEN];
float y[LEN];
};
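// Illustrative SoA counterpart of testInnerStruct below (not part of the
// original benchmark and never launched here; the name testInnerArray is
// ours). With the struct-of-arrays layout, consecutive threads read
// consecutive floats from x[] and y[], which keeps global-memory accesses
// coalesced.
__global__ void testInnerArray(innerArray *data, innerArray *result, const int n){
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n){
        float tmpx = data->x[i];
        float tmpy = data->y[i];
        result->x[i] = tmpx + 10.f;
        result->y[i] = tmpy + 20.f;
    }
}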
double seconds(){
struct timeval tp;
struct timezone tzp;
int i = gettimeofday(&tp,&tzp);
return ((double)tp.tv_sec+(double)tp.tv_usec*1.e-6);
}
void initialInnerStruct(innerStruct *ip, int size){
  // initialize an array of structs (AoS)
for (int i=0;i<size;i++){
ip[i].x = (float)(rand() & 0xFF)/100.0f;
ip[i].y = (float)(rand() & 0xFF)/100.0f;
}
return;
}
void checkInnerStruct(innerStruct *hostRef, innerStruct *gpuRef, const int N){
double epsilon = 1.0e-8;
bool match = 1;
for(int i = 0; i < N; i++){
if (abs(hostRef[i].x - gpuRef[i].x)>epsilon){
match = 0;
printf("different on %dth element: host %f gpu %f\n",i,
hostRef[i].x,gpuRef[i].x);
break;
}
if (abs(hostRef[i].y - gpuRef[i].y)>epsilon){
match = 0;
printf("different on %dth element: host %f gpu %f\n",i,
hostRef[i].y,gpuRef[i].y);
break;
}
}
if (!match) printf("Arrays do not match! \n\n");
}
void testInnerStructHost(innerStruct *A,innerStruct *C,const int n){
for (int idx = 0;idx < n; idx++){
C[idx].x = A[idx].x + 10.f;
C[idx].y = A[idx].y + 20.f;
}
return;
}
__global__ void testInnerStruct(innerStruct *data, innerStruct *result, const int n){
// test the array of struct (AoS)
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
innerStruct tmp = data[i];
tmp.x += 10.f;
tmp.y += 20.f;
result[i] = tmp;
}
}
__global__ void warmup(innerStruct *data, innerStruct *result, const int n){
// warmup kernel function
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
innerStruct tmp = data[i];
tmp.x += 10.f;
tmp.y += 20.f;
result[i] = tmp;
}
}
int main(int argc,char **argv){
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,dev);
printf("%s test struct of array at ",argv[0]);
printf("device %d: %s \n",dev,deviceProp.name);
cudaSetDevice(dev);
// allocate host memory
int nElem = LEN;
size_t nBytes = nElem * sizeof(innerStruct);
innerStruct *h_A = (innerStruct *)malloc(nBytes);
innerStruct *hostRef = (innerStruct *)malloc(nBytes);
innerStruct *gpuRef = (innerStruct *)malloc(nBytes);
// initialize host array
initialInnerStruct(h_A,nElem);
testInnerStructHost(h_A,hostRef,nElem);
// allocate device memory
innerStruct *d_A,*d_C;
cudaMalloc((innerStruct**)&d_A,nBytes);
cudaMalloc((innerStruct**)&d_C,nBytes);
// copy data from host to device
cudaMemcpy(d_A,h_A,nBytes,cudaMemcpyHostToDevice);
int blocksize = 128;
if (argc > 1) blocksize = atoi(argv[1]);
// execution config
dim3 block(blocksize,1);
dim3 grid((nElem+block.x-1)/block.x,1);
// kernel 1: warmup
double iStart = seconds();
warmup<<<grid,block>>>(d_A,d_C,nElem);
cudaDeviceSynchronize();
double iElaps = seconds() - iStart;
printf("warmup kernel <<< %3d, %3d >>> elapsed %f sec\n",grid.x,block.x,iElaps);
cudaMemcpy(gpuRef,d_C,nBytes,cudaMemcpyDeviceToHost);
checkInnerStruct(hostRef,gpuRef,nElem);
cudaGetLastError();
// kernel 2: testInnerStruct
iStart = seconds();
testInnerStruct<<<grid,block>>>(d_A,d_C,nElem);
cudaDeviceSynchronize();
iElaps = seconds() - iStart;
printf("innerstruct <<< %3d, %3d >>> elapsed %f sec\n",grid.x,block.x,iElaps);
cudaMemcpy(gpuRef,d_C,nBytes,cudaMemcpyDeviceToHost);
checkInnerStruct(hostRef,gpuRef,nElem);
cudaGetLastError();
// free memories
cudaFree(d_A);
cudaFree(d_C);
free(h_A);
free(hostRef);
free(gpuRef);
// reset devices
cudaDeviceReset();
return EXIT_SUCCESS;
}
|
5c023180fa67308415a1c0dadfcdf4fdcc55f5c3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <tdp/slam/keyframe.h>
#include <tdp/eigen/dense.h>
#include <tdp/cuda/cuda.h>
#include <tdp/nvidia/helper_cuda.h>
#include <tdp/data/image.h>
#include <tdp/data/managed_image.h>
#include <tdp/camera/camera_base.h>
#include <tdp/camera/camera.h>
#include <tdp/camera/camera_poly.h>
#include <tdp/reductions/reductions.cuh>
#include <tdp/manifold/SE3.h>
#include <tdp/cuda/cuda.cuh>
namespace tdp {
template<int BLK_SIZE, int D, typename Derived>
__global__ void KernelOverlap(
Image<float> greyA,
Image<float> greyB,
Image<Vector3fda> pcA,
Image<Vector3fda> pcB,
SE3f T_ab, // frame b to frame a !camera!
CameraBase<float,D,Derived> camA,
int N_PER_T,
Image<Vector3fda> stats,
float* errB
) {
const int tid = threadIdx.x;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idS = idx*N_PER_T;
const int N = pcB.Area();
const int idE = min(N,(idx+1)*N_PER_T);
SharedMemory<Vector3fda> smem;
Vector3fda* sum = smem.getPointer();
sum[tid] = Vector3fda::Zero();
for (int id=idS; id<idE; ++id) {
const int x = id%pcB.w_;
const int y = id/pcB.w_;
if (x < pcB.w_ && y < pcB.h_) {
Vector3fda pB = pcB(x,y);
if (IsValidData(pB)) {
Vector3fda pBinA = T_ab*pB;
Eigen::Vector2f uv = camA.Project(pBinA);
if (greyA.Inside(uv)) {
Vector3fda pA = pcA(floor(uv(0)), floor(uv(1)));
if (IsValidData(pA) && (pBinA-pA).norm() < 0.03) {
// if (id % 1000 == 0)
// printf("%f %f %d %d %d\n", uv(0), uv(1), x, y, id);
float diff = greyA.GetBilinear(uv)-greyB(x,y);
float rmse = diff*diff;
if (errB) errB[id] = sqrt(rmse);
sum[tid](0) += rmse;
sum[tid](1) += 1;
}
}
sum[tid](2) += 1;
}
}
// const int x = id%pcA.w_;
// const int y = id/pcA.w_;
// if (x < pcA.w_ && y < pcA.h_) {
// Vector3fda pA = pcA(x,y);
// if (IsValidData(pA)) {
// Vector3fda pAinB = T_ab.Inverse()*pA;
// Eigen::Vector2f uv = camA.Project(pAinB);
// if (greyB.Inside(uv)) {
// Vector3fda pA = pcA(floor(uv(0)), floor(uv(1)));
//// if ((pBinA-pA).norm() < 0.03) {
// // if (tid % 10 == 0)
// // printf("%f %f %d %d %d\n", uv(0), uv(1), x, y, id);
// float diff = greyB.GetBilinear(uv)-greyA(x,y);
// // float diff = greyB(x,y);
// float rmse = diff*diff;
// if (errB) errB[id] = sqrt(rmse);
// sum[tid](0) += rmse;
// sum[tid](1) += 1;
//// }
// }
// sum[tid](2) += 1;
// }
// }
}
SumPyramidReduce<Vector3fda, BLK_SIZE>(tid, sum, stats.ptr_);
}
template <int D, class Derived>
void OverlapGpu(const Image<float>& greyA, const Image<float>& greyB,
const Image<Vector3fda>& pcA,
const Image<Vector3fda>& pcB,
const SE3f& T_ab,
const CameraBase<float,D,Derived>& camA, float& overlap, float& rmse,
Image<float>* errB) {
const size_t BLK_SIZE = 32;
size_t N = pcB.Area();
dim3 threads, blocks;
ComputeKernelParamsForArray(blocks,threads,N/10,BLK_SIZE);
ManagedDeviceImage<Vector3fda> out(1,1);
hipMemset(out.ptr_, 0, out.SizeBytes());
hipLaunchKernelGGL(( KernelOverlap<BLK_SIZE,D,Derived>), dim3(blocks),dim3(threads),
BLK_SIZE*sizeof(Vector3fda), 0, greyA, greyB, pcA, pcB, T_ab,
camA, 10, out, errB ? errB->ptr_ : nullptr);
checkCudaErrors(hipDeviceSynchronize());
ManagedHostImage<Vector3fda> stats(1,1);
stats.CopyFrom(out, hipMemcpyDeviceToHost);
overlap = stats[0](1) / stats[0](2);
rmse = sqrtf(stats[0](0) / stats[0](2));
}
template
void OverlapGpu(const Image<float>& greyA, const Image<float>& greyB,
const Image<Vector3fda>& pcA,
const Image<Vector3fda>& pcB,
const SE3f& T_ab,
const BaseCameraf& camA, float& overlap, float& rmse,
Image<float>* errB);
template
void OverlapGpu(const Image<float>& greyA, const Image<float>& greyB,
const Image<Vector3fda>& pcA,
const Image<Vector3fda>& pcB,
const SE3f& T_ab,
const BaseCameraPoly3f& camA, float& overlap, float& rmse,
Image<float>* errB);
}
| 5c023180fa67308415a1c0dadfcdf4fdcc55f5c3.cu | #include <tdp/slam/keyframe.h>
#include <tdp/eigen/dense.h>
#include <tdp/cuda/cuda.h>
#include <tdp/nvidia/helper_cuda.h>
#include <tdp/data/image.h>
#include <tdp/data/managed_image.h>
#include <tdp/camera/camera_base.h>
#include <tdp/camera/camera.h>
#include <tdp/camera/camera_poly.h>
#include <tdp/reductions/reductions.cuh>
#include <tdp/manifold/SE3.h>
#include <tdp/cuda/cuda.cuh>
namespace tdp {
template<int BLK_SIZE, int D, typename Derived>
__global__ void KernelOverlap(
Image<float> greyA,
Image<float> greyB,
Image<Vector3fda> pcA,
Image<Vector3fda> pcB,
SE3f T_ab, // frame b to frame a !camera!
CameraBase<float,D,Derived> camA,
int N_PER_T,
Image<Vector3fda> stats,
float* errB
) {
const int tid = threadIdx.x;
const int idx = threadIdx.x + blockDim.x * blockIdx.x;
const int idS = idx*N_PER_T;
const int N = pcB.Area();
const int idE = min(N,(idx+1)*N_PER_T);
SharedMemory<Vector3fda> smem;
Vector3fda* sum = smem.getPointer();
sum[tid] = Vector3fda::Zero();
for (int id=idS; id<idE; ++id) {
const int x = id%pcB.w_;
const int y = id/pcB.w_;
if (x < pcB.w_ && y < pcB.h_) {
Vector3fda pB = pcB(x,y);
if (IsValidData(pB)) {
Vector3fda pBinA = T_ab*pB;
Eigen::Vector2f uv = camA.Project(pBinA);
if (greyA.Inside(uv)) {
Vector3fda pA = pcA(floor(uv(0)), floor(uv(1)));
if (IsValidData(pA) && (pBinA-pA).norm() < 0.03) {
// if (id % 1000 == 0)
// printf("%f %f %d %d %d\n", uv(0), uv(1), x, y, id);
float diff = greyA.GetBilinear(uv)-greyB(x,y);
float rmse = diff*diff;
if (errB) errB[id] = sqrt(rmse);
sum[tid](0) += rmse;
sum[tid](1) += 1;
}
}
sum[tid](2) += 1;
}
}
// const int x = id%pcA.w_;
// const int y = id/pcA.w_;
// if (x < pcA.w_ && y < pcA.h_) {
// Vector3fda pA = pcA(x,y);
// if (IsValidData(pA)) {
// Vector3fda pAinB = T_ab.Inverse()*pA;
// Eigen::Vector2f uv = camA.Project(pAinB);
// if (greyB.Inside(uv)) {
// Vector3fda pA = pcA(floor(uv(0)), floor(uv(1)));
//// if ((pBinA-pA).norm() < 0.03) {
// // if (tid % 10 == 0)
// // printf("%f %f %d %d %d\n", uv(0), uv(1), x, y, id);
// float diff = greyB.GetBilinear(uv)-greyA(x,y);
// // float diff = greyB(x,y);
// float rmse = diff*diff;
// if (errB) errB[id] = sqrt(rmse);
// sum[tid](0) += rmse;
// sum[tid](1) += 1;
//// }
// }
// sum[tid](2) += 1;
// }
// }
}
SumPyramidReduce<Vector3fda, BLK_SIZE>(tid, sum, stats.ptr_);
}
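// stats accumulates three sums per call: (0) the squared grey-value difference
// of matched points, (1) the number of matched points, and (2) the number of
// valid points of pcB that were tested. OverlapGpu below reports
// overlap = matched / valid and rmse = sqrt(sumSqDiff / valid).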
template <int D, class Derived>
void OverlapGpu(const Image<float>& greyA, const Image<float>& greyB,
const Image<Vector3fda>& pcA,
const Image<Vector3fda>& pcB,
const SE3f& T_ab,
const CameraBase<float,D,Derived>& camA, float& overlap, float& rmse,
Image<float>* errB) {
const size_t BLK_SIZE = 32;
size_t N = pcB.Area();
dim3 threads, blocks;
ComputeKernelParamsForArray(blocks,threads,N/10,BLK_SIZE);
ManagedDeviceImage<Vector3fda> out(1,1);
cudaMemset(out.ptr_, 0, out.SizeBytes());
KernelOverlap<BLK_SIZE,D,Derived><<<blocks,threads,
BLK_SIZE*sizeof(Vector3fda)>>>(greyA, greyB, pcA, pcB, T_ab,
camA, 10, out, errB ? errB->ptr_ : nullptr);
checkCudaErrors(cudaDeviceSynchronize());
ManagedHostImage<Vector3fda> stats(1,1);
stats.CopyFrom(out, cudaMemcpyDeviceToHost);
overlap = stats[0](1) / stats[0](2);
rmse = sqrtf(stats[0](0) / stats[0](2));
}
template
void OverlapGpu(const Image<float>& greyA, const Image<float>& greyB,
const Image<Vector3fda>& pcA,
const Image<Vector3fda>& pcB,
const SE3f& T_ab,
const BaseCameraf& camA, float& overlap, float& rmse,
Image<float>* errB);
template
void OverlapGpu(const Image<float>& greyA, const Image<float>& greyB,
const Image<Vector3fda>& pcA,
const Image<Vector3fda>& pcB,
const SE3f& T_ab,
const BaseCameraPoly3f& camA, float& overlap, float& rmse,
Image<float>* errB);
}
|
1900c417d8e2532bd7a533b43e3223f85cdc48ff.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (C) 2021 ASTRON (Netherlands Institute for Radio Astronomy)
// SPDX-License-Identifier: GPL-3.0-or-later
#include <hip/hip_runtime.h>
#include "FDDKernel.hpp"
#include "fdd_kernel.cuh"
#include "common/cuda/CU.h"
/*
* Helper functions
*/
void FDDKernel::copy_delay_table(
const void* src,
size_t count,
size_t offset,
hipStream_t stream)
{
cu::checkError(hipMemcpyToSymbolAsync(
c_delay_table,
src,
count, offset,
hipMemcpyDeviceToDevice, stream)
);
}
unsigned long div_round_up(unsigned long a, unsigned long b) {
return (a-1) / b + 1;
}
/*
* dedisperse routine
*/
void FDDKernel::launch(
dedisp_size ndm,
dedisp_size nfreq,
dedisp_size nchan,
float dt,
const dedisp_float* d_spin_frequencies,
const dedisp_float* d_dm_list,
const dedisp_float2* d_in,
const dedisp_float2* d_out,
dedisp_size in_stride,
dedisp_size out_stride,
unsigned int idm_start,
unsigned int idm_end,
unsigned int ichan_start,
hipStream_t stream)
{
// Define thread decomposition
unsigned grid_x = ::max((int) ((ndm + NDM_BATCH_GRID) / NDM_BATCH_GRID), 1);
unsigned grid_y = NFREQ_BATCH_GRID;
dim3 grid(grid_x, grid_y);
dim3 block(NFREQ_BATCH_BLOCK);
/* Execute the kernel
* The second kernel argument can be set to true
* in order to enable an experimental optimization feature,
* where extrapolation is used in the computation of the phasors.
   * Boundary conditions should be further explored to determine
* functional correctness at all times.
* Leaving this feature in because it might be beneficial
* depending on the system configurations.
*/
#define CALL_KERNEL(NCHAN) \
hipLaunchKernelGGL(( dedisperse_kernel<NCHAN, false>) \
, dim3(grid), dim3(block), 0, stream, \
nfreq, \
dt, \
(float *) d_spin_frequencies, \
(float *) d_dm_list, \
in_stride, \
out_stride, \
(const float2 *) d_in, \
(float2 *) d_out, \
idm_start, \
idm_end, \
ichan_start);
switch (nchan)
{
case 16: CALL_KERNEL(16); break;
case 32: CALL_KERNEL(32); break;
case 64: CALL_KERNEL(64); break;
case 128: CALL_KERNEL(128); break;
case 256: CALL_KERNEL(256); break;
}
}
/*
* dedisperse routine
*/
void FDDKernel::scale(
dedisp_size height,
dedisp_size width,
dedisp_size stride,
dedisp_float scale,
dedisp_float* d_data,
hipStream_t stream)
{
// Define thread decomposition
dim3 grid(height);
dim3 block(128);
// Execute the kernel
hipLaunchKernelGGL(( scale_output_kernel), dim3(grid), dim3(block), 0, stream,
width,
stride,
scale,
d_data);
}
| 1900c417d8e2532bd7a533b43e3223f85cdc48ff.cu | // Copyright (C) 2021 ASTRON (Netherlands Institute for Radio Astronomy)
// SPDX-License-Identifier: GPL-3.0-or-later
#include <cuda.h>
#include "FDDKernel.hpp"
#include "fdd_kernel.cuh"
#include "common/cuda/CU.h"
/*
* Helper functions
*/
void FDDKernel::copy_delay_table(
const void* src,
size_t count,
size_t offset,
cudaStream_t stream)
{
cu::checkError(cudaMemcpyToSymbolAsync(
c_delay_table,
src,
count, offset,
cudaMemcpyDeviceToDevice, stream)
);
}
unsigned long div_round_up(unsigned long a, unsigned long b) {
return (a-1) / b + 1;
}
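// e.g. div_round_up(10, 3) == 4 and div_round_up(9, 3) == 3 (integer ceiling).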
/*
* dedisperse routine
*/
void FDDKernel::launch(
dedisp_size ndm,
dedisp_size nfreq,
dedisp_size nchan,
float dt,
const dedisp_float* d_spin_frequencies,
const dedisp_float* d_dm_list,
const dedisp_float2* d_in,
const dedisp_float2* d_out,
dedisp_size in_stride,
dedisp_size out_stride,
unsigned int idm_start,
unsigned int idm_end,
unsigned int ichan_start,
cudaStream_t stream)
{
// Define thread decomposition
unsigned grid_x = std::max((int) ((ndm + NDM_BATCH_GRID) / NDM_BATCH_GRID), 1);
unsigned grid_y = NFREQ_BATCH_GRID;
dim3 grid(grid_x, grid_y);
dim3 block(NFREQ_BATCH_BLOCK);
/* Execute the kernel
* The second kernel argument can be set to true
* in order to enable an experimental optimization feature,
* where extrapolation is used in the computation of the phasors.
   * Boundary conditions should be further explored to determine
* functional correctness at all times.
* Leaving this feature in because it might be beneficial
* depending on the system configurations.
*/
#define CALL_KERNEL(NCHAN) \
dedisperse_kernel<NCHAN, false> \
<<<grid, block, 0, stream>>>( \
nfreq, \
dt, \
(float *) d_spin_frequencies, \
(float *) d_dm_list, \
in_stride, \
out_stride, \
(const float2 *) d_in, \
(float2 *) d_out, \
idm_start, \
idm_end, \
ichan_start);
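// The 'false' template argument in the macro above keeps the experimental
// extrapolation path described in the comment disabled; instantiating
// dedisperse_kernel<NCHAN, true> instead would enable it.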
switch (nchan)
{
case 16: CALL_KERNEL(16); break;
case 32: CALL_KERNEL(32); break;
case 64: CALL_KERNEL(64); break;
case 128: CALL_KERNEL(128); break;
case 256: CALL_KERNEL(256); break;
}
}
/*
* dedisperse routine
*/
void FDDKernel::scale(
dedisp_size height,
dedisp_size width,
dedisp_size stride,
dedisp_float scale,
dedisp_float* d_data,
cudaStream_t stream)
{
// Define thread decomposition
dim3 grid(height);
dim3 block(128);
// Execute the kernel
scale_output_kernel<<<grid, block, 0, stream>>>(
width,
stride,
scale,
d_data);
}
|
9d5a130698fc242dffc74b53ec61dfe293f5f5f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2014-2015 Isis Innovation Limited and the authors of InfiniTAM
#include "ITMLowLevelEngine_CUDA.h"
#include "../../../../ORUtils/CUDADefines.h"
#include "../../DeviceAgnostic/ITMLowLevelEngine.h"
using namespace ITMLib::Engine;
ITMLowLevelEngine_CUDA::ITMLowLevelEngine_CUDA(void){}
ITMLowLevelEngine_CUDA::~ITMLowLevelEngine_CUDA(void){}
__global__ void filterSubsample_device(Vector4u *imageData_out, Vector2i newDims, const Vector4u *imageData_in, Vector2i oldDims);
__global__ void filterSubsampleWithHoles_device(float *imageData_out, Vector2i newDims, const float *imageData_in, Vector2i oldDims);
__global__ void filterSubsampleWithHoles_device(Vector4f *imageData_out, Vector2i newDims, const Vector4f *imageData_in, Vector2i oldDims);
__global__ void gradientX_device(Vector4s *grad, const Vector4u *image, Vector2i imgSize);
__global__ void gradientY_device(Vector4s *grad, const Vector4u *image, Vector2i imgSize);
// host methods
void ITMLowLevelEngine_CUDA::CopyImage(ITMUChar4Image *image_out, const ITMUChar4Image *image_in) const
{
Vector4u *dest = image_out->GetData(MEMORYDEVICE_CUDA);
const Vector4u *src = image_in->GetData(MEMORYDEVICE_CUDA);
ITMSafeCall(hipMemcpy(dest,src,image_in->dataSize*sizeof(Vector4u),hipMemcpyDeviceToDevice));
}
void ITMLowLevelEngine_CUDA::CopyImage(ITMFloatImage *image_out, const ITMFloatImage *image_in) const
{
float *dest = image_out->GetData(MEMORYDEVICE_CUDA);
const float *src = image_in->GetData(MEMORYDEVICE_CUDA);
ITMSafeCall(hipMemcpy(dest, src, image_in->dataSize * sizeof(float), hipMemcpyDeviceToDevice));
}
void ITMLowLevelEngine_CUDA::CopyImage(ITMFloat4Image *image_out, const ITMFloat4Image *image_in) const
{
Vector4f *dest = image_out->GetData(MEMORYDEVICE_CUDA);
const Vector4f *src = image_in->GetData(MEMORYDEVICE_CUDA);
ITMSafeCall(hipMemcpy(dest, src, image_in->dataSize * sizeof(Vector4f), hipMemcpyDeviceToDevice));
}
void ITMLowLevelEngine_CUDA::FilterSubsample(ITMUChar4Image *image_out, const ITMUChar4Image *image_in) const
{
Vector2i oldDims = image_in->noDims;
Vector2i newDims; newDims.x=image_in->noDims.x/2;newDims.y=image_in->noDims.y/2;
image_out->ChangeDims(newDims);
	const Vector4u *imageData_in = image_in->GetData(MEMORYDEVICE_CUDA);
	Vector4u *imageData_out = image_out->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16,16);
dim3 gridSize((int)ceil((float)newDims.x/(float)blockSize.x),(int)ceil((float)newDims.y/(float)blockSize.y));
	filterSubsample_device << <gridSize,blockSize>> >(imageData_out,newDims,imageData_in,oldDims);
}
void ITMLowLevelEngine_CUDA::FilterSubsampleWithHoles(ITMFloatImage *image_out, const ITMFloatImage *image_in) const
{
Vector2i oldDims = image_in->noDims;
Vector2i newDims; newDims.x = image_in->noDims.x / 2; newDims.y = image_in->noDims.y / 2;
image_out->ChangeDims(newDims);
const float *imageData_in = image_in->GetData(MEMORYDEVICE_CUDA);
float *imageData_out = image_out->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)newDims.x / (float)blockSize.x), (int)ceil((float)newDims.y / (float)blockSize.y));
filterSubsampleWithHoles_device << <gridSize, blockSize >> >(imageData_out, newDims, imageData_in, oldDims);
}
void ITMLowLevelEngine_CUDA::FilterSubsampleWithHoles(ITMFloat4Image *image_out, const ITMFloat4Image *image_in) const
{
Vector2i oldDims = image_in->noDims;
Vector2i newDims; newDims.x = image_in->noDims.x / 2; newDims.y = image_in->noDims.y / 2;
image_out->ChangeDims(newDims);
const Vector4f *imageData_in = image_in->GetData(MEMORYDEVICE_CUDA);
Vector4f *imageData_out = image_out->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)newDims.x / (float)blockSize.x), (int)ceil((float)newDims.y / (float)blockSize.y));
filterSubsampleWithHoles_device << <gridSize, blockSize >> >(imageData_out, newDims, imageData_in, oldDims);
}
void ITMLowLevelEngine_CUDA::GradientX(ITMShort4Image *grad_out, const ITMUChar4Image *image_in) const
{
grad_out->ChangeDims(image_in->noDims);
Vector2i imgSize = image_in->noDims;
Vector4s *grad = grad_out->GetData(MEMORYDEVICE_CUDA);
const Vector4u *image = image_in->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)imgSize.x/(float)blockSize.x),(int)ceil((float)imgSize.y/(float)blockSize.y));
ITMSafeCall(hipMemset(grad,0,imgSize.x*imgSize.y*sizeof(Vector4s)));
gradientX_device << <gridSize,blockSize>> >(grad,image,imgSize);
}
void ITMLowLevelEngine_CUDA::GradientY(ITMShort4Image *grad_out, const ITMUChar4Image *image_in) const
{
grad_out->ChangeDims(image_in->noDims);
Vector2i imgSize = image_in->noDims;
Vector4s *grad = grad_out->GetData(MEMORYDEVICE_CUDA);
const Vector4u *image = image_in->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)imgSize.x / (float)blockSize.x), (int)ceil((float)imgSize.y / (float)blockSize.y));
ITMSafeCall(hipMemset(grad, 0, imgSize.x * imgSize.y * sizeof(Vector4s)));
gradientY_device << <gridSize, blockSize >> >(grad, image, imgSize);
}
// device functions
__global__ void filterSubsample_device(Vector4u *imageData_out, Vector2i newDims, const Vector4u *imageData_in, Vector2i oldDims)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y= threadIdx.y + blockIdx.y * blockDim.y;
if(x > newDims.x-1||y>newDims.y-1)return;
filterSubsample(imageData_out, x, y, newDims, imageData_in, oldDims);
}
__global__ void filterSubsampleWithHoles_device(float *imageData_out, Vector2i newDims, const float *imageData_in, Vector2i oldDims)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x > newDims.x - 1 || y > newDims.y - 1) return;
filterSubsampleWithHoles(imageData_out, x, y, newDims, imageData_in, oldDims);
}
__global__ void filterSubsampleWithHoles_device(Vector4f *imageData_out, Vector2i newDims, const Vector4f *imageData_in, Vector2i oldDims)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x > newDims.x - 1 || y > newDims.y - 1) return;
filterSubsampleWithHoles(imageData_out, x, y, newDims, imageData_in, oldDims);
}
__global__ void gradientX_device(Vector4s *grad, const Vector4u *image, Vector2i imgSize)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if(x<2 || x>imgSize.x-2 || y<2 || y>imgSize.y-2)return;
gradientX(grad, x, y, image, imgSize);
}
__global__ void gradientY_device(Vector4s *grad, const Vector4u *image, Vector2i imgSize)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < 2 || x > imgSize.x - 2 || y < 2 || y > imgSize.y - 2) return;
gradientY(grad, x, y, image, imgSize);
}
| 9d5a130698fc242dffc74b53ec61dfe293f5f5f5.cu | // Copyright 2014-2015 Isis Innovation Limited and the authors of InfiniTAM
#include "ITMLowLevelEngine_CUDA.h"
#include "../../../../ORUtils/CUDADefines.h"
#include "../../DeviceAgnostic/ITMLowLevelEngine.h"
using namespace ITMLib::Engine;
ITMLowLevelEngine_CUDA::ITMLowLevelEngine_CUDA(void){}
ITMLowLevelEngine_CUDA::~ITMLowLevelEngine_CUDA(void){}
__global__ void filterSubsample_device(Vector4u *imageData_out, Vector2i newDims, const Vector4u *imageData_in, Vector2i oldDims);
__global__ void filterSubsampleWithHoles_device(float *imageData_out, Vector2i newDims, const float *imageData_in, Vector2i oldDims);
__global__ void filterSubsampleWithHoles_device(Vector4f *imageData_out, Vector2i newDims, const Vector4f *imageData_in, Vector2i oldDims);
__global__ void gradientX_device(Vector4s *grad, const Vector4u *image, Vector2i imgSize);
__global__ void gradientY_device(Vector4s *grad, const Vector4u *image, Vector2i imgSize);
// host methods
void ITMLowLevelEngine_CUDA::CopyImage(ITMUChar4Image *image_out, const ITMUChar4Image *image_in) const
{
Vector4u *dest = image_out->GetData(MEMORYDEVICE_CUDA);
const Vector4u *src = image_in->GetData(MEMORYDEVICE_CUDA);
ITMSafeCall(cudaMemcpy(dest,src,image_in->dataSize*sizeof(Vector4u),cudaMemcpyDeviceToDevice));
}
void ITMLowLevelEngine_CUDA::CopyImage(ITMFloatImage *image_out, const ITMFloatImage *image_in) const
{
float *dest = image_out->GetData(MEMORYDEVICE_CUDA);
const float *src = image_in->GetData(MEMORYDEVICE_CUDA);
ITMSafeCall(cudaMemcpy(dest, src, image_in->dataSize * sizeof(float), cudaMemcpyDeviceToDevice));
}
void ITMLowLevelEngine_CUDA::CopyImage(ITMFloat4Image *image_out, const ITMFloat4Image *image_in) const
{
Vector4f *dest = image_out->GetData(MEMORYDEVICE_CUDA);
const Vector4f *src = image_in->GetData(MEMORYDEVICE_CUDA);
ITMSafeCall(cudaMemcpy(dest, src, image_in->dataSize * sizeof(Vector4f), cudaMemcpyDeviceToDevice));
}
void ITMLowLevelEngine_CUDA::FilterSubsample(ITMUChar4Image *image_out, const ITMUChar4Image *image_in) const
{
Vector2i oldDims = image_in->noDims;
Vector2i newDims; newDims.x=image_in->noDims.x/2;newDims.y=image_in->noDims.y/2;
image_out->ChangeDims(newDims);
	const Vector4u *imageData_in = image_in->GetData(MEMORYDEVICE_CUDA);
	Vector4u *imageData_out = image_out->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16,16);
dim3 gridSize((int)ceil((float)newDims.x/(float)blockSize.x),(int)ceil((float)newDims.y/(float)blockSize.y));
	filterSubsample_device << <gridSize,blockSize>> >(imageData_out,newDims,imageData_in,oldDims);
}
void ITMLowLevelEngine_CUDA::FilterSubsampleWithHoles(ITMFloatImage *image_out, const ITMFloatImage *image_in) const
{
Vector2i oldDims = image_in->noDims;
Vector2i newDims; newDims.x = image_in->noDims.x / 2; newDims.y = image_in->noDims.y / 2;
image_out->ChangeDims(newDims);
const float *imageData_in = image_in->GetData(MEMORYDEVICE_CUDA);
float *imageData_out = image_out->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)newDims.x / (float)blockSize.x), (int)ceil((float)newDims.y / (float)blockSize.y));
filterSubsampleWithHoles_device << <gridSize, blockSize >> >(imageData_out, newDims, imageData_in, oldDims);
}
void ITMLowLevelEngine_CUDA::FilterSubsampleWithHoles(ITMFloat4Image *image_out, const ITMFloat4Image *image_in) const
{
Vector2i oldDims = image_in->noDims;
Vector2i newDims; newDims.x = image_in->noDims.x / 2; newDims.y = image_in->noDims.y / 2;
image_out->ChangeDims(newDims);
const Vector4f *imageData_in = image_in->GetData(MEMORYDEVICE_CUDA);
Vector4f *imageData_out = image_out->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)newDims.x / (float)blockSize.x), (int)ceil((float)newDims.y / (float)blockSize.y));
filterSubsampleWithHoles_device << <gridSize, blockSize >> >(imageData_out, newDims, imageData_in, oldDims);
}
void ITMLowLevelEngine_CUDA::GradientX(ITMShort4Image *grad_out, const ITMUChar4Image *image_in) const
{
grad_out->ChangeDims(image_in->noDims);
Vector2i imgSize = image_in->noDims;
Vector4s *grad = grad_out->GetData(MEMORYDEVICE_CUDA);
const Vector4u *image = image_in->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)imgSize.x/(float)blockSize.x),(int)ceil((float)imgSize.y/(float)blockSize.y));
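	// Zero the whole gradient image first: gradientX_device skips a two-pixel
	// border, so those border pixels keep a zero gradient.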
ITMSafeCall(cudaMemset(grad,0,imgSize.x*imgSize.y*sizeof(Vector4s)));
gradientX_device << <gridSize,blockSize>> >(grad,image,imgSize);
}
void ITMLowLevelEngine_CUDA::GradientY(ITMShort4Image *grad_out, const ITMUChar4Image *image_in) const
{
grad_out->ChangeDims(image_in->noDims);
Vector2i imgSize = image_in->noDims;
Vector4s *grad = grad_out->GetData(MEMORYDEVICE_CUDA);
const Vector4u *image = image_in->GetData(MEMORYDEVICE_CUDA);
dim3 blockSize(16, 16);
dim3 gridSize((int)ceil((float)imgSize.x / (float)blockSize.x), (int)ceil((float)imgSize.y / (float)blockSize.y));
ITMSafeCall(cudaMemset(grad, 0, imgSize.x * imgSize.y * sizeof(Vector4s)));
gradientY_device << <gridSize, blockSize >> >(grad, image, imgSize);
}
// device functions
__global__ void filterSubsample_device(Vector4u *imageData_out, Vector2i newDims, const Vector4u *imageData_in, Vector2i oldDims)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y= threadIdx.y + blockIdx.y * blockDim.y;
if(x > newDims.x-1||y>newDims.y-1)return;
filterSubsample(imageData_out, x, y, newDims, imageData_in, oldDims);
}
__global__ void filterSubsampleWithHoles_device(float *imageData_out, Vector2i newDims, const float *imageData_in, Vector2i oldDims)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x > newDims.x - 1 || y > newDims.y - 1) return;
filterSubsampleWithHoles(imageData_out, x, y, newDims, imageData_in, oldDims);
}
__global__ void filterSubsampleWithHoles_device(Vector4f *imageData_out, Vector2i newDims, const Vector4f *imageData_in, Vector2i oldDims)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x > newDims.x - 1 || y > newDims.y - 1) return;
filterSubsampleWithHoles(imageData_out, x, y, newDims, imageData_in, oldDims);
}
__global__ void gradientX_device(Vector4s *grad, const Vector4u *image, Vector2i imgSize)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if(x<2 || x>imgSize.x-2 || y<2 || y>imgSize.y-2)return;
gradientX(grad, x, y, image, imgSize);
}
__global__ void gradientY_device(Vector4s *grad, const Vector4u *image, Vector2i imgSize)
{
int x = threadIdx.x + blockIdx.x * blockDim.x, y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < 2 || x > imgSize.x - 2 || y < 2 || y > imgSize.y - 2) return;
gradientY(grad, x, y, image, imgSize);
}
|
d834ef69085f827535f641feeaac8b55c1a8f5d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/focal_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void FocalLossForwardGPU(const int nthreads,
const Dtype * prob_data, const Dtype * label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype * counts, const Dtype alpha_, const Dtype gamma_) {
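// One thread per (sample, spatial position): read the softmax probability p_k of the ground-truth
// class (clamped to FLT_MIN) and write the focal loss FL = -alpha * (1 - p_k)^gamma * log(p_k);
// counts[] marks which positions contribute (0 for ignored labels) for later normalization.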
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
const Dtype pk = max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN));
loss[index] = -1 * alpha_ * powf(1 - pk, gamma_) * log(pk);
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype> *> & bottom, const vector<Blob<Dtype> *> & top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype * prob_data = prob_.gpu_data();
const Dtype * label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
Dtype * loss_data = bottom[0]->mutable_gpu_diff();
Dtype * counts = prob_.mutable_gpu_diff();
hipLaunchKernelGGL(( FocalLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts, alpha_, gamma_);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, & valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (2 == top.size()) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void FocalLossBackwardGPU(const int nthreads, const Dtype * prob_data,
const Dtype * label, Dtype * bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype * counts, const Dtype alpha_, const Dtype gamma_) {
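// One thread per (sample, spatial position): write the analytic gradient of the focal loss with
// respect to the softmax inputs into bottom_diff, using one closed-form expression for the
// ground-truth channel and another for every other channel (the two branches below).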
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
int c = 0;
const Dtype pk = max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN));
for (c = 0; c < label_value; ++c) {
const Dtype pj = max(prob_data[n * dim + c * spatial_dim + s], Dtype(FLT_MIN));
bottom_diff[n * dim + c * spatial_dim + s] = Dtype(
-1 * alpha_ * (gamma_ * pow(1 - pk, gamma_ - 1) * pk * pj * log(pk) - pow(1 - pk, gamma_) * pj));
}
bottom_diff[n * dim + c * spatial_dim + s] = Dtype(
-1 * alpha_ * (-1 * gamma_ * pow(1 - pk, gamma_) * pk * log(pk) + pow(1 - pk, gamma_ + 1)));
c++;
for ( ; c < channels; ++c) {
const Dtype pj = max(prob_data[n * dim + c * spatial_dim + s], Dtype(FLT_MIN));
bottom_diff[n * dim + c * spatial_dim + s] = Dtype(
-1 * alpha_ * (gamma_ * pow(1 - pk, gamma_ - 1) * pk * pj * log(pk) - pow(1 - pk, gamma_) * pj));
}
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> & top,
const vector<bool>& propagate_down, const vector<Blob<Dtype> *> & bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( FocalLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,
alpha_, gamma_);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, & valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FocalLossLayer);
} // namespace caffe
| d834ef69085f827535f641feeaac8b55c1a8f5d1.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/focal_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void FocalLossForwardGPU(const int nthreads,
const Dtype * prob_data, const Dtype * label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype * counts, const Dtype alpha_, const Dtype gamma_) {
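// One thread per (sample, spatial position): read the softmax probability p_k of the ground-truth
// class (clamped to FLT_MIN) and write the focal loss FL = -alpha * (1 - p_k)^gamma * log(p_k);
// counts[] marks which positions contribute (0 for ignored labels) for later normalization.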
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
const Dtype pk = max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN));
loss[index] = -1 * alpha_ * powf(1 - pk, gamma_) * log(pk);
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype> *> & bottom, const vector<Blob<Dtype> *> & top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype * prob_data = prob_.gpu_data();
const Dtype * label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
Dtype * loss_data = bottom[0]->mutable_gpu_diff();
Dtype * counts = prob_.mutable_gpu_diff();
FocalLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts, alpha_, gamma_);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, & valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (2 == top.size()) {
top[1]->ShareData(prob_);
}
}
template <typename Dtype>
__global__ void FocalLossBackwardGPU(const int nthreads, const Dtype * prob_data,
const Dtype * label, Dtype * bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype * counts, const Dtype alpha_, const Dtype gamma_) {
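// One thread per (sample, spatial position): write the analytic gradient of the focal loss with
// respect to the softmax inputs into bottom_diff, using one closed-form expression for the
// ground-truth channel and another for every other channel (the two branches below).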
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
int c = 0;
const Dtype pk = max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN));
for (c = 0; c < label_value; ++c) {
const Dtype pj = max(prob_data[n * dim + c * spatial_dim + s], Dtype(FLT_MIN));
bottom_diff[n * dim + c * spatial_dim + s] = Dtype(
-1 * alpha_ * (gamma_ * pow(1 - pk, gamma_ - 1) * pk * pj * log(pk) - pow(1 - pk, gamma_) * pj));
}
bottom_diff[n * dim + c * spatial_dim + s] = Dtype(
-1 * alpha_ * (-1 * gamma_ * pow(1 - pk, gamma_) * pk * log(pk) + pow(1 - pk, gamma_ + 1)));
c++;
for ( ; c < channels; ++c) {
const Dtype pj = max(prob_data[n * dim + c * spatial_dim + s], Dtype(FLT_MIN));
bottom_diff[n * dim + c * spatial_dim + s] = Dtype(
-1 * alpha_ * (gamma_ * pow(1 - pk, gamma_ - 1) * pk * pj * log(pk) - pow(1 - pk, gamma_) * pj));
}
counts[index] = 1;
}
}
}
template <typename Dtype>
void FocalLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype> *> & top,
const vector<bool>& propagate_down, const vector<Blob<Dtype> *> & bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
FocalLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts,
alpha_, gamma_);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, & valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(FocalLossLayer);
} // namespace caffe
|
4d211b62d4f1de635aae29b216c4c5ab65e701c4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 1024
#define DATA_TYPE float
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true){
if (code != hipSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void power_microbench(float *data1, float *data2, uint32_t *data3, uint32_t *data4, float *res, int div, unsigned iterations) {
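// Stress kernel: only lanes 0..div-1 of each warp run the unrolled loop, which issues a fixed mix of
// FP32 (add, fma) and INT32 (add, mad) instructions through inline PTX on every iteration. The final
// write of Value1/Value2 to res[] keeps the arithmetic from being optimized away.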
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register float s1 = data1[gid];
register float s2 = data2[gid];
register uint32_t s3 = data3[gid];
register uint32_t s4 = data4[gid];
register float result = 0;
register float Value1=0;
register uint32_t Value2=0;
// synchronize all threads
asm volatile ("bar.sync 0;");
if((gid%32)<div){
//ROI
#pragma unroll 100
for (unsigned j=0 ; j<iterations ; ++j) {
asm volatile ("{\t\n"
"add.f32 %0, %1, %0;\n\t"
"add.u32 %2, %3, %2;\n\t"
"add.u32 %2, %3, %2;\n\t"
// "add.u32 %2, %2, %0;\n\t"
// "mul.lo.u32 %1, %0, %2;\n\t"
"fma.rn.f32 %1, %1, %1 , %0;\n\t"
"mad.lo.u32 %3, %3, %3 , %2;\n\t"
"}" : "+f"(Value1),"+f"(s1),"+r"(s3),"+r"(Value2)
);
// result=s1+s2;
// Value2=s1-s2;
// result+=Value1;
// result*=Value1;
// Value1=Value2+result;
// result=Value1+Value2;
}
}
// synchronize all threads
asm volatile("bar.sync 0;");
// write data back to memory
res[gid] = Value1 + (float)Value2;
}
int main(int argc, char** argv){
unsigned iterations;
int blocks;
int div;
if (argc != 4){
fprintf(stderr,"usage: %s #iterations #cores #ActiveThreadsperWarp\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
blocks = atoi(argv[2]);
div = atoi(argv[3]);
}
printf("Power Microbenchmarks with iterations %u\n",iterations);
int total_threads = THREADS_PER_BLOCK*blocks;
DATA_TYPE *data1 = (DATA_TYPE*) malloc(total_threads*sizeof(DATA_TYPE));
DATA_TYPE *data2 = (DATA_TYPE*) malloc(total_threads*sizeof(DATA_TYPE));
uint32_t *data3 = (uint32_t*) malloc(total_threads*sizeof(uint32_t));
uint32_t *data4 = (uint32_t*) malloc(total_threads*sizeof(uint32_t));
DATA_TYPE *res = (DATA_TYPE*) malloc(total_threads*sizeof(DATA_TYPE));
DATA_TYPE *data1_g;
DATA_TYPE *data2_g;
uint32_t *data3_g;
uint32_t *data4_g;
DATA_TYPE *res_g;
for (uint32_t i=0; i<total_threads; i++) {
srand((unsigned)time(0));
data1[i] = (DATA_TYPE) rand() / RAND_MAX;
srand((unsigned)time(0));
data2[i] = (DATA_TYPE) rand() / RAND_MAX;
srand((unsigned)time(0));
data3[i] = (uint32_t) rand() / RAND_MAX;
srand((unsigned)time(0));
data4[i] = (uint32_t) rand() / RAND_MAX;
}
gpuErrchk( hipMalloc(&data1_g, total_threads*sizeof(DATA_TYPE)) );
gpuErrchk( hipMalloc(&data2_g, total_threads*sizeof(DATA_TYPE)) );
gpuErrchk( hipMalloc(&data3_g, total_threads*sizeof(uint32_t)) );
gpuErrchk( hipMalloc(&data4_g, total_threads*sizeof(uint32_t)) );
gpuErrchk( hipMalloc(&res_g, total_threads*sizeof(DATA_TYPE)) );
gpuErrchk( hipMemcpy(data1_g, data1, total_threads*sizeof(DATA_TYPE), hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(data2_g, data2, total_threads*sizeof(DATA_TYPE), hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(data3_g, data3, total_threads*sizeof(uint32_t), hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(data4_g, data4, total_threads*sizeof(uint32_t), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( power_microbench), dim3(blocks),dim3(THREADS_PER_BLOCK), 0, 0, data1_g, data2_g, data3_g, data4_g, res_g, div, iterations);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipMemcpy(res, res_g, total_threads*sizeof(DATA_TYPE), hipMemcpyDeviceToHost) );
hipFree(data1_g);
hipFree(data2_g);
hipFree(data3_g);
hipFree(data4_g);
hipFree(res_g);
free(data1);
free(data2);
free(data3);
free(data4);
free(res);
return 0;
} | 4d211b62d4f1de635aae29b216c4c5ab65e701c4.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define THREADS_PER_BLOCK 1024
#define DATA_TYPE float
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void power_microbench(float *data1, float *data2, uint32_t *data3, uint32_t *data4, float *res, int div, unsigned iterations) {
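// Stress kernel: only lanes 0..div-1 of each warp run the unrolled loop, which issues a fixed mix of
// FP32 (add, fma) and INT32 (add, mad) instructions through inline PTX on every iteration. The final
// write of Value1/Value2 to res[] keeps the arithmetic from being optimized away.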
int gid = blockIdx.x*blockDim.x + threadIdx.x;
register float s1 = data1[gid];
register float s2 = data2[gid];
register uint32_t s3 = data3[gid];
register uint32_t s4 = data4[gid];
register float result = 0;
register float Value1=0;
register uint32_t Value2=0;
// synchronize all threads
asm volatile ("bar.sync 0;");
if((gid%32)<div){
//ROI
#pragma unroll 100
for (unsigned j=0 ; j<iterations ; ++j) {
asm volatile ("{\t\n"
"add.f32 %0, %1, %0;\n\t"
"add.u32 %2, %3, %2;\n\t"
"add.u32 %2, %3, %2;\n\t"
// "add.u32 %2, %2, %0;\n\t"
// "mul.lo.u32 %1, %0, %2;\n\t"
"fma.rn.f32 %1, %1, %1 , %0;\n\t"
"mad.lo.u32 %3, %3, %3 , %2;\n\t"
"}" : "+f"(Value1),"+f"(s1),"+r"(s3),"+r"(Value2)
);
// result=s1+s2;
// Value2=s1-s2;
// result+=Value1;
// result*=Value1;
// Value1=Value2+result;
// result=Value1+Value2;
}
}
// synchronize all threads
asm volatile("bar.sync 0;");
// write data back to memory
res[gid] = Value1 + (float)Value2;
}
int main(int argc, char** argv){
unsigned iterations;
int blocks;
int div;
if (argc != 4){
fprintf(stderr,"usage: %s #iterations #cores #ActiveThreadsperWarp\n",argv[0]);
exit(1);
}
else {
iterations = atoi(argv[1]);
blocks = atoi(argv[2]);
div = atoi(argv[3]);
}
printf("Power Microbenchmarks with iterations %u\n",iterations);
int total_threads = THREADS_PER_BLOCK*blocks;
DATA_TYPE *data1 = (DATA_TYPE*) malloc(total_threads*sizeof(DATA_TYPE));
DATA_TYPE *data2 = (DATA_TYPE*) malloc(total_threads*sizeof(DATA_TYPE));
uint32_t *data3 = (uint32_t*) malloc(total_threads*sizeof(uint32_t));
uint32_t *data4 = (uint32_t*) malloc(total_threads*sizeof(uint32_t));
DATA_TYPE *res = (DATA_TYPE*) malloc(total_threads*sizeof(DATA_TYPE));
DATA_TYPE *data1_g;
DATA_TYPE *data2_g;
uint32_t *data3_g;
uint32_t *data4_g;
DATA_TYPE *res_g;
for (uint32_t i=0; i<total_threads; i++) {
srand((unsigned)time(0));
data1[i] = (DATA_TYPE) rand() / RAND_MAX;
srand((unsigned)time(0));
data2[i] = (DATA_TYPE) rand() / RAND_MAX;
srand((unsigned)time(0));
data3[i] = (uint32_t) rand() / RAND_MAX;
srand((unsigned)time(0));
data4[i] = (uint32_t) rand() / RAND_MAX;
}
gpuErrchk( cudaMalloc(&data1_g, total_threads*sizeof(DATA_TYPE)) );
gpuErrchk( cudaMalloc(&data2_g, total_threads*sizeof(DATA_TYPE)) );
gpuErrchk( cudaMalloc(&data3_g, total_threads*sizeof(uint32_t)) );
gpuErrchk( cudaMalloc(&data4_g, total_threads*sizeof(uint32_t)) );
gpuErrchk( cudaMalloc(&res_g, total_threads*sizeof(DATA_TYPE)) );
gpuErrchk( cudaMemcpy(data1_g, data1, total_threads*sizeof(DATA_TYPE), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(data2_g, data2, total_threads*sizeof(DATA_TYPE), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(data3_g, data3, total_threads*sizeof(uint32_t), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(data4_g, data4, total_threads*sizeof(uint32_t), cudaMemcpyHostToDevice) );
power_microbench<<<blocks,THREADS_PER_BLOCK>>>(data1_g, data2_g, data3_g, data4_g, res_g, div, iterations);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaMemcpy(res, res_g, total_threads*sizeof(DATA_TYPE), cudaMemcpyDeviceToHost) );
cudaFree(data1_g);
cudaFree(data2_g);
cudaFree(data3_g);
cudaFree(data4_g);
cudaFree(res_g);
free(data1);
free(data2);
free(data3);
free(data4);
free(res);
return 0;
} |
ef84a59da189b092ab4d6b4787365cc8ebe85c71.hip | // !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <system/op_boilerplate.h>
#include <loops/broadcasting_bool.h>
#include <loops/legacy_ops.h>
#include <types/types.h>
#include <system/Environment.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <string>
#include <stdexcept>
#include <helpers/StringUtils.h>
using namespace simdOps;
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z, typename OpClass>
static __global__ void broadcastBoolSimple(
void const* x,
Nd4jLong const* xShapeInfo,
void const* y,
Nd4jLong const* yShapeInfo,
void *z,
Nd4jLong const* zShapeInfo,
void *extraParams,
int *dimension,
int dimensionLength, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
functions::broadcast::BroadcastBool<X, Z>::template transformCuda<OpClass>(x,xShapeInfo,y,yShapeInfo,z,zShapeInfo, extraParams, dimension,dimensionLength,tadOnlyShapeInfo,tadOffsets,tadOnlyShapeInfoZ,tadOffsetsZ);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z, typename OpClass>
static __global__ void broadcastBoolSimple(const void* x, const Nd4jLong* xShapeInfo,
const void* y, const Nd4jLong* yShapeInfo,
void *z, const Nd4jLong* zShapeInfo,
void *extraParams) {
functions::broadcast::BroadcastBool<X, Z>::template transformCuda<OpClass>(x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, extraParams);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z, typename OpClass>
static __global__ void broadcastBoolInverseSimple(
void const* x,
Nd4jLong const* xShapeInfo,
void const* y,
Nd4jLong const* yShapeInfo,
void *z,
Nd4jLong const* zShapeInfo,
void *extraParams,
int *dimension,
int dimensionLength, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
functions::broadcast::BroadcastBool<X, Z>::template transformInverseCuda<OpClass>(x,xShapeInfo,y,yShapeInfo,z,zShapeInfo,extraParams,dimension,dimensionLength,tadOnlyShapeInfo,tadOffsets,tadOnlyShapeInfoZ,tadOffsetsZ);
}
namespace functions {
namespace broadcast {
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpClass>
__host__ void BroadcastBool<X,Z>::intermediateBroadcast(dim3 launchDims, hipStream_t *stream, void const* x, Nd4jLong const* xShapeInfo, void const* y, Nd4jLong const* yShapeInfo, void* z, Nd4jLong const* zShapeInfo, void *extraParams, int *dimension, int dimensionLength, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
hipLaunchKernelGGL(( broadcastBoolSimple<X, Z, OpClass>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
sd::DebugHelper::checkErrorCode(stream, "intermediateBroadcastBool(...) failed");
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpClass>
__host__ void BroadcastBool<X,Z>::intermediateBroadcast(dim3 launchDims, hipStream_t *stream,
const void *x, const Nd4jLong *xShapeInfo,
const void *y, const Nd4jLong *yShapeInfo,
void *z, const Nd4jLong *zShapeInfo,
void *extraParams) {
hipLaunchKernelGGL(( broadcastBoolSimple<X, Z, OpClass>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, extraParams);
sd::DebugHelper::checkErrorCode(stream, "intermediateBroadcastBool(...) failed");
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__host__ void BroadcastBool<X,Y>::execBroadcast(dim3 launchDims, hipStream_t *stream, int opNum, void const* x, Nd4jLong const* xShapeInfo, void const* y, Nd4jLong const* yShapeInfo, void *z, Nd4jLong const* zShapeInfo, void *extraParams, int *dimension, int dimensionLength, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
DISPATCH_BY_OPNUM_TT(intermediateBroadcast, PARAMS(launchDims, stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_BOOL_OPS))
DEBUG_KERNEL(stream, opNum);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__host__ void BroadcastBool<X,Y>::execBroadcast(dim3 launchDims, hipStream_t *stream, const int opNum,
const void *x, const Nd4jLong *xShapeInfo,
const void *y, const Nd4jLong *yShapeInfo,
void *z, const Nd4jLong *zShapeInfo,
void *extraParams) {
DISPATCH_BY_OPNUM_TT(intermediateBroadcast, PARAMS(launchDims, stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, extraParams), OPS_A(BROADCAST_BOOL_OPS))
DEBUG_KERNEL(stream, opNum);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpClass>
__host__ void BroadcastBool<X,Z>::intermediateInverseBroadcast(dim3 launchDims, hipStream_t *stream, void const* x, Nd4jLong const* xShapeInfo, void const* y, Nd4jLong const* yShapeInfo, void *z, Nd4jLong const* zShapeInfo, void *extraParams, int *dimension, int dimensionLength, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
hipLaunchKernelGGL(( broadcastBoolInverseSimple<X, Z, OpClass>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
sd::DebugHelper::checkErrorCode(stream, "intermediateBroadcastBool(...) failed");
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__host__ void BroadcastBool<X,Y>::execInverseBroadcast(dim3 launchDims, hipStream_t *stream, int opNum, void const* x, Nd4jLong const* xShapeInfo, void const* y, Nd4jLong const* yShapeInfo, void *z, Nd4jLong const* zShapeInfo, void *extraParams, int *dimension, int dimensionLength, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
DISPATCH_BY_OPNUM_TT(intermediateInverseBroadcast, PARAMS(launchDims, stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_BOOL_OPS))
DEBUG_KERNEL(stream, opNum);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpType>
__device__ void BroadcastBool<X,Z>::transformInverseCuda(
void const* vx, Nd4jLong const* xShapeInfo,
void const* vy, Nd4jLong const* yShapeInfo,
void *vz, Nd4jLong const* zShapeInfo,
void *vextraParams,
int *dimension, int dimensionLength,
Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
if (tadOnlyShapeInfoZ == nullptr) {
tadOnlyShapeInfoZ = tadOnlyShapeInfo;
tadOffsetsZ = tadOffsets;
}
auto x = reinterpret_cast<X const*>(vx);
auto y = reinterpret_cast<X const*>(vy);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
//decompose into several sub tads after
//moving all dimensions (in sorted order)
//to the back.
//permuted version of the x shape info for setting up the tad problem
__shared__ Nd4jLong tadLength;
__shared__ Nd4jLong tadEWS;
__shared__ int numTads;
__shared__ Nd4jLong xEWS;
__shared__ Nd4jLong zEWS;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(yShapeInfo) / tadLength;
xEWS = shape::elementWiseStride(xShapeInfo);
zEWS = shape::elementWiseStride(tadOnlyShapeInfoZ);
}
__syncthreads();
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto rZ = z + tadOffsetsZ[r];
auto rY = y + tadOffsets[r];
if(tadEWS > 0 && zEWS > 0 && xEWS > 0 && dimensionLength == 1) {
for (int i = threadIdx.x; i < tadLength; i+= blockDim.x)
rZ[i * zEWS] = OpType::op(x[i * xEWS], rY[i * tadEWS], extraParams);
}
else {
// it is expected that x and z tads and y array all have the same length
for (Nd4jLong i = threadIdx.x; i < tadLength; i+= blockDim.x) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
auto yOffset = shape::getIndexOffset(i, tadOnlyShapeInfo);
auto zOffset = shape::getIndexOffset(i, tadOnlyShapeInfoZ);
rZ[zOffset] = OpType::op(x[xOffset], rY[yOffset], extraParams);
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpType>
__device__ void BroadcastBool<X,Z>::transformCuda(
void const* vx, Nd4jLong const* xShapeInfo,
void const* vy, Nd4jLong const* yShapeInfo,
void *vz, Nd4jLong const* zShapeInfo,
void *vextraParams,
int *dimension, int dimensionLength,
Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
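// x and z are processed as TADs (sub-arrays along the requested dimensions): each block walks one
// x/z TAD and combines it element-wise with the y buffer, taking the fast path when x/y/z expose a
// linear element-wise stride and falling back to full offset arithmetic otherwise.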
if (tadOnlyShapeInfoZ == nullptr) {
tadOnlyShapeInfoZ = tadOnlyShapeInfo;
tadOffsetsZ = tadOffsets;
}
auto x = reinterpret_cast<X const*>(vx);
auto y = reinterpret_cast<X const*>(vy);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
//decompose into several sub tads after
//moving all dimensions (in sorted order)
//to the back.
//permuted version of the x shape info for setting up the tad problem
__shared__ Nd4jLong tadLength;
__shared__ Nd4jLong tadEWS;
__shared__ int numTads;
__shared__ Nd4jLong yEWS;
__shared__ Nd4jLong zEWS;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
yEWS = shape::elementWiseStride(yShapeInfo);
zEWS = shape::elementWiseStride(tadOnlyShapeInfoZ);
}
__syncthreads();
__shared__ Z *rZ;
__shared__ X const* rX;
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
if (threadIdx.x == 0) {
rZ = z + tadOffsetsZ[r];
rX = x + tadOffsets[r];
}
__syncthreads();
if(tadEWS > 0 && zEWS > 0 && yEWS > 0 && dimensionLength == 1) {
for (int i = threadIdx.x; i < tadLength; i+= blockDim.x)
rZ[i * zEWS] = OpType::op(rX[i * tadEWS], y[i * yEWS], extraParams);
}
else {
// it is expected that x and z tads and y array all have the same length
for (Nd4jLong i = threadIdx.x; i < tadLength; i+= blockDim.x) {
auto xOffset = shape::getIndexOffset(i, tadOnlyShapeInfo);
auto yOffset = shape::getIndexOffset(i, yShapeInfo);
auto zOffset = shape::getIndexOffset(i, tadOnlyShapeInfoZ);
rZ[zOffset] = OpType::op(rX[xOffset], y[yOffset], extraParams);
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpType>
__device__ void BroadcastBool<X,Z>::transformCuda(const void *vx, const Nd4jLong *xShapeInfo,
const void *vy, const Nd4jLong *yShapeInfo,
void *vz, const Nd4jLong *zShapeInfo,
void *vextraParams) {
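// Plain shape-driven broadcast: each thread maps its flat index in z to coordinates, collapses any
// size-1 dimension of x or y to coordinate 0, and applies OpType to the matching x/y elements,
// reusing the z offset directly when x or y share z's shape and strides.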
const X* x = reinterpret_cast<const X*>(vx);
const X* y = reinterpret_cast<const X*>(vy);
Z* z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
__shared__ Nd4jLong zLen;
__shared__ int rank;
__shared__ bool xzSameOffsets, yzSameOffsets;
if (threadIdx.x == 0) {
zLen = shape::length(zShapeInfo);
rank = shape::rank(zShapeInfo);
xzSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
yzSameOffsets = shape::haveSameShapeAndStrides(yShapeInfo, zShapeInfo);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int xCoords[MAX_RANK], yCoords[MAX_RANK], zCoords[MAX_RANK];
for (int i = tid; i < zLen; i += blockDim.x * gridDim.x) {
shape::index2coords(i, zShapeInfo, zCoords);
for (uint j = 0; j < rank; ++j) {
xCoords[j] = shape::sizeAt(xShapeInfo, j) == 1 ? 0 : zCoords[j];
yCoords[j] = shape::sizeAt(yShapeInfo, j) == 1 ? 0 : zCoords[j];
}
const auto zOffset = shape::getOffset(zShapeInfo, zCoords);
const auto xOffset = xzSameOffsets ? zOffset : shape::getOffset(xShapeInfo, xCoords);
const auto yOffset = yzSameOffsets ? zOffset : shape::getOffset(yShapeInfo, yCoords);
z[zOffset] = OpType::op(x[xOffset], y[yOffset], extraParams);
}
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT BroadcastBool, , LIBND4J_TYPES, BOOL_TYPES);
}
} | ef84a59da189b092ab4d6b4787365cc8ebe85c71.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <system/op_boilerplate.h>
#include <loops/broadcasting_bool.h>
#include <loops/legacy_ops.h>
#include <types/types.h>
#include <system/Environment.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <string>
#include <stdexcept>
#include <helpers/StringUtils.h>
using namespace simdOps;
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z, typename OpClass>
static __global__ void broadcastBoolSimple(
void const* x,
Nd4jLong const* xShapeInfo,
void const* y,
Nd4jLong const* yShapeInfo,
void *z,
Nd4jLong const* zShapeInfo,
void *extraParams,
int *dimension,
int dimensionLength, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
functions::broadcast::BroadcastBool<X, Z>::template transformCuda<OpClass>(x,xShapeInfo,y,yShapeInfo,z,zShapeInfo, extraParams, dimension,dimensionLength,tadOnlyShapeInfo,tadOffsets,tadOnlyShapeInfoZ,tadOffsetsZ);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z, typename OpClass>
static __global__ void broadcastBoolSimple(const void* x, const Nd4jLong* xShapeInfo,
const void* y, const Nd4jLong* yShapeInfo,
void *z, const Nd4jLong* zShapeInfo,
void *extraParams) {
functions::broadcast::BroadcastBool<X, Z>::template transformCuda<OpClass>(x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, extraParams);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z, typename OpClass>
static __global__ void broadcastBoolInverseSimple(
void const* x,
Nd4jLong const* xShapeInfo,
void const* y,
Nd4jLong const* yShapeInfo,
void *z,
Nd4jLong const* zShapeInfo,
void *extraParams,
int *dimension,
int dimensionLength, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
functions::broadcast::BroadcastBool<X, Z>::template transformInverseCuda<OpClass>(x,xShapeInfo,y,yShapeInfo,z,zShapeInfo,extraParams,dimension,dimensionLength,tadOnlyShapeInfo,tadOffsets,tadOnlyShapeInfoZ,tadOffsetsZ);
}
namespace functions {
namespace broadcast {
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpClass>
__host__ void BroadcastBool<X,Z>::intermediateBroadcast(dim3 launchDims, cudaStream_t *stream, void const* x, Nd4jLong const* xShapeInfo, void const* y, Nd4jLong const* yShapeInfo, void* z, Nd4jLong const* zShapeInfo, void *extraParams, int *dimension, int dimensionLength, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
broadcastBoolSimple<X, Z, OpClass><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
sd::DebugHelper::checkErrorCode(stream, "intermediateBroadcastBool(...) failed");
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpClass>
__host__ void BroadcastBool<X,Z>::intermediateBroadcast(dim3 launchDims, cudaStream_t *stream,
const void *x, const Nd4jLong *xShapeInfo,
const void *y, const Nd4jLong *yShapeInfo,
void *z, const Nd4jLong *zShapeInfo,
void *extraParams) {
broadcastBoolSimple<X, Z, OpClass><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, extraParams);
sd::DebugHelper::checkErrorCode(stream, "intermediateBroadcastBool(...) failed");
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__host__ void BroadcastBool<X,Y>::execBroadcast(dim3 launchDims, cudaStream_t *stream, int opNum, void const* x, Nd4jLong const* xShapeInfo, void const* y, Nd4jLong const* yShapeInfo, void *z, Nd4jLong const* zShapeInfo, void *extraParams, int *dimension, int dimensionLength, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
DISPATCH_BY_OPNUM_TT(intermediateBroadcast, PARAMS(launchDims, stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_BOOL_OPS))
DEBUG_KERNEL(stream, opNum);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__host__ void BroadcastBool<X,Y>::execBroadcast(dim3 launchDims, cudaStream_t *stream, const int opNum,
const void *x, const Nd4jLong *xShapeInfo,
const void *y, const Nd4jLong *yShapeInfo,
void *z, const Nd4jLong *zShapeInfo,
void *extraParams) {
DISPATCH_BY_OPNUM_TT(intermediateBroadcast, PARAMS(launchDims, stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, extraParams), OPS_A(BROADCAST_BOOL_OPS))
DEBUG_KERNEL(stream, opNum);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpClass>
__host__ void BroadcastBool<X,Z>::intermediateInverseBroadcast(dim3 launchDims, cudaStream_t *stream, void const* x, Nd4jLong const* xShapeInfo, void const* y, Nd4jLong const* yShapeInfo, void *z, Nd4jLong const* zShapeInfo, void *extraParams, int *dimension, int dimensionLength, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
broadcastBoolInverseSimple<X, Z, OpClass><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>(x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
sd::DebugHelper::checkErrorCode(stream, "intermediateBroadcastBool(...) failed");
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__host__ void BroadcastBool<X,Y>::execInverseBroadcast(dim3 launchDims, cudaStream_t *stream, int opNum, void const* x, Nd4jLong const* xShapeInfo, void const* y, Nd4jLong const* yShapeInfo, void *z, Nd4jLong const* zShapeInfo, void *extraParams, int *dimension, int dimensionLength, Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
DISPATCH_BY_OPNUM_TT(intermediateInverseBroadcast, PARAMS(launchDims, stream, x, xShapeInfo, y, yShapeInfo, z, zShapeInfo, extraParams, dimension, dimensionLength, tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ), OPS_A(BROADCAST_BOOL_OPS))
DEBUG_KERNEL(stream, opNum);
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpType>
__device__ void BroadcastBool<X,Z>::transformInverseCuda(
void const* vx, Nd4jLong const* xShapeInfo,
void const* vy, Nd4jLong const* yShapeInfo,
void *vz, Nd4jLong const* zShapeInfo,
void *vextraParams,
int *dimension, int dimensionLength,
Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
if (tadOnlyShapeInfoZ == nullptr) {
tadOnlyShapeInfoZ = tadOnlyShapeInfo;
tadOffsetsZ = tadOffsets;
}
auto x = reinterpret_cast<X const*>(vx);
auto y = reinterpret_cast<X const*>(vy);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
//decompose into several sub tads after
//moving all dimensions (in sorted order)
//to the back.
//permuted version of the x shape info for setting up the tad problem
__shared__ Nd4jLong tadLength;
__shared__ Nd4jLong tadEWS;
__shared__ int numTads;
__shared__ Nd4jLong xEWS;
__shared__ Nd4jLong zEWS;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(yShapeInfo) / tadLength;
xEWS = shape::elementWiseStride(xShapeInfo);
zEWS = shape::elementWiseStride(tadOnlyShapeInfoZ);
}
__syncthreads();
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
auto rZ = z + tadOffsetsZ[r];
auto rY = y + tadOffsets[r];
if(tadEWS > 0 && zEWS > 0 && xEWS > 0 && dimensionLength == 1) {
for (int i = threadIdx.x; i < tadLength; i+= blockDim.x)
rZ[i * zEWS] = OpType::op(x[i * xEWS], rY[i * tadEWS], extraParams);
}
else {
// it is expected that x and z tads and y array all have the same length
for (Nd4jLong i = threadIdx.x; i < tadLength; i+= blockDim.x) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo);
auto yOffset = shape::getIndexOffset(i, tadOnlyShapeInfo);
auto zOffset = shape::getIndexOffset(i, tadOnlyShapeInfoZ);
rZ[zOffset] = OpType::op(x[xOffset], rY[yOffset], extraParams);
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpType>
__device__ void BroadcastBool<X,Z>::transformCuda(
void const* vx, Nd4jLong const* xShapeInfo,
void const* vy, Nd4jLong const* yShapeInfo,
void *vz, Nd4jLong const* zShapeInfo,
void *vextraParams,
int *dimension, int dimensionLength,
Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets, Nd4jLong const* tadOnlyShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
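// x and z are processed as TADs (sub-arrays along the requested dimensions): each block walks one
// x/z TAD and combines it element-wise with the y buffer, taking the fast path when x/y/z expose a
// linear element-wise stride and falling back to full offset arithmetic otherwise.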
if (tadOnlyShapeInfoZ == nullptr) {
tadOnlyShapeInfoZ = tadOnlyShapeInfo;
tadOffsetsZ = tadOffsets;
}
auto x = reinterpret_cast<X const*>(vx);
auto y = reinterpret_cast<X const*>(vy);
auto z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
//decompose into several sub tads after
//moving all dimensions (in sorted order)
//to the back.
//permuted version of the x shape info for setting up the tad problem
__shared__ Nd4jLong tadLength;
__shared__ Nd4jLong tadEWS;
__shared__ int numTads;
__shared__ Nd4jLong yEWS;
__shared__ Nd4jLong zEWS;
if (threadIdx.x == 0) {
tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
yEWS = shape::elementWiseStride(yShapeInfo);
zEWS = shape::elementWiseStride(tadOnlyShapeInfoZ);
}
__syncthreads();
__shared__ Z *rZ;
__shared__ X const* rX;
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
if (threadIdx.x == 0) {
rZ = z + tadOffsetsZ[r];
rX = x + tadOffsets[r];
}
__syncthreads();
if(tadEWS > 0 && zEWS > 0 && yEWS > 0 && dimensionLength == 1) {
for (int i = threadIdx.x; i < tadLength; i+= blockDim.x)
rZ[i * zEWS] = OpType::op(rX[i * tadEWS], y[i * yEWS], extraParams);
}
else {
// it is expected that x and z tads and y array all have the same length
for (Nd4jLong i = threadIdx.x; i < tadLength; i+= blockDim.x) {
auto xOffset = shape::getIndexOffset(i, tadOnlyShapeInfo);
auto yOffset = shape::getIndexOffset(i, yShapeInfo);
auto zOffset = shape::getIndexOffset(i, tadOnlyShapeInfoZ);
rZ[zOffset] = OpType::op(rX[xOffset], y[yOffset], extraParams);
}
}
}
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Z>
template <typename OpType>
__device__ void BroadcastBool<X,Z>::transformCuda(const void *vx, const Nd4jLong *xShapeInfo,
const void *vy, const Nd4jLong *yShapeInfo,
void *vz, const Nd4jLong *zShapeInfo,
void *vextraParams) {
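// Plain shape-driven broadcast: each thread maps its flat index in z to coordinates, collapses any
// size-1 dimension of x or y to coordinate 0, and applies OpType to the matching x/y elements,
// reusing the z offset directly when x or y share z's shape and strides.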
const X* x = reinterpret_cast<const X*>(vx);
const X* y = reinterpret_cast<const X*>(vy);
Z* z = reinterpret_cast<Z*>(vz);
auto extraParams = reinterpret_cast<X*>(vextraParams);
__shared__ Nd4jLong zLen;
__shared__ int rank;
__shared__ bool xzSameOffsets, yzSameOffsets;
if (threadIdx.x == 0) {
zLen = shape::length(zShapeInfo);
rank = shape::rank(zShapeInfo);
xzSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo);
yzSameOffsets = shape::haveSameShapeAndStrides(yShapeInfo, zShapeInfo);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int xCoords[MAX_RANK], yCoords[MAX_RANK], zCoords[MAX_RANK];
for (int i = tid; i < zLen; i += blockDim.x * gridDim.x) {
shape::index2coords(i, zShapeInfo, zCoords);
for (uint j = 0; j < rank; ++j) {
xCoords[j] = shape::sizeAt(xShapeInfo, j) == 1 ? 0 : zCoords[j];
yCoords[j] = shape::sizeAt(yShapeInfo, j) == 1 ? 0 : zCoords[j];
}
const auto zOffset = shape::getOffset(zShapeInfo, zCoords);
const auto xOffset = xzSameOffsets ? zOffset : shape::getOffset(xShapeInfo, xCoords);
const auto yOffset = yzSameOffsets ? zOffset : shape::getOffset(yShapeInfo, yCoords);
z[zOffset] = OpType::op(x[xOffset], y[yOffset], extraParams);
}
}
BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT BroadcastBool, , LIBND4J_TYPES, BOOL_TYPES);
}
} |
932c16d0a3bf4aba8d80f354e6ce142965ec82bb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "convolutionColumnsKernel_down_smp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
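// Timing sweep: for every matrix size / thread-block shape pair above, the kernel is launched once
// and synchronized, warmed up 10 more times, and then launched 1000 times under the timer; the total
// wall-clock microseconds per configuration are printed (no device synchronization after the timed loop).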
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_Dst = NULL;
hipMalloc(&d_Dst, XSIZE*YSIZE);
float *d_Src = NULL;
hipMalloc(&d_Src, XSIZE*YSIZE);
int imageW = 1;
int imageH = 1;
int n_imageH = 1;
int pitch = 2;
int filter_Rad = 2;
int Halo_steps = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( convolutionColumnsKernel_down_smp), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,imageW,imageH,n_imageH,pitch,filter_Rad,Halo_steps);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( convolutionColumnsKernel_down_smp), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,imageW,imageH,n_imageH,pitch,filter_Rad,Halo_steps);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( convolutionColumnsKernel_down_smp), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Dst,d_Src,imageW,imageH,n_imageH,pitch,filter_Rad,Halo_steps);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 932c16d0a3bf4aba8d80f354e6ce142965ec82bb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "convolutionColumnsKernel_down_smp.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
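// Timing sweep: for every matrix size / thread-block shape pair above, the kernel is launched once
// and synchronized, warmed up 10 more times, and then launched 1000 times under the timer; the total
// wall-clock microseconds per configuration are printed (no device synchronization after the timed loop).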
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_Dst = NULL;
cudaMalloc(&d_Dst, XSIZE*YSIZE);
float *d_Src = NULL;
cudaMalloc(&d_Src, XSIZE*YSIZE);
int imageW = 1;
int imageH = 1;
int n_imageH = 1;
int pitch = 2;
int filter_Rad = 2;
int Halo_steps = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
convolutionColumnsKernel_down_smp<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,n_imageH,pitch,filter_Rad,Halo_steps);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
convolutionColumnsKernel_down_smp<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,n_imageH,pitch,filter_Rad,Halo_steps);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
convolutionColumnsKernel_down_smp<<<gridBlock,threadBlock>>>(d_Dst,d_Src,imageW,imageH,n_imageH,pitch,filter_Rad,Halo_steps);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
588f5d2968859e8f88c8a0cdc6931ad75736996d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include "jerasure.h"
extern "C"{
#include "gf_rand.h"
}
using namespace std;
#define talloc(type, num) (type *) malloc(sizeof(type)*(num))
texture<int, 1, hipReadModeElementType> texBDM;
__global__ void gmpe(int k, int w, int destId, long *dataDevice, long *codingDevice, int numOfLong) {
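// One thread per long word of a packet: for each of the w bit-rows of coding device destId, XOR
// together every data word whose corresponding entry in the bit-matrix row (fetched through the
// texture) is set, and store the result into that coding row.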
int blockNumInGrid, threadsPerBlock, threadNumInBlock, tId;
blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
threadsPerBlock = blockDim.x * blockDim.y;
threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
tId = blockNumInGrid * threadsPerBlock + threadNumInBlock;
int longIndex = tId % numOfLong;
int index, dataIdx, i, j;
long temp;
long *codPtr, *dtPtr, *innerDtPtr;
if( tId >= numOfLong)
return;
for(i=0; i<w; i++){
codPtr = codingDevice + destId * w * numOfLong + i * numOfLong;
index = destId * k * w * w + i * k * w;
temp = 0;
for(dataIdx=0; dataIdx<k; dataIdx++){
dtPtr = dataDevice + dataIdx * w * numOfLong;
for(j=0; j<w; j++){
if(tex1Dfetch(texBDM, index)){
innerDtPtr = dtPtr + j * numOfLong;
temp ^= innerDtPtr[longIndex];
}
index++;
}
}
codPtr[longIndex] = temp;
}
}
void extendCodingDevice(long *codingTemp, long *coding, int i, int m, int psize, int offset, int rows){
int k, j;
for(k=0; k<m; k++)
for(j=0; j<rows; j++)
memcpy((coding + k * psize * rows + psize * j + i * offset), (codingTemp + k * offset * rows + offset * j), sizeof(long) * offset);
}
int main(int argc, char **argv){
unsigned int m, k, w, i, j, d, r, seed, psize, round;
int *matrix, *bitmatrix, *bitmatrixDevice;
long *data, *dataDevice, *dataTemp, *coding, *codingDevice, *codingTemp;
clock_t start;
texBDM.filterMode = hipFilterModePoint;
texBDM.addressMode[0] = hipAddressModeClamp;
srand(time(NULL));
seed = rand();
MOA_Seed(seed);
if(argc != 5) {
fprintf(stderr, "Please add arguments k, m, w and size\n");
exit(1);
}
if(sscanf(argv[1], "%d", &k) == 0 || k <= 0) {
fprintf(stderr, "Wrong k. It must be strictly positive.\n");
exit(1);
}
if (sscanf(argv[2], "%d", &m) == 0 || m <= 0) {
fprintf(stderr, "Wrong m. It must be strictly positive.\n");
exit(1);
}
if (sscanf(argv[3], "%d", &w) == 0 || w <= 0 || w > 31) {
fprintf(stderr, "Wrong w. It must be between 0 and 32.\n");
exit(1);
}
if (sscanf(argv[4], "%d", &psize) == 0 || psize%sizeof(long) != 0){
fprintf(stderr, "Wrong packetsize. It must be a number of bytes that is a multiple of sizeof(long).\n");
exit(1);
}
if((k + m) > (1 << w)) {
fprintf(stderr, "Wrong combination of k, m and w. The following must hold: m + k <= 2^w\n");
exit(1);
}
psize = psize/sizeof(long);
int threadPerBlock = min(psize, 1024);
int nBlocks = ceil((float)psize/threadPerBlock);
// Creating CRS matrix and BDM
matrix = talloc(int, m*k);
for (i = 0; i < m; i++) {
for (j = 0; j < k; j++) {
matrix[i*k+j] = galois_single_divide(1, i ^ (m + j), w);
}
}
bitmatrix = jerasure_matrix_to_bitmatrix(k, m, w, matrix);
// Generating fake random data
data = talloc(long , k*w*psize);
for (i = 0; i < k; i++) {
for(j=0; j< w*psize; j++)
*(data + i*psize*w + j) = 97 + rand()%26;
}
// Allocating space for coding devices
coding = talloc(long , m * w * psize);
// Allocating GPU memory
hipMalloc(&bitmatrixDevice, m*k*w*w*sizeof(int));
hipMemcpy(bitmatrixDevice, bitmatrix, m*k*w*w*sizeof(int), hipMemcpyHostToDevice);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<int>();
hipBindTexture(0, texBDM, bitmatrixDevice, channelDesc, m*k*w*w*sizeof(int));
// Computing number of rounds
size_t free, total;
hipMemGetInfo(&free, &total);
round = ceil((float)(psize * w * (k + m) * sizeof(long)) / free);
dataTemp = talloc(long , k * w * (psize/round));
codingTemp = talloc(long, m * w * (psize/round));
start = clock();
for(i = 0; i < round; i++){
// load data chunks when needed
if(round > 1){
for(d = 0; d < k; d++)
for(r = 0; r < w; r++)
memcpy((dataTemp + d * w *(psize/round) + r * (psize/round)), (data + d * w * psize + i * psize/round + r * psize), sizeof(long) * (psize/round));
hipMalloc(&dataDevice, k * w * (psize/round) * sizeof(long));
hipMalloc(&codingDevice, m * w * (psize/round) * sizeof(long));
hipMemcpy(dataDevice, dataTemp, k * w * (psize/round) * sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(codingDevice, coding, m * w * (psize/round) * sizeof(long), hipMemcpyHostToDevice);
} //else load all the data
else{
hipMalloc(&dataDevice, k * w * (psize/round) * sizeof(long));
hipMalloc(&codingDevice, m * w * (psize/round) * sizeof(long));
hipMemcpy(dataDevice, data, k * w * (psize/round) * sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(codingDevice, codingTemp, m * w * (psize/round) * sizeof(long), hipMemcpyHostToDevice);
}
for(j = 0; j < m; j++)
hipLaunchKernelGGL(( gmpe), dim3(nBlocks), dim3(threadPerBlock), 0, 0, k, w, j, dataDevice, codingDevice, (psize/round));
// copy coding back to main memory
hipDeviceSynchronize();
hipMemcpy(codingTemp, codingDevice, m * w * (psize/round) * sizeof(long), hipMemcpyDeviceToHost);
extendCodingDevice(codingTemp, coding, i, m, psize, (psize/round), w);
hipFree(dataDevice);
hipFree(codingDevice);
}
printf("Encoding complete, time elapsed: %.8fs\n", (clock() - (float)start) / CLOCKS_PER_SEC);
hipUnbindTexture(texBDM);
return 0;
}
| 588f5d2968859e8f88c8a0cdc6931ad75736996d.cu | #include <stdio.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
#include <cuda.h>
#include "jerasure.h"
extern "C"{
#include "gf_rand.h"
}
using namespace std;
#define talloc(type, num) (type *) malloc(sizeof(type)*(num))
texture<int, 1, cudaReadModeElementType> texBDM;
__global__ void gmpe(int k, int w, int destId, long *dataDevice, long *codingDevice, int numOfLong) {
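// One thread per long word of a packet: for each of the w bit-rows of coding device destId, XOR
// together every data word whose corresponding entry in the bit-matrix row (fetched through the
// texture) is set, and store the result into that coding row.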
int blockNumInGrid, threadsPerBlock, threadNumInBlock, tId;
blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y;
threadsPerBlock = blockDim.x * blockDim.y;
threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y;
tId = blockNumInGrid * threadsPerBlock + threadNumInBlock;
int longIndex = tId % numOfLong;
int index, dataIdx, i, j;
long temp;
long *codPtr, *dtPtr, *innerDtPtr;
if( tId >= numOfLong)
return;
for(i=0; i<w; i++){
codPtr = codingDevice + destId * w * numOfLong + i * numOfLong;
index = destId * k * w * w + i * k * w;
temp = 0;
for(dataIdx=0; dataIdx<k; dataIdx++){
dtPtr = dataDevice + dataIdx * w * numOfLong;
for(j=0; j<w; j++){
if(tex1Dfetch(texBDM, index)){
innerDtPtr = dtPtr + j * numOfLong;
temp ^= innerDtPtr[longIndex];
}
index++;
}
}
codPtr[longIndex] = temp;
}
}
void extendCodingDevice(long *codingTemp, long *coding, int i, int m, int psize, int offset, int rows){
int k, j;
for(k=0; k<m; k++)
for(j=0; j<rows; j++)
memcpy((coding + k * psize * rows + psize * j + i * offset), (codingTemp + k * offset * rows + offset * j), sizeof(long) * offset);
}
int main(int argc, char **argv){
unsigned int m, k, w, i, j, d, r, seed, psize, round;
int *matrix, *bitmatrix, *bitmatrixDevice;
long *data, *dataDevice, *dataTemp, *coding, *codingDevice, *codingTemp;
clock_t start;
texBDM.filterMode = cudaFilterModePoint;
texBDM.addressMode[0] = cudaAddressModeClamp;
srand(time(NULL));
seed = rand();
MOA_Seed(seed);
if(argc != 5) {
fprintf(stderr, "Please add arguments k, m, w and size\n");
exit(1);
}
if(sscanf(argv[1], "%d", &k) == 0 || k <= 0) {
fprintf(stderr, "Wrong k. It must be strictly postive.\n");
exit(1);
}
if (sscanf(argv[2], "%d", &m) == 0 || m <= 0) {
fprintf(stderr, "Wrong m. It must be strictly positive.\n");
exit(1);
}
if (sscanf(argv[3], "%d", &w) == 0 || w <= 0 || w > 31) {
fprintf(stderr, "Wrong w. It must be between 0 and 32.\n");
exit(1);
}
if (sscanf(argv[4], "%d", &psize) == 0 || psize%sizeof(long) != 0){
fprintf(stderr, "Wrong packetsize. It must be an amount of bytes multiple of long.\n");
exit(1);
}
if((k + m) > (1 << w)) {
fprintf(stderr, "Wrong combinatio of k, m and w. The following must hold: m + k <= 2^w\n");
exit(1);
}
psize = psize/sizeof(long);
int threadPerBlock = min(psize, 1024);
int nBlocks = ceil((float)psize/threadPerBlock);
// Creating CRS matrix and BDM
matrix = talloc(int, m*k);
for (i = 0; i < m; i++) {
for (j = 0; j < k; j++) {
matrix[i*k+j] = galois_single_divide(1, i ^ (m + j), w);
}
}
bitmatrix = jerasure_matrix_to_bitmatrix(k, m, w, matrix);
// Generating fake random data
data = talloc(long , k*w*psize);
for (i = 0; i < k; i++) {
for(j=0; j< w*psize; j++)
*(data + i*psize*w + j) = 97 + rand()%26;
}
// Allocating space for coding devices
coding = talloc(long , m * w * psize);
// Allocating GPU memory
cudaMalloc(&bitmatrixDevice, m*k*w*w*sizeof(int));
cudaMemcpy(bitmatrixDevice, bitmatrix, m*k*w*w*sizeof(int), cudaMemcpyHostToDevice);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<int>();
cudaBindTexture(0, texBDM, bitmatrixDevice, channelDesc, m*k*w*w*sizeof(int));
// Computing number of rounds
size_t free, total;
cudaMemGetInfo(&free, &total);
round = ceil((float)(psize * w * (k + m) * sizeof(long)) / free);
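// NOTE: psize/round below uses integer division; if psize is not a multiple of round, the trailing psize%round longs of each row are never encoded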
dataTemp = talloc(long , k * w * (psize/round));
codingTemp = talloc(long, m * w * (psize/round));
start = clock();
for(i = 0; i < round; i++){
// load data chunks when needed
if(round > 1){
for(d = 0; d < k; d++)
for(r = 0; r < w; r++)
memcpy((dataTemp + d * w *(psize/round) + r * (psize/round)), (data + d * w * psize + i * psize/round + r * psize), sizeof(long) * (psize/round));
cudaMalloc(&dataDevice, k * w * (psize/round) * sizeof(long));
cudaMalloc(&codingDevice, m * w * (psize/round) * sizeof(long));
cudaMemcpy(dataDevice, dataTemp, k * w * (psize/round) * sizeof(long), cudaMemcpyHostToDevice);
cudaMemcpy(codingDevice, coding, m * w * (psize/round) * sizeof(long), cudaMemcpyHostToDevice);
} //else load all the data
else{
cudaMalloc(&dataDevice, k * w * (psize/round) * sizeof(long));
cudaMalloc(&codingDevice, m * w * (psize/round) * sizeof(long));
cudaMemcpy(dataDevice, data, k * w * (psize/round) * sizeof(long), cudaMemcpyHostToDevice);
cudaMemcpy(codingDevice, codingTemp, m * w * (psize/round) * sizeof(long), cudaMemcpyHostToDevice);
}
for(j = 0; j < m; j++)
gmpe<<<nBlocks, threadPerBlock>>>(k, w, j, dataDevice, codingDevice, (psize/round));
// copy coding back to main memory
cudaDeviceSynchronize();
cudaMemcpy(codingTemp, codingDevice, m * w * (psize/round) * sizeof(long), cudaMemcpyDeviceToHost);
extendCodingDevice(codingTemp, coding, i, m, psize, (psize/round), w);
cudaFree(dataDevice);
cudaFree(codingDevice);
}
printf("Encoding complete, time elapsed: %.8fs\n", (clock() - (float)start) / CLOCKS_PER_SEC);
cudaUnbindTexture(texBDM);
return 0;
}
|
f1b98a28428ca4c288b6268715db5a861fa730c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <roctracer/roctx.h>
#include "argparse/argparse.hpp"
#include "stencil/stencil.hpp"
#include "statistics.hpp"
const float COLD_TEMP = 0;
const float HOT_TEMP = 1;
/*! initialize every point in the compute region to the midpoint of HOT_TEMP and COLD_TEMP
*/
__global__ void init_kernel(Accessor<float> dst, const Rect3 reg, const Rect3 cReg //<! the entire compute region
) {
for (int64_t z = reg.lo.z + blockIdx.z * blockDim.z + threadIdx.z; z < reg.hi.z; z += gridDim.z * blockDim.z) {
for (int64_t y = reg.lo.y + blockIdx.y * blockDim.y + threadIdx.y; y < reg.hi.y; y += gridDim.y * blockDim.y) {
for (int64_t x = reg.lo.x + blockIdx.x * blockDim.x + threadIdx.x; x < reg.hi.x; x += gridDim.x * blockDim.x) {
Dim3 o(x, y, z);
dst[o] = (HOT_TEMP + COLD_TEMP) / 2;
}
}
}
}
__device__ int64_t dist(const Dim3 a, const Dim3 b) {
return __fsqrt_rn(float((a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y) + (a.z - b.z) * (a.z - b.z)));
}
/* Apply a 3d jacobi stencil to `reg`
Since the library only supports periodic boundary conditions right now,
fix part of the middle of the compute region at 1 and part at 0
*/
__global__ void stencil_kernel(Accessor<float> dst, const Accessor<float> src,
const Rect3 myReg, //<! the region i should modify
const Rect3 cReg //<! the entire compute region
) {
// x = 1/3, y = 1/2, z = 1/2
const Dim3 hotCenter(cReg.lo.x + (cReg.hi.x - cReg.lo.x) / 3, (cReg.lo.y + cReg.hi.y) / 2,
(cReg.lo.z + cReg.hi.z) / 2);
const Dim3 coldCenter(cReg.lo.x + (cReg.hi.x - cReg.lo.x) * 2 / 3, (cReg.lo.y + cReg.hi.y) / 2,
(cReg.lo.z + cReg.hi.z) / 2);
const int sphereRadius = (cReg.hi.x - cReg.lo.x) / 10;
for (int z = myReg.lo.z + blockIdx.z * blockDim.z + threadIdx.z; z < myReg.hi.z; z += gridDim.z * blockDim.z) {
for (int y = myReg.lo.y + blockIdx.y * blockDim.y + threadIdx.y; y < myReg.hi.y; y += gridDim.y * blockDim.y) {
for (int x = myReg.lo.x + blockIdx.x * blockDim.x + threadIdx.x; x < myReg.hi.x; x += gridDim.x * blockDim.x) {
Dim3 o(x, y, z);
/* a sphere 1/10 of the CR in radius and x = 1/3 of the way over is set hot
a similar sphere of cold is at x = 2/3
*/
if (dist(o, hotCenter) <= sphereRadius) {
dst[o] = HOT_TEMP;
} else if (dist(o, coldCenter) <= sphereRadius) {
dst[o] = COLD_TEMP;
} else {
float px = src[o + Dim3(1, 0, 0)];
float mx = src[o + Dim3(-1, 0, 0)];
float py = src[o + Dim3(0, 1, 0)];
float my = src[o + Dim3(0, -1, 0)];
float pz = src[o + Dim3(0, 0, 1)];
float mz = src[o + Dim3(0, 0, -1)];
float val = 0;
val += px;
val += mx;
val += py;
val += my;
val += pz;
val += mz;
val /= 6;
dst[o] = val;
}
}
}
}
}
int main(int argc, char **argv) {
bool useStaged = false;
bool useColo = false;
bool useMemcpyPeer = false;
bool useKernel = false;
bool trivial = false;
bool noOverlap = false;
bool paraview = false;
size_t x = 512;
size_t y = 512;
size_t z = 512;
std::string prefix;
int iters = 5;
int checkpointPeriod = -1;
argparse::Parser parser("a cwpearson/argparse-powered CLI app");
// clang-format off
parser.add_flag(useStaged, "--staged")->help("Enable RemoteSender/Recver");
parser.add_flag(useColo, "--colo")->help("Enable ColocatedHaloSender/Recver");
parser.add_flag(useMemcpyPeer, "--peer")->help("Enable PeerAccessSender");
parser.add_flag(useKernel, "--kernel")->help("Enable PeerCopySender");
parser.add_flag(trivial, "--trivial")->help("Skip node-aware placement");
parser.add_flag(noOverlap, "--no-overlap")->help("Don't overlap communication and computation");
parser.add_option(prefix, "--prefix")->help("prefix for paraview files");
parser.add_flag(paraview, "--paraview")->help("dump paraview files");
parser.add_option(iters, "--iters", "-n")->help("number of iterations");
parser.add_option(checkpointPeriod, "--period", "-q")->help("iterations between checkpoints");
parser.add_positional(x)->required();
parser.add_positional(y)->required();
parser.add_positional(z)->required();
// clang-format on
if (!parser.parse(argc, argv)) {
std::cerr << parser.help() << "\n";
exit(EXIT_FAILURE);
}
if (parser.need_help()) {
std::cerr << parser.help() << "\n";
return 0;
}
// default checkpoint 10 times
if (checkpointPeriod <= 0) {
checkpointPeriod = iters / 10;
}
MPI_Init(&argc, &argv);
int size;
int rank;
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (0 == rank) {
#ifndef NDEBUG
std::cout << "ERR: not release mode\n";
std::cerr << "ERR: not release mode\n";
exit(-1);
#endif
#ifdef STENCIL_EXCHANGE_STATS
std::cout << "ERR: STENCIL_EXCHANGE_STATS\n";
std::cerr << "ERR: STENCIL_EXCHANGE_STATS\n";
exit(-1);
#endif
}
int devCount;
CUDA_RUNTIME(hipGetDeviceCount(&devCount));
hipDeviceProp_t prop;
CUDA_RUNTIME(hipGetDeviceProperties(&prop, 0));
Method methods = Method::None;
if (useStaged) {
methods |= Method::CudaMpi;
}
if (useColo) {
methods |= Method::ColoPackMemcpyUnpack;
}
if (useMemcpyPeer) {
methods |= Method::CudaMemcpyPeer;
}
if (useKernel) {
methods |= Method::CudaKernel;
}
if (Method::None == methods) {
methods = Method::Default;
}
PlacementStrategy strategy = PlacementStrategy::NodeAware;
if (trivial) {
strategy = PlacementStrategy::Trivial;
}
bool overlap = true;
if (noOverlap) {
overlap = false;
}
Radius radius = Radius::constant(0);
// x
radius.dir(1, 0, 0) = 1;
radius.dir(-1, 0, 0) = 1;
// y
radius.dir(0, 1, 0) = 1;
radius.dir(0, -1, 0) = 1;
// z
radius.dir(0, 0, 1) = 1;
radius.dir(0, 0, -1) = 1;
// radius.set_face(1);
Statistics iterTime;
{
DistributedDomain dd(x, y, z);
dd.set_methods(methods);
dd.set_radius(radius);
dd.set_placement(strategy);
auto dh = dd.add_data<float>("d");
dd.realize();
MPI_Barrier(MPI_COMM_WORLD);
Rect3 computeRegion = dd.get_compute_region();
// create a compute stream for each local domain
std::vector<RcStream> computeStreams(dd.domains().size());
for (size_t di = 0; di < dd.domains().size(); ++di) {
computeStreams[di] = RcStream(dd.domains()[di].gpu());
}
// init current values
std::cerr << "init\n";
for (size_t di = 0; di < dd.domains().size(); ++di) {
auto &d = dd.domains()[di];
Rect3 reg = d.get_compute_region();
const Accessor<float> src = d.get_curr_accessor<float>(dh);
dim3 dimBlock = Dim3::make_block_dim(reg.extent(), 512);
dim3 dimGrid = (reg.extent() + Dim3(dimBlock) - 1) / Dim3(dimBlock);
d.set_device();
hipLaunchKernelGGL(( init_kernel), dim3(dimGrid), dim3(dimBlock), 0, computeStreams[di], src, reg, computeRegion);
}
// wait for init to complete
for (auto &s : computeStreams) {
CUDA_RUNTIME(hipStreamSynchronize(s));
}
if (paraview) {
dd.write_paraview(prefix + "jacobi3d_init");
}
const std::vector<Rect3> interiors = dd.get_interior();
const std::vector<std::vector<Rect3>> exteriors = dd.get_exterior();
for (int iter = 0; iter < iters; ++iter) {
double elapsed = MPI_Wtime();
if (overlap) {
// launch operations on interior, safe to compute on before exchange
for (size_t di = 0; di < dd.domains().size(); ++di) {
auto &d = dd.domains()[di];
const Rect3 mr = interiors[di];
const Accessor<float> src0 = d.get_curr_accessor<float>(dh);
const Accessor<float> dst0 = d.get_next_accessor<float>(dh);
roctxRangePush("launch");
// if (0 == rank)
// std::cerr << rank << ": launch on region=" << mr << " (interior)\n";
dim3 dimBlock = Dim3::make_block_dim(mr.extent(), 256);
dim3 dimGrid = (mr.extent() + Dim3(dimBlock) - 1) / Dim3(dimBlock);
d.set_device();
hipLaunchKernelGGL(( stencil_kernel), dim3(dimGrid), dim3(dimBlock), 0, computeStreams[di], dst0, src0, mr, computeRegion);
CUDA_RUNTIME(hipGetLastError());
roctxRangePop(); // launch
}
}
// exchange halos: update ghost elements with current values from neighbors
// if (0 == rank)
// std::cerr << rank << ": exchange\n";
dd.exchange();
if (overlap) {
// operate on exterior now that ghost values are right
for (size_t di = 0; di < dd.domains().size(); ++di) {
auto &d = dd.domains()[di];
const Accessor<float> src = d.get_curr_accessor<float>(dh);
const Accessor<float> dst = d.get_next_accessor<float>(dh);
for (size_t si = 0; si < exteriors[di].size(); ++si) {
roctxRangePush("launch");
const Rect3 mr = exteriors[di][si];
// if (0 == rank)
// std::cerr << rank << ": launch on region=" << mr << " (exterior)\n";
dim3 dimBlock = Dim3::make_block_dim(mr.extent(), 256);
dim3 dimGrid = (mr.extent() + Dim3(dimBlock) - 1) / Dim3(dimBlock);
d.set_device();
hipLaunchKernelGGL(( stencil_kernel), dim3(dimGrid), dim3(dimBlock), 0, computeStreams[di], dst, src, mr, computeRegion);
CUDA_RUNTIME(hipGetLastError());
roctxRangePop(); // launch
}
}
} else {
// launch operations on compute region now that ghost values are right
for (size_t di = 0; di < dd.domains().size(); ++di) {
auto &d = dd.domains()[di];
const Rect3 mr = d.get_compute_region();
const Accessor<float> src = d.get_curr_accessor<float>(dh);
const Accessor<float> dst = d.get_next_accessor<float>(dh);
roctxRangePush("launch (whole)");
// if (0 == rank)
// std::cerr << rank << ": launch on region=" << mr << " (whole)\n";
d.set_device();
dim3 dimBlock = Dim3::make_block_dim(mr.extent(), 256);
dim3 dimGrid = (mr.extent() + Dim3(dimBlock) - 1) / Dim3(dimBlock);
hipLaunchKernelGGL(( stencil_kernel), dim3(dimGrid), dim3(dimBlock), 0, computeStreams[di], dst, src, mr, computeRegion);
CUDA_RUNTIME(hipGetLastError());
roctxRangePop(); // launch (whole)
}
}
// wait for stencil to complete before swapping pointers
for (auto &s : computeStreams) {
CUDA_RUNTIME(hipStreamSynchronize(s));
}
// current = next
dd.swap();
elapsed = MPI_Wtime() - elapsed;
MPI_Allreduce(MPI_IN_PLACE, &elapsed, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
iterTime.insert(elapsed);
if (paraview && (iter % checkpointPeriod == 0)) {
dd.write_paraview(prefix + "jacobi3d_" + std::to_string(iter));
}
}
if (paraview) {
dd.write_paraview(prefix + "jacobi3d_final");
}
if (0 == mpi::world_rank()) {
std::string methodStr;
if (methods && Method::CudaMpi) {
methodStr += methodStr.empty() ? "" : ",";
methodStr += "staged";
}
if (methods && Method::ColoPackMemcpyUnpack) {
methodStr += methodStr.empty() ? "" : "/";
methodStr += "colo";
}
if (methods && Method::CudaMemcpyPeer) {
methodStr += methodStr.empty() ? "" : "/";
methodStr += "peer";
}
if (methods && Method::CudaKernel) {
methodStr += methodStr.empty() ? "" : "/";
methodStr += "kernel";
}
std::cout << "jacobi3d," << methodStr << "," << size << "," << devCount << "," << x << "," << y << "," << z << ","
<< dd.exchange_bytes_for_method(Method::CudaMpi) << ","
<< dd.exchange_bytes_for_method(Method::ColoPackMemcpyUnpack) << ","
<< dd.exchange_bytes_for_method(Method::CudaMemcpyPeer) << ","
<< dd.exchange_bytes_for_method(Method::CudaKernel) << "," << iterTime.min() << ","
<< iterTime.trimean() << "\n";
}
} // send domains out of scope before MPI_Finalize
MPI_Finalize();
return 0;
}
| f1b98a28428ca4c288b6268715db5a861fa730c9.cu | #include <cmath>
#include <nvToolsExt.h>
#include "argparse/argparse.hpp"
#include "stencil/stencil.hpp"
#include "statistics.hpp"
const float COLD_TEMP = 0;
const float HOT_TEMP = 1;
/*! initialize every point in the compute region to the midpoint of HOT_TEMP and COLD_TEMP
*/
__global__ void init_kernel(Accessor<float> dst, const Rect3 reg, const Rect3 cReg //<! the entire compute region
) {
for (int64_t z = reg.lo.z + blockIdx.z * blockDim.z + threadIdx.z; z < reg.hi.z; z += gridDim.z * blockDim.z) {
for (int64_t y = reg.lo.y + blockIdx.y * blockDim.y + threadIdx.y; y < reg.hi.y; y += gridDim.y * blockDim.y) {
for (int64_t x = reg.lo.x + blockIdx.x * blockDim.x + threadIdx.x; x < reg.hi.x; x += gridDim.x * blockDim.x) {
Dim3 o(x, y, z);
dst[o] = (HOT_TEMP + COLD_TEMP) / 2;
}
}
}
}
__device__ int64_t dist(const Dim3 a, const Dim3 b) {
return __fsqrt_rn(float((a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y) + (a.z - b.z) * (a.z - b.z)));
}
/* Apply a 3d jacobi stencil to `reg`
Since the library only supports periodic boundary conditions right now,
fix part of the middle of the compute region at 1 and part at 0
*/
__global__ void stencil_kernel(Accessor<float> dst, const Accessor<float> src,
const Rect3 myReg, //<! the region i should modify
const Rect3 cReg //<! the entire compute region
) {
// x = 1/3, y = 1/2, z = 1/2
const Dim3 hotCenter(cReg.lo.x + (cReg.hi.x - cReg.lo.x) / 3, (cReg.lo.y + cReg.hi.y) / 2,
(cReg.lo.z + cReg.hi.z) / 2);
const Dim3 coldCenter(cReg.lo.x + (cReg.hi.x - cReg.lo.x) * 2 / 3, (cReg.lo.y + cReg.hi.y) / 2,
(cReg.lo.z + cReg.hi.z) / 2);
const int sphereRadius = (cReg.hi.x - cReg.lo.x) / 10;
for (int z = myReg.lo.z + blockIdx.z * blockDim.z + threadIdx.z; z < myReg.hi.z; z += gridDim.z * blockDim.z) {
for (int y = myReg.lo.y + blockIdx.y * blockDim.y + threadIdx.y; y < myReg.hi.y; y += gridDim.y * blockDim.y) {
for (int x = myReg.lo.x + blockIdx.x * blockDim.x + threadIdx.x; x < myReg.hi.x; x += gridDim.x * blockDim.x) {
Dim3 o(x, y, z);
/* a sphere 1/10 of the CR in radius and x = 1/3 of the way over is set hot
a similar sphere of cold is at x = 2/3
*/
if (dist(o, hotCenter) <= sphereRadius) {
dst[o] = HOT_TEMP;
} else if (dist(o, coldCenter) <= sphereRadius) {
dst[o] = COLD_TEMP;
} else {
float px = src[o + Dim3(1, 0, 0)];
float mx = src[o + Dim3(-1, 0, 0)];
float py = src[o + Dim3(0, 1, 0)];
float my = src[o + Dim3(0, -1, 0)];
float pz = src[o + Dim3(0, 0, 1)];
float mz = src[o + Dim3(0, 0, -1)];
float val = 0;
val += px;
val += mx;
val += py;
val += my;
val += pz;
val += mz;
val /= 6;
dst[o] = val;
}
}
}
}
}
int main(int argc, char **argv) {
bool useStaged = false;
bool useColo = false;
bool useMemcpyPeer = false;
bool useKernel = false;
bool trivial = false;
bool noOverlap = false;
bool paraview = false;
size_t x = 512;
size_t y = 512;
size_t z = 512;
std::string prefix;
int iters = 5;
int checkpointPeriod = -1;
argparse::Parser parser("a cwpearson/argparse-powered CLI app");
// clang-format off
parser.add_flag(useStaged, "--staged")->help("Enable RemoteSender/Recver");
parser.add_flag(useColo, "--colo")->help("Enable ColocatedHaloSender/Recver");
parser.add_flag(useMemcpyPeer, "--peer")->help("Enable PeerAccessSender");
parser.add_flag(useKernel, "--kernel")->help("Enable PeerCopySender");
parser.add_flag(trivial, "--trivial")->help("Skip node-aware placement");
parser.add_flag(noOverlap, "--no-overlap")->help("Don't overlap communication and computation");
parser.add_option(prefix, "--prefix")->help("prefix for paraview files");
parser.add_flag(paraview, "--paraview")->help("dump paraview files");
parser.add_option(iters, "--iters", "-n")->help("number of iterations");
parser.add_option(checkpointPeriod, "--period", "-q")->help("iterations between checkpoints");
parser.add_positional(x)->required();
parser.add_positional(y)->required();
parser.add_positional(z)->required();
// clang-format on
if (!parser.parse(argc, argv)) {
std::cerr << parser.help() << "\n";
exit(EXIT_FAILURE);
}
if (parser.need_help()) {
std::cerr << parser.help() << "\n";
return 0;
}
// default checkpoint 10 times
if (checkpointPeriod <= 0) {
checkpointPeriod = iters / 10;
}
MPI_Init(&argc, &argv);
int size;
int rank;
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (0 == rank) {
#ifndef NDEBUG
std::cout << "ERR: not release mode\n";
std::cerr << "ERR: not release mode\n";
exit(-1);
#endif
#ifdef STENCIL_EXCHANGE_STATS
std::cout << "ERR: STENCIL_EXCHANGE_STATS\n";
std::cerr << "ERR: STENCIL_EXCHANGE_STATS\n";
exit(-1);
#endif
}
int devCount;
CUDA_RUNTIME(cudaGetDeviceCount(&devCount));
cudaDeviceProp prop;
CUDA_RUNTIME(cudaGetDeviceProperties(&prop, 0));
Method methods = Method::None;
if (useStaged) {
methods |= Method::CudaMpi;
}
if (useColo) {
methods |= Method::ColoPackMemcpyUnpack;
}
if (useMemcpyPeer) {
methods |= Method::CudaMemcpyPeer;
}
if (useKernel) {
methods |= Method::CudaKernel;
}
if (Method::None == methods) {
methods = Method::Default;
}
PlacementStrategy strategy = PlacementStrategy::NodeAware;
if (trivial) {
strategy = PlacementStrategy::Trivial;
}
bool overlap = true;
if (noOverlap) {
overlap = false;
}
Radius radius = Radius::constant(0);
// x
radius.dir(1, 0, 0) = 1;
radius.dir(-1, 0, 0) = 1;
// y
radius.dir(0, 1, 0) = 1;
radius.dir(0, -1, 0) = 1;
// z
radius.dir(0, 0, 1) = 1;
radius.dir(0, 0, -1) = 1;
// radius.set_face(1);
Statistics iterTime;
{
DistributedDomain dd(x, y, z);
dd.set_methods(methods);
dd.set_radius(radius);
dd.set_placement(strategy);
auto dh = dd.add_data<float>("d");
dd.realize();
MPI_Barrier(MPI_COMM_WORLD);
Rect3 computeRegion = dd.get_compute_region();
// create a compute stream for each local domain
std::vector<RcStream> computeStreams(dd.domains().size());
for (size_t di = 0; di < dd.domains().size(); ++di) {
computeStreams[di] = RcStream(dd.domains()[di].gpu());
}
// init current values
std::cerr << "init\n";
for (size_t di = 0; di < dd.domains().size(); ++di) {
auto &d = dd.domains()[di];
Rect3 reg = d.get_compute_region();
const Accessor<float> src = d.get_curr_accessor<float>(dh);
dim3 dimBlock = Dim3::make_block_dim(reg.extent(), 512);
dim3 dimGrid = (reg.extent() + Dim3(dimBlock) - 1) / Dim3(dimBlock);
d.set_device();
init_kernel<<<dimGrid, dimBlock, 0, computeStreams[di]>>>(src, reg, computeRegion);
}
// wait for init to complete
for (auto &s : computeStreams) {
CUDA_RUNTIME(cudaStreamSynchronize(s));
}
if (paraview) {
dd.write_paraview(prefix + "jacobi3d_init");
}
const std::vector<Rect3> interiors = dd.get_interior();
const std::vector<std::vector<Rect3>> exteriors = dd.get_exterior();
for (int iter = 0; iter < iters; ++iter) {
double elapsed = MPI_Wtime();
if (overlap) {
// launch operations on interior, safe to compute on before exchange
for (size_t di = 0; di < dd.domains().size(); ++di) {
auto &d = dd.domains()[di];
const Rect3 mr = interiors[di];
const Accessor<float> src0 = d.get_curr_accessor<float>(dh);
const Accessor<float> dst0 = d.get_next_accessor<float>(dh);
nvtxRangePush("launch");
// if (0 == rank)
// std::cerr << rank << ": launch on region=" << mr << " (interior)\n";
dim3 dimBlock = Dim3::make_block_dim(mr.extent(), 256);
dim3 dimGrid = (mr.extent() + Dim3(dimBlock) - 1) / Dim3(dimBlock);
d.set_device();
stencil_kernel<<<dimGrid, dimBlock, 0, computeStreams[di]>>>(dst0, src0, mr, computeRegion);
CUDA_RUNTIME(cudaGetLastError());
nvtxRangePop(); // launch
}
}
// exchange halos: update ghost elements with current values from neighbors
// if (0 == rank)
// std::cerr << rank << ": exchange\n";
dd.exchange();
if (overlap) {
// operate on exterior now that ghost values are right
for (size_t di = 0; di < dd.domains().size(); ++di) {
auto &d = dd.domains()[di];
const Accessor<float> src = d.get_curr_accessor<float>(dh);
const Accessor<float> dst = d.get_next_accessor<float>(dh);
for (size_t si = 0; si < exteriors[di].size(); ++si) {
nvtxRangePush("launch");
const Rect3 mr = exteriors[di][si];
// if (0 == rank)
// std::cerr << rank << ": launch on region=" << mr << " (exterior)\n";
dim3 dimBlock = Dim3::make_block_dim(mr.extent(), 256);
dim3 dimGrid = (mr.extent() + Dim3(dimBlock) - 1) / Dim3(dimBlock);
d.set_device();
stencil_kernel<<<dimGrid, dimBlock, 0, computeStreams[di]>>>(dst, src, mr, computeRegion);
CUDA_RUNTIME(cudaGetLastError());
nvtxRangePop(); // launch
}
}
} else {
// launch operations on compute region now that ghost values are right
for (size_t di = 0; di < dd.domains().size(); ++di) {
auto &d = dd.domains()[di];
const Rect3 mr = d.get_compute_region();
const Accessor<float> src = d.get_curr_accessor<float>(dh);
const Accessor<float> dst = d.get_next_accessor<float>(dh);
nvtxRangePush("launch (whole)");
// if (0 == rank)
// std::cerr << rank << ": launch on region=" << mr << " (whole)\n";
d.set_device();
dim3 dimBlock = Dim3::make_block_dim(mr.extent(), 256);
dim3 dimGrid = (mr.extent() + Dim3(dimBlock) - 1) / Dim3(dimBlock);
stencil_kernel<<<dimGrid, dimBlock, 0, computeStreams[di]>>>(dst, src, mr, computeRegion);
CUDA_RUNTIME(cudaGetLastError());
nvtxRangePop(); // launch (whole)
}
}
// wait for stencil to complete before swapping pointers
for (auto &s : computeStreams) {
CUDA_RUNTIME(cudaStreamSynchronize(s));
}
// current = next
dd.swap();
elapsed = MPI_Wtime() - elapsed;
MPI_Allreduce(MPI_IN_PLACE, &elapsed, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
iterTime.insert(elapsed);
if (paraview && (iter % checkpointPeriod == 0)) {
dd.write_paraview(prefix + "jacobi3d_" + std::to_string(iter));
}
}
if (paraview) {
dd.write_paraview(prefix + "jacobi3d_final");
}
if (0 == mpi::world_rank()) {
std::string methodStr;
if (methods && Method::CudaMpi) {
methodStr += methodStr.empty() ? "" : ",";
methodStr += "staged";
}
if (methods && Method::ColoPackMemcpyUnpack) {
methodStr += methodStr.empty() ? "" : "/";
methodStr += "colo";
}
if (methods && Method::CudaMemcpyPeer) {
methodStr += methodStr.empty() ? "" : "/";
methodStr += "peer";
}
if (methods && Method::CudaKernel) {
methodStr += methodStr.empty() ? "" : "/";
methodStr += "kernel";
}
std::cout << "jacobi3d," << methodStr << "," << size << "," << devCount << "," << x << "," << y << "," << z << ","
<< dd.exchange_bytes_for_method(Method::CudaMpi) << ","
<< dd.exchange_bytes_for_method(Method::ColoPackMemcpyUnpack) << ","
<< dd.exchange_bytes_for_method(Method::CudaMemcpyPeer) << ","
<< dd.exchange_bytes_for_method(Method::CudaKernel) << "," << iterTime.min() << ","
<< iterTime.trimean() << "\n";
}
} // send domains out of scope before MPI_Finalize
MPI_Finalize();
return 0;
}
|
3944e792601abe2633de3d9cb79ac5206a41f1bb.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include <hip/hip_runtime.h>
#include "oneflow/core/ep/cuda/cuda_stream.h"
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000
#include "oneflow/core/device/cuda_pseudo_bfloat16.h"
namespace oneflow {
namespace user_op {
namespace {
template<typename T>
__global__ void ReluForwardGpu(int64_t n, const T* in, T* out) {
const T zero = static_cast<T>(0.0);
CUDA_1D_KERNEL_LOOP(i, n) {
const T in_i = in[i];
T out_i = zero;
if (in_i > zero) { out_i = in_i; }
out[i] = out_i;
}
}
template<typename T>
__global__ void ReluBackwardGpu(int64_t n, const T* y, const T* dy, T* dx) {
const T zero = static_cast<T>(0.0);
CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = y[i] > zero ? dy[i] : zero; }
}
} // namespace
class ReluNvBFloat16Kernel final : public OpKernel {
public:
ReluNvBFloat16Kernel() = default;
~ReluNvBFloat16Kernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(KernelComputeContext* ctx) const override {
const Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const int64_t n = in->shape().elem_cnt();
hipLaunchKernelGGL(( ReluForwardGpu<nv_bfloat16>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->stream()->As<ep::CudaStream>()->cuda_stream(),
n, reinterpret_cast<const nv_bfloat16*>(in->dptr()),
reinterpret_cast<nv_bfloat16*>(out->mut_dptr()));
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
class ReluGradNvBFloat16Kernel final : public OpKernel {
public:
ReluGradNvBFloat16Kernel() = default;
~ReluGradNvBFloat16Kernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(KernelComputeContext* ctx) const override {
const Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
const Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const int64_t n = y->shape().elem_cnt();
hipLaunchKernelGGL(( ReluBackwardGpu<nv_bfloat16>), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0,
ctx->stream()->As<ep::CudaStream>()->cuda_stream(),
n, reinterpret_cast<const nv_bfloat16*>(y->dptr()),
reinterpret_cast<const nv_bfloat16*>(dy->dptr()),
reinterpret_cast<nv_bfloat16*>(dx->mut_dptr()));
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
REGISTER_USER_KERNEL("relu")
.SetCreateFn<ReluNvBFloat16Kernel>()
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU)
&& (user_op::HobDataType("out", 0) == DataType::kBFloat16))
.SetInplaceProposalFn([](const user_op::InferContext&,
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true));
return Maybe<void>::Ok();
});
REGISTER_USER_KERNEL("relu_grad")
.SetCreateFn<ReluGradNvBFloat16Kernel>()
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU)
&& (user_op::HobDataType("dx", 0) == DataType::kBFloat16))
.SetInplaceProposalFn([](const user_op::InferContext&,
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "dy", 0, true));
return Maybe<void>::Ok();
});
} // namespace user_op
} // namespace oneflow
#endif // defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000
| 3944e792601abe2633de3d9cb79ac5206a41f1bb.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include <cuda.h>
#include "oneflow/core/ep/cuda/cuda_stream.h"
#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000
#include "oneflow/core/device/cuda_pseudo_bfloat16.h"
namespace oneflow {
namespace user_op {
namespace {
template<typename T>
__global__ void ReluForwardGpu(int64_t n, const T* in, T* out) {
const T zero = static_cast<T>(0.0);
CUDA_1D_KERNEL_LOOP(i, n) {
const T in_i = in[i];
T out_i = zero;
if (in_i > zero) { out_i = in_i; }
out[i] = out_i;
}
}
template<typename T>
__global__ void ReluBackwardGpu(int64_t n, const T* y, const T* dy, T* dx) {
const T zero = static_cast<T>(0.0);
CUDA_1D_KERNEL_LOOP(i, n) { dx[i] = y[i] > zero ? dy[i] : zero; }
}
} // namespace
class ReluNvBFloat16Kernel final : public OpKernel {
public:
ReluNvBFloat16Kernel() = default;
~ReluNvBFloat16Kernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(KernelComputeContext* ctx) const override {
const Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0);
const int64_t n = in->shape().elem_cnt();
ReluForwardGpu<nv_bfloat16><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>(
n, reinterpret_cast<const nv_bfloat16*>(in->dptr()),
reinterpret_cast<nv_bfloat16*>(out->mut_dptr()));
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
class ReluGradNvBFloat16Kernel final : public OpKernel {
public:
ReluGradNvBFloat16Kernel() = default;
~ReluGradNvBFloat16Kernel() override = default;
private:
using user_op::OpKernel::Compute;
void Compute(KernelComputeContext* ctx) const override {
const Tensor* y = ctx->Tensor4ArgNameAndIndex("y", 0);
const Tensor* dy = ctx->Tensor4ArgNameAndIndex("dy", 0);
Tensor* dx = ctx->Tensor4ArgNameAndIndex("dx", 0);
const int64_t n = y->shape().elem_cnt();
ReluBackwardGpu<nv_bfloat16><<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0,
ctx->stream()->As<ep::CudaStream>()->cuda_stream()>>>(
n, reinterpret_cast<const nv_bfloat16*>(y->dptr()),
reinterpret_cast<const nv_bfloat16*>(dy->dptr()),
reinterpret_cast<nv_bfloat16*>(dx->mut_dptr()));
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
REGISTER_USER_KERNEL("relu")
.SetCreateFn<ReluNvBFloat16Kernel>()
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU)
&& (user_op::HobDataType("out", 0) == DataType::kBFloat16))
.SetInplaceProposalFn([](const user_op::InferContext&,
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("out", 0, "in", 0, true));
return Maybe<void>::Ok();
});
REGISTER_USER_KERNEL("relu_grad")
.SetCreateFn<ReluGradNvBFloat16Kernel>()
.SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU)
&& (user_op::HobDataType("dx", 0) == DataType::kBFloat16))
.SetInplaceProposalFn([](const user_op::InferContext&,
user_op::AddInplaceArgPair AddInplaceArgPairFn) -> Maybe<void> {
OF_RETURN_IF_ERROR(AddInplaceArgPairFn("dx", 0, "dy", 0, true));
return Maybe<void>::Ok();
});
} // namespace user_op
} // namespace oneflow
#endif // defined(CUDA_VERSION) && CUDA_VERSION >= 11000
|
3b60374ef3160b856ac82dd0a802a0ab2a266ce3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <rocblas.h>
#include "cudamat_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
inline bool check_cublas_error() {
cublasStatus status = hipblasGetError();
return status != HIPBLAS_STATUS_SUCCESS;
}
inline bool checkCUDAError() {
hipError_t err = hipGetLastError();
if (hipSuccess != err)
printf("%s\n", hipGetErrorString( err));
return hipSuccess != err;
}
extern const char* get_last_cuda_error() {
hipError_t err = hipGetLastError();
return hipGetErrorString( err);
}
extern int cublas_init() {
hipblasInit();
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
extern int cublas_shutdown() {
hipblasShutdown();
hipDeviceReset();
return 0;
}
extern int cuda_set_device(int deviceId) {
hipSetDevice(deviceId);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
FILE * pFile;
pFile = fopen (cudamatpath,"r");
for (int i = 0; i < NUM_RND_STREAMS; i++) {
fscanf (pFile, "%u", &host_mults[i]);
}
fclose (pFile);
hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
hipblasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
//hipMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int));
//hipMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long));
//hipMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(( kSeedRandom), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, seed);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
extern int get_leading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[1] : mat->size[0];
}
extern int get_nonleading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[0] : mat->size[1];
}
extern void set_transpose(cudamat* mat, int is_trans) {
mat->is_trans = is_trans;
}
inline char get_transpose_char(cudamat* mat) {
return mat->is_trans ? 't' : 'n';
}
extern void cuda_sync_threads() {
hipDeviceSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
extern int allocate_device_memory(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
cublasStatus stat;
stat = hipblasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
extern int copy_to_host(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
if (mat->on_device) {
hipblasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
extern int copy_to_device(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
hipblasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
extern int copy_on_device(cudamat* mat1, cudamat* mat2) {
int len = mat1->size[0]*mat1->size[1];
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipblasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = source->size[0];
int width = source->size[1];
if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
hipLaunchKernelGGL(( kGetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = target->size[0];
int width = target->size[1];
if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
hipLaunchKernelGGL(( kSetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int copy_transpose(cudamat* source, cudamat* target) {
unsigned int height = source->size[0];
unsigned int width = source->size[1];
if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
// setup execution parameters
unsigned int grid_x = height / COPY_BLOCK_SIZE;
if (height % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = width / COPY_BLOCK_SIZE;
if (width % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
hipLaunchKernelGGL(( kTranspose), dim3(grid), dim3(threads) , 0, 0, target->data_device, source->data_device, height, width);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int free_device_memory(cudamat* mat) {
if (mat->owns_data && mat->on_device) {
cublasStatus stat;
stat = hipblasFree(mat->data_device);
mat->on_device = 0;
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
extern int reshape(cudamat* mat, unsigned int m, unsigned int n) {
if (mat->size[0] * mat->size[1] != m * n)
return ERROR_INCOMPATIBLE_DIMENSIONS;
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (last_col > source->size[1] || (first_col >= last_col))
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_col * num_rows;
target->on_device = 1;
target->on_host = 0;
target->size[0] = source->size[0];
target->size[1] = last_col - first_col;
target->is_trans = 0;
target->owns_data = 0;
return 0;
}
extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
// source must be a vector
if (source->size[0] > 1 && source->size[1] > 1)
return ERROR_GENERIC;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (first_ind >= last_ind)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_ind * num_rows;
target->on_device = 1;
target->on_host = 0;
target->is_trans = 0;
target->owns_data = 0;
if (source->size[0] > 1) {
if (last_ind > source->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = last_ind - first_ind;
target->size[1] = 1;
} else {
if (last_ind > source->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = 1;
target->size[1] = last_ind - first_ind;
}
return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
extern void init_from_array(cudamat* mat, float* data, int m, int n) {
mat->data_host = data;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
}
extern int init_empty(cudamat* mat, int m, int n) {
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 0;
mat->is_trans = 0;
mat->owns_data = 1;
return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomUniform), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomGaussian), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError()) {
return CUDA_ERROR;
}
return 0;
}
extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddColMult), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, mult, w, h);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultByColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultByRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThan), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int less_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThanScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThan), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThanScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int equals(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kEquals), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int equals_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kEqualsScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMaxColumnwise), dim3(w),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
if (SYNC_THREADS)
hipDeviceSynchronize();
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int sign(cudamat* mat, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSign), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_sigmoid(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplySigmoid), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_tanh(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyTanh), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_abs(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyAbs), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyLog1PlusExp), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_log(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLog), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kExp), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_sqrt(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSqrt), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_pow(cudamat* mat, float pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kPow), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kPowMatrix), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int reciprocal(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kReciprocal), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
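/* Matrix multiply via SGEMM: target = alpha * mat1 * mat2 + beta * target, honoring each operand's transpose flag. */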
extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
int m = get_leading_dimension(mat1),
k = get_leading_dimension(mat2),
n = get_nonleading_dimension(mat2);
hipblasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
m, n, k,
alpha, mat1->data_device, mat1->size[0],
mat2->data_device, mat2->size[0],
beta, target->data_device, target->size[0]);
if (check_cublas_error())
return CUBLAS_ERROR;
if (SYNC_THREADS)
hipDeviceSynchronize();
return 0;
}
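/* Dot product of mat1 and mat2 treated as flat vectors. Errors are reported through *err_code. */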
extern float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
int len = mat1->size[0]*mat1->size[1];
float res;
if (!mat1->on_device || !mat2->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return 0;
}
if (mat1->is_trans != mat2->is_trans) {
*err_code = ERROR_TRANSPOSEDNESS;
return 0;
}
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) {
*err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
return 0;
}
res = hipblasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must
have the same transposedness. */
extern int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipblasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
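/* Illustrative sketch only (not part of the original cudamat sources): a hypothetical
   host-side caller that computes a = a + 0.5 * b with the routines in this file. The
   helpers init_from_array/copy_to_device/copy_to_host/free_device_memory are assumed to
   be the ones defined earlier in this file; the data values are made up. Guarded so it
   is only compiled when CUDAMAT_USAGE_EXAMPLES is defined. */
#ifdef CUDAMAT_USAGE_EXAMPLES
static int example_add_mult() {
    float a_host[4] = {1.f, 2.f, 3.f, 4.f};     /* 2x2, column-major */
    float b_host[4] = {10.f, 20.f, 30.f, 40.f};
    cudamat a, b;
    init_from_array(&a, a_host, 2, 2);
    init_from_array(&b, b_host, 2, 2);
    if (copy_to_device(&a) || copy_to_device(&b))
        return 1;
    if (add_mult(&a, &b, 0.5f))                 /* a = a + 0.5 * b on the GPU */
        return 1;
    if (copy_to_host(&a))                       /* a_host is now {6, 12, 18, 24} */
        return 1;
    free_device_memory(&a);
    free_device_memory(&b);
    return 0;
}
#endif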
extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat1 == target) {
hipblasSaxpy(len, 1.0f, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else {
hipLaunchKernelGGL(( kAdd), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
}
return 0;
}
extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSubtract), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivide), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise multiplication of 2 matrices */
extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMult), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int assign_scalar(cudamat* mat, float alpha) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kAssignScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat == target) {
hipblasSscal(len, alpha, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else {
hipLaunchKernelGGL(( kMultScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
}
return 0;
}
extern int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivideScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int add_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
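/* Euclidean (L2) norm over all elements of mat. Errors are reported through *err_code. */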
extern float euclid_norm(cudamat* mat, int* err_code) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return -1.;
}
float res = hipblasSnrm2(len, mat->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
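/* Copy the rows of source listed in indices into target (row i of target = row indices[i] of source). */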
extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){
const int nRetRows = indices->size[1];
if (nRetRows==0) return 0;
dim3 gridDim((nRetRows+31)/32);
dim3 blockDim(32);
hipLaunchKernelGGL(( kSelectRows), dim3(gridDim), dim3(blockDim), 0, 0, source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
const int nSetRows = indices->size[1];
if (nSetRows==0)
return 0;
dim3 gridDim((nSetRows+31)/32);
dim3 blockDim(32);
hipLaunchKernelGGL(( kSetSelectedRows), dim3(gridDim), dim3(blockDim), 0, 0, target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
if (SYNC_THREADS)
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
}
| 3b60374ef3160b856ac82dd0a802a0ab2a266ce3.cu | #include <stdio.h>
#include <stdlib.h>
#include <cublas.h>
#include "cudamat_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
inline bool check_cublas_error() {
cublasStatus status = cublasGetError();
return status != CUBLAS_STATUS_SUCCESS;
}
inline bool checkCUDAError() {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
printf("%s\n", cudaGetErrorString( err));
return cudaSuccess != err;
}
extern const char* get_last_cuda_error() {
cudaError_t err = cudaGetLastError();
return cudaGetErrorString( err);
}
extern int cublas_init() {
cublasInit();
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
extern int cublas_shutdown() {
cublasShutdown();
cudaThreadExit();
return 0;
}
extern int cuda_set_device(int deviceId) {
cudaSetDevice(deviceId);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
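/* Illustrative sketch only (not part of the original cudamat sources): a hypothetical
   start-up/shutdown sequence for a host program using the wrappers above. Device id 0
   is an arbitrary choice. Guarded so it is only compiled when CUDAMAT_USAGE_EXAMPLES
   is defined. */
#ifdef CUDAMAT_USAGE_EXAMPLES
static int example_setup_teardown() {
    if (cuda_set_device(0))      /* select a GPU before allocating anything */
        return CUDA_ERROR;
    if (cublas_init())           /* create the CUBLAS context */
        return CUBLAS_ERROR;
    /* ... allocate cudamat structures, run kernels ... */
    return cublas_shutdown();
}
#endif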
extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
FILE * pFile;
pFile = fopen (cudamatpath,"r");
if (pFile == NULL) {
free(host_mults);
return ERROR_GENERIC;
}
for (int i = 0; i < NUM_RND_STREAMS; i++) {
fscanf (pFile, "%u", &host_mults[i]);
}
fclose (pFile);
cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
cublasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
//cudaMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int));
//cudaMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long));
//cudaMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaThreadSynchronize();
kSeedRandom<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, seed);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
extern int get_leading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[1] : mat->size[0];
}
extern int get_nonleading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[0] : mat->size[1];
}
extern void set_transpose(cudamat* mat, int is_trans) {
mat->is_trans = is_trans;
}
inline char get_transpose_char(cudamat* mat) {
return mat->is_trans ? 't' : 'n';
}
extern void cuda_sync_threads() {
cudaThreadSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
extern int allocate_device_memory(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
cublasStatus stat;
stat = cublasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
extern int copy_to_host(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
if (mat->on_device) {
cublasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
extern int copy_to_device(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
cublasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
extern int copy_on_device(cudamat* mat1, cudamat* mat2) {
int len = mat1->size[0]*mat1->size[1];
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
cublasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
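/* Copy rows [start, end) of source into target. */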
extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = source->size[0];
int width = source->size[1];
if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
kGetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = target->size[0];
int width = target->size[1];
if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
kSetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int copy_transpose(cudamat* source, cudamat* target) {
unsigned int height = source->size[0];
unsigned int width = source->size[1];
if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
// setup execution parameters
unsigned int grid_x = height / COPY_BLOCK_SIZE;
if (height % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = width / COPY_BLOCK_SIZE;
if (width % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
kTranspose<<< grid, threads >>>(target->data_device, source->data_device, height, width);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int free_device_memory(cudamat* mat) {
if (mat->owns_data && mat->on_device) {
cublasStatus stat;
stat = cublasFree(mat->data_device);
mat->on_device = 0;
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
extern int reshape(cudamat* mat, unsigned int m, unsigned int n) {
if (mat->size[0] * mat->size[1] != m * n)
return ERROR_INCOMPATIBLE_DIMENSIONS;
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
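/* Make target a view (no copy, owns_data = 0) of columns [first_col, last_col) of source. */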
extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (last_col > source->size[1] || (first_col >= last_col))
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_col * num_rows;
target->on_device = 1;
target->on_host = 0;
target->size[0] = source->size[0];
target->size[1] = last_col - first_col;
target->is_trans = 0;
target->owns_data = 0;
return 0;
}
extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
// source must be a vector
if (source->size[0] > 1 && source->size[1] > 1)
return ERROR_GENERIC;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (first_ind >= last_ind)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_ind * num_rows;
target->on_device = 1;
target->on_host = 0;
target->is_trans = 0;
target->owns_data = 0;
if (source->size[0] > 1) {
if (last_ind > source->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = last_ind - first_ind;
target->size[1] = 1;
} else {
if (last_ind > source->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = 1;
target->size[1] = last_ind - first_ind;
}
return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
extern void init_from_array(cudamat* mat, float* data, int m, int n) {
mat->data_host = data;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
}
extern int init_empty(cudamat* mat, int m, int n) {
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 0;
mat->is_trans = 0;
mat->owns_data = 1;
return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomUniform<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
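/* Illustrative sketch only (not part of the original cudamat sources): seeding the
   random number generator and filling a freshly allocated matrix with N(0, 1) samples.
   The multiplier-file path, the seed and the 128x64 shape are placeholders. Guarded so
   it is only compiled when CUDAMAT_USAGE_EXAMPLES is defined. */
#ifdef CUDAMAT_USAGE_EXAMPLES
static int example_fill_with_randn() {
    rnd_struct rnd;
    cudamat m;
    int err;
    if ((err = init_random(&rnd, 42, (char*)"rnd_multipliers_32bit.txt")))
        return err;
    if ((err = init_empty(&m, 128, 64)))    /* allocates 128*64 floats on the GPU */
        return err;
    if ((err = fill_with_randn(&rnd, &m)))  /* every element drawn from N(0, 1) */
        return err;
    return free_device_memory(&m);
}
#endif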
/* ------------------------------ Algebraic operations ------------------------------ */
extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError()) {
return CUDA_ERROR;
}
return 0;
}
extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddColMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int less_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int equals(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kEquals<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int equals_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kEqualsScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
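/* Reduce mat along an axis into target; only axis 0 (column-wise maximum) is implemented. */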
extern int max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMaxColumnwise<<<w,32>>>(mat->data_device, target->data_device, w, h);
if (SYNC_THREADS)
cudaThreadSynchronize();
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int sign(cudamat* mat, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_sigmoid(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplySigmoid<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_tanh(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyTanh<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_abs(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyAbs<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyLog1PlusExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_log(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLog<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_sqrt(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSqrt<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_pow(cudamat* mat, float pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kPow<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kPowMatrix<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int reciprocal(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kReciprocal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
int m = get_leading_dimension(mat1),
k = get_leading_dimension(mat2),
n = get_nonleading_dimension(mat2);
cublasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
m, n, k,
alpha, mat1->data_device, mat1->size[0],
mat2->data_device, mat2->size[0],
beta, target->data_device, target->size[0]);
if (check_cublas_error())
return CUBLAS_ERROR;
if (SYNC_THREADS)
cudaThreadSynchronize();
return 0;
}
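/* Illustrative sketch only (not part of the original cudamat sources): multiplying a
   3x4 matrix by a 4x2 matrix into a freshly allocated 3x2 target using dot() above,
   i.e. target = 1.0 * mat1 * mat2 + 0.0 * target. The shapes are arbitrary and both
   inputs are assumed non-transposed and already on the device. Guarded so it is only
   compiled when CUDAMAT_USAGE_EXAMPLES is defined. */
#ifdef CUDAMAT_USAGE_EXAMPLES
static int example_dot(cudamat* mat1 /* 3x4 */, cudamat* mat2 /* 4x2 */) {
    cudamat target;
    int err = init_empty(&target, 3, 2);       /* rows of mat1 x columns of mat2 */
    if (err)
        return err;
    err = dot(mat1, mat2, &target, 0.f, 1.f);  /* beta = 0, alpha = 1 */
    free_device_memory(&target);               /* a real caller would keep target alive */
    return err;
}
#endif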
extern float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
int len = mat1->size[0]*mat1->size[1];
float res;
if (!mat1->on_device || !mat2->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return 0;
}
if (mat1->is_trans != mat2->is_trans) {
*err_code = ERROR_TRANSPOSEDNESS;
return 0;
}
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) {
*err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
return 0;
}
res = cublasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must
have the same transposedness. */
extern int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
cublasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat1 == target) {
cublasSaxpy(len, 1.0f, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else {
kAdd<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
}
return 0;
}
extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSubtract<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivide<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise multiplication of 2 matrices */
extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int assign_scalar(cudamat* mat, float alpha) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kAssignScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat == target) {
cublasSscal(len, alpha, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else {
kMultScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
}
return 0;
}
extern int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivideScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int add_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern float euclid_norm(cudamat* mat, int* err_code) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return -1.;
}
float res = cublasSnrm2(len, mat->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){
const int nRetRows = indices->size[1];
if (nRetRows==0) return 0;
dim3 gridDim((nRetRows+31)/32);
dim3 blockDim(32);
kSelectRows<<<gridDim, blockDim>>>(source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
const int nSetRows = indices->size[1];
if (nSetRows==0)
return 0;
dim3 gridDim((nSetRows+31)/32);
dim3 blockDim(32);
kSetSelectedRows<<<gridDim, blockDim>>>(target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
if (SYNC_THREADS)
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
}
|